Add support for ingress and automation for developers to test on minikube (#56)

* add automation to create two minikube clusters

* Add --endpoint flag to transfer-pvc subcommand

* add hack/delete-clusters.sh

* garbage collect resources after trying to follow logs

* fixups: add changes to scripts from PR review

Co-authored-by: David Zager <dzager@redhat.com>

* update ingress endpoint to pass subdomain

* fixups: update bash scripts with suggestions from code review round 2

Co-authored-by: David Zager <dzager@redhat.com>

* fixups: run gofmt and goimports

* bump go mod to use latest crane-lib
Alay Patel authored Dec 8, 2021
1 parent f45be82 commit 6704b54
Showing 5 changed files with 246 additions and 43 deletions.
207 changes: 167 additions & 40 deletions cmd/transfer-pvc/transfer-pvc.go
@@ -8,8 +8,8 @@ import (
 	"os"
 	"time"
 
-	"github.com/konveyor/crane-lib/state_transfer"
 	"github.com/konveyor/crane-lib/state_transfer/endpoint"
+	"github.com/konveyor/crane-lib/state_transfer/endpoint/ingress"
 	"github.com/konveyor/crane-lib/state_transfer/endpoint/route"
 	"github.com/konveyor/crane-lib/state_transfer/meta"
 	metadata "github.com/konveyor/crane-lib/state_transfer/meta"
@@ -21,9 +21,12 @@ import (
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	corev1 "k8s.io/api/core/v1"
+	networkingv1 "k8s.io/api/networking/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/types"
+	errorsutil "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/cli-runtime/pkg/genericclioptions"
 	"k8s.io/client-go/kubernetes"
@@ -42,6 +45,7 @@ type TransferPVCOptions struct {
 	DestinationContext string
 	PVCName            string
 	PVCNamespace       string
+	Endpoint           string
 
 	// TODO: add more fields for PVC mapping/think of a config file to get inputs?
 	sourceContext *clientcmdapi.Context
@@ -83,6 +87,7 @@ func addFlagsForTransferPVCOptions(t *TransferPVCOptions, cmd *cobra.Command) {
 	cmd.Flags().StringVar(&t.DestinationContext, "destination-context", "", "The name of the destination context in the current kubeconfig")
 	cmd.Flags().StringVar(&t.PVCNamespace, "pvc-namespace", "", "The namespace of the pvc which is to be transferred, if empty it will try to use the namespace in source-context, if both are empty it will error")
 	cmd.Flags().StringVar(&t.PVCName, "pvc-name", "", "The pvc name which is to be transferred on the source")
+	cmd.Flags().StringVar(&t.Endpoint, "endpoint", "nginx-ingress", "The type of networking endpoint to use to accept traffic in the destination cluster. The available options are `nginx-ingress` and `route`")
 }

 func (t *TransferPVCOptions) Complete(c *cobra.Command, args []string) error {
@@ -181,12 +186,6 @@ func (t *TransferPVCOptions) run() error {
 		log.Fatal(err, "unable to get destination client")
 	}
 
-	// quiesce the applications if needed on the source side
-	err = state_transfer.QuiesceApplications(srcCfg, t.PVCNamespace)
-	if err != nil {
-		log.Fatal(err, "unable to quiesce application on source cluster")
-	}
-
 	// set up the PVC on destination to receive the data
 	pvc := &corev1.PersistentVolumeClaim{}
 	err = srcClient.Get(context.TODO(), client.ObjectKey{Namespace: t.PVCNamespace, Name: t.PVCName}, pvc)
@@ -211,39 +210,15 @@ func (t *TransferPVCOptions) run() error {
 		log.Fatal(err, "invalid pvc list")
 	}
 
-	// create a route for data transfer
-	// TODO: pass in subdomain instead of ""
-	r := route.NewEndpoint(
-		types.NamespacedName{
-			Namespace: pvc.Namespace,
-			Name:      pvc.Name,
-		}, route.EndpointTypePassthrough, metadata.Labels, "")
-	e, err := endpoint.Create(r, destClient)
-	if err != nil {
-		log.Fatal(err, "unable to create route endpoint")
-	}
-
-	_ = wait.PollUntil(time.Second*5, func() (done bool, err error) {
-		e, err := route.GetEndpointFromKubeObjects(destClient, e.NamespacedName())
-		if err != nil {
-			log.Println(err, "unable to check route health, retrying...")
-			return false, nil
-		}
-		ready, err := e.IsHealthy(destClient)
-		if err != nil {
-			log.Println(err, "unable to check route health, retrying...")
-			return false, nil
-		}
-		return ready, nil
-	}, make(<-chan struct{}))
-
-	e, err = route.GetEndpointFromKubeObjects(destClient, e.NamespacedName())
-	if err != nil {
-		log.Fatal(err, "unable to get the route object")
-	} else {
-		log.Println("route endpoint is created and is healthy")
-	}
+	var e endpoint.Endpoint
+	switch t.Endpoint {
+	case "route":
+		e = createAndWaitForRoute(pvc, destClient)
+	case "nginx-ingress":
+		e = createAndWaitForIngress(pvc, destClient)
+	default:
+		log.Fatalf("unsupported endpoint type %s\n", t.Endpoint)
+	}
 
 	// create a stunnel transport to carry the data over the route
 
 	s := stunnel.NewTransport(meta.NewNamespacedPair(
@@ -278,7 +253,7 @@ func (t *TransferPVCOptions) run() error {
 		rsync.Username("root"),
 	}
 
-	rsyncTransfer, err := rsync.NewTransfer(s, r, srcCfg, destCfg, pvcList, rsyncTransferOptions...)
+	rsyncTransfer, err := rsync.NewTransfer(s, e, srcCfg, destCfg, pvcList, rsyncTransferOptions...)
 	if err != nil {
 		log.Fatal(err, "error creating rsync transfer")
 	} else {
@@ -312,6 +287,80 @@ func (t *TransferPVCOptions) run() error {
 		log.Fatal(err, "error following rsync client logs")
 	}
 
+	log.Println("followed the logs, garbage collecting created resources on both source and destination")
+	return garbageCollect(srcClient, destClient, map[string]string{"app": "crane2"}, t.Endpoint, t.PVCNamespace)
+}
+
+func garbageCollect(srcClient client.Client, destClient client.Client, labels map[string]string, endpoint, namespace string) error {
+	srcGVK := []client.Object{
+		&corev1.Pod{},
+		&corev1.ConfigMap{},
+		&corev1.Secret{},
+	}
+	destGVK := []client.Object{
+		&corev1.Pod{},
+		&corev1.ConfigMap{},
+		&corev1.Secret{},
+	}
+	switch endpoint {
+	case "route":
+		destGVK = append(destGVK, &routev1.Route{})
+	case "nginx-ingress":
+		destGVK = append(destGVK, &networkingv1.Ingress{})
+	}
+
+	err := deleteResourcesForGVK(srcClient, srcGVK, labels, namespace)
+	if err != nil {
+		return err
+	}
+
+	err = deleteResourcesForGVK(destClient, destGVK, labels, namespace)
+	if err != nil {
+		return err
+	}
+
+	return deleteResourcesIteratively(destClient, []client.Object{
+		&corev1.Service{
+			TypeMeta: metav1.TypeMeta{
+				Kind:       "Service",
+				APIVersion: corev1.SchemeGroupVersion.Version,
+			},
+		}}, labels, namespace)
+}
+
+func deleteResourcesIteratively(c client.Client, iterativeTypes []client.Object, labels map[string]string, namespace string) error {
+	listOptions := []client.ListOption{
+		client.MatchingLabels(labels),
+		client.InNamespace(namespace),
+	}
+	errs := []error{}
+	for _, objList := range iterativeTypes {
+		ulist := &unstructured.UnstructuredList{}
+		ulist.SetGroupVersionKind(objList.GetObjectKind().GroupVersionKind())
+		err := c.List(context.TODO(), ulist, listOptions...)
+		if err != nil {
+			// if listing one API fails, still try all the others
+			errs = append(errs, err)
+			continue
+		}
+		for _, item := range ulist.Items {
+			err = c.Delete(context.TODO(), &item, client.PropagationPolicy(metav1.DeletePropagationBackground))
+			if err != nil {
+				// if deleting one object fails, continue deleting the others
+				errs = append(errs, err)
+			}
+		}
+	}
+	return errorsutil.NewAggregate(errs)
+}
+
+func deleteResourcesForGVK(c client.Client, gvk []client.Object, labels map[string]string, namespace string) error {
+	for _, obj := range gvk {
+		err := c.DeleteAllOf(context.TODO(), obj, client.InNamespace(namespace), client.MatchingLabels(labels))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}

@@ -334,6 +383,10 @@ func followClientLogs(srcConfig *rest.Config, c client.Client, namespace string,
 	clientPod = &clientPodList.Items[0]
 
 	for _, containerStatus := range clientPod.Status.ContainerStatuses {
+		if containerStatus.State.Terminated != nil && containerStatus.State.Terminated.ExitCode == 0 {
+			log.Printf("container %s in pod %s completed successfully", containerStatus.Name, client.ObjectKey{Namespace: namespace, Name: clientPod.Name})
+			break
+		}
 		if !containerStatus.Ready {
 			log.Println(fmt.Errorf("container %s in pod %s is not ready", containerStatus.Name, client.ObjectKey{Namespace: namespace, Name: clientPod.Name}))
 			return false, nil
@@ -370,6 +423,80 @@ func followClientLogs(srcConfig *rest.Config, c client.Client, namespace string,
 	return err
 }
 
+func createAndWaitForIngress(pvc *corev1.PersistentVolumeClaim, destClient client.Client) endpoint.Endpoint {
+	// create an ingress for data transfer
+	// TODO: add a config flag for subdomain
+	r := ingress.NewEndpoint(
+		types.NamespacedName{
+			Namespace: pvc.Namespace,
+			Name:      pvc.Name,
+		}, metadata.Labels, "crane.dev")
+	e, err := endpoint.Create(r, destClient)
+	if err != nil {
+		log.Fatal(err, "unable to create endpoint")
+	}
+
+	_ = wait.PollUntil(time.Second*5, func() (done bool, err error) {
+		e, err := ingress.GetEndpointFromKubeObjects(destClient, e.NamespacedName())
+		if err != nil {
+			log.Println(err, "unable to check health, retrying...")
+			return false, nil
+		}
+		ready, err := e.IsHealthy(destClient)
+		if err != nil {
+			log.Println(err, "unable to check health, retrying...")
+			return false, nil
+		}
+		return ready, nil
+	}, make(<-chan struct{}))
+
+	e, err = ingress.GetEndpointFromKubeObjects(destClient, e.NamespacedName())
+	if err != nil {
+		log.Fatal(err, "unable to get the ingress object")
+	} else {
+		log.Println("endpoint is created and is healthy")
+	}
+
+	return e
+}
+
+func createAndWaitForRoute(pvc *corev1.PersistentVolumeClaim, destClient client.Client) endpoint.Endpoint {
+	// create a route for data transfer
+	// TODO: pass in subdomain instead of ""
+	r := route.NewEndpoint(
+		types.NamespacedName{
+			Namespace: pvc.Namespace,
+			Name:      pvc.Name,
+		}, route.EndpointTypePassthrough, metadata.Labels, "")
+	e, err := endpoint.Create(r, destClient)
+	if err != nil {
+		log.Fatal(err, "unable to create route endpoint")
+	}
+
+	_ = wait.PollUntil(time.Second*5, func() (done bool, err error) {
+		e, err := route.GetEndpointFromKubeObjects(destClient, e.NamespacedName())
+		if err != nil {
+			log.Println(err, "unable to check route health, retrying...")
+			return false, nil
+		}
+		ready, err := e.IsHealthy(destClient)
+		if err != nil {
+			log.Println(err, "unable to check route health, retrying...")
+			return false, nil
+		}
+		return ready, nil
+	}, make(<-chan struct{}))
+
+	e, err = route.GetEndpointFromKubeObjects(destClient, e.NamespacedName())
+	if err != nil {
+		log.Fatal(err, "unable to get the route object")
+	} else {
+		log.Println("route endpoint is created and is healthy")
+	}
+
+	return e
+}
+
 func clearDestPVC(destPVC *corev1.PersistentVolumeClaim) {
 	// TODO: some of this needs to be configuration option exposed to the user
 	destPVC.ResourceVersion = ""
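For reference, the new flag might be exercised like this — a sketch only: the `crane` binary name and the `--source-context` flag are assumed from the surrounding code, while `--destination-context`, `--pvc-namespace`, `--pvc-name`, and `--endpoint` are the flags defined in this diff:

	crane transfer-pvc \
	  --source-context=src \
	  --destination-context=dest \
	  --pvc-namespace=demo \
	  --pvc-name=test-pvc \
	  --endpoint=nginx-ingress   # or --endpoint=route when the destination is OpenShift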
2 changes: 1 addition & 1 deletion go.mod
@@ -5,7 +5,7 @@ go 1.16
 require (
 	github.com/ghodss/yaml v1.0.0
 	github.com/jarcoal/httpmock v1.0.8
-	github.com/konveyor/crane-lib v0.0.4
+	github.com/konveyor/crane-lib v0.0.5
 	github.com/mitchellh/mapstructure v1.4.1
 	github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5
 	github.com/openshift/api v0.0.0-20210625082935-ad54d363d274
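A bump like the one above is typically produced with the standard Go tooling rather than by editing go.mod by hand — a sketch, using the module path from the require block:

	go get github.com/konveyor/crane-lib@v0.0.5
	go mod tidy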
4 changes: 2 additions & 2 deletions go.sum
@@ -461,8 +461,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konveyor/crane-lib v0.0.4 h1:CWGBC5MTmdlrEqu1F5eBSR0HRYBlo2QV/Y/bHguJPvM=
-github.com/konveyor/crane-lib v0.0.4/go.mod h1:C0H3dr85YlsaAt1Av7zFu4IPdwG4+SW7wEBFE+1udTw=
+github.com/konveyor/crane-lib v0.0.5 h1:qDpSvCJTy76lat1p03EuPW4EDsC+Yy3k/zhVzEJozEc=
+github.com/konveyor/crane-lib v0.0.5/go.mod h1:C0H3dr85YlsaAt1Av7zFu4IPdwG4+SW7wEBFE+1udTw=
 github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
17 changes: 17 additions & 0 deletions hack/minikube-clusters-delete.sh
@@ -0,0 +1,17 @@
#!/usr/bin/env bash
set +x

SRC_CLUSTER_NAME=src
DEST_CLUSTER_NAME=dest

SOURCE_IP=$(minikube ip -p ${SRC_CLUSTER_NAME})
DEST_IP=$(minikube ip -p ${DEST_CLUSTER_NAME})
SOURCE_IP_RANGE="${SOURCE_IP%.*}.0/24"
DEST_IP_RANGE="${DEST_IP%.*}.0/24"

# remove the FORWARD rules added by hack/minikube-clusters-start.sh
sudo iptables -D FORWARD -p all -s $SOURCE_IP_RANGE -d $DEST_IP_RANGE -j ACCEPT
sudo iptables -D FORWARD -p all -s $DEST_IP_RANGE -d $SOURCE_IP_RANGE -j ACCEPT

minikube delete -p ${SRC_CLUSTER_NAME}
minikube delete -p ${DEST_CLUSTER_NAME}
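Teardown can be sanity-checked afterwards — a sketch; the ACCEPT rules and profile names are the ones created by hack/minikube-clusters-start.sh:

	sudo iptables -L FORWARD -n   # the two cluster-subnet ACCEPT rules should be gone
	minikube profile list         # src and dest should no longer be listed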

59 changes: 59 additions & 0 deletions hack/minikube-clusters-start.sh
@@ -0,0 +1,59 @@
#!/usr/bin/env bash
set +x

SRC_CLUSTER_NAME=src
DEST_CLUSTER_NAME=dest

# refuse to run if either cluster profile already exists
minikube status -p ${SRC_CLUSTER_NAME} >> /dev/null
if [[ $? == 0 ]]; then
    echo "run hack/minikube-clusters-delete.sh before running this script"; exit 1
fi
minikube status -p ${DEST_CLUSTER_NAME} >> /dev/null
if [[ $? == 0 ]]; then
    echo "run hack/minikube-clusters-delete.sh before running this script"; exit 1
fi

echo "create two minikube clusters"

minikube start -p ${SRC_CLUSTER_NAME}
minikube start -p ${DEST_CLUSTER_NAME}

echo "clusters started, configuring networking between source and destination clusters"

SOURCE_IP=$(minikube ip -p ${SRC_CLUSTER_NAME})
DEST_IP=$(minikube ip -p ${DEST_CLUSTER_NAME})
SOURCE_IP_RANGE="${SOURCE_IP%.*}.0/24"
DEST_IP_RANGE="${DEST_IP%.*}.0/24"

# allow forwarding between the two minikube subnets on the host
sudo iptables -I FORWARD 2 -p all -s $SOURCE_IP_RANGE -d $DEST_IP_RANGE -j ACCEPT
sudo iptables -I FORWARD 3 -p all -s $DEST_IP_RANGE -d $SOURCE_IP_RANGE -j ACCEPT

# route each cluster's traffic for the other subnet via the host gateway
minikube ssh -p ${SRC_CLUSTER_NAME} sudo ip r add $DEST_IP_RANGE via $(echo $SOURCE_IP | cut -d"." -f1-3).1
minikube ssh -p ${DEST_CLUSTER_NAME} sudo ip r add $SOURCE_IP_RANGE via $(echo $DEST_IP | cut -d"." -f1-3).1

minikube ssh -p ${SRC_CLUSTER_NAME} "ping -c 4 ${DEST_IP}"
if [ "$?" != 0 ];
then
echo "unable to set up networking"
exit 1
fi

echo "network setup successful, configuring nginx ingress on destination cluster"
minikube addons -p ${DEST_CLUSTER_NAME} enable ingress

# point kubectl at the source cluster; the CoreDNS changes below are applied there
minikube update-context -p ${SRC_CLUSTER_NAME}

# this hack does not work if the script is run twice
# point the source cluster's CoreDNS at a custom zone file for crane.dev
COREFILE=$(kubectl get cm -n kube-system coredns -ojson | jq '.data.Corefile')
COREFILE=$(echo $COREFILE | sed s/'fallthrough\\n }\\n/& file \/etc\/coredns\/crane.db crane.dev\\n/')
kubectl get cm -n kube-system coredns -ojson | jq ".data.Corefile = ${COREFILE}" | kubectl replace -f -

# add a zone file that resolves *.crane.dev to the destination cluster's IP
kubectl patch cm -n kube-system coredns --type='json' -p='[{"op": "replace", "path": "/data/crane.db", "value": "; crane.dev test file\ncrane.dev. IN SOA a.crane.dev. b.crane.dev. 2 604800 86400 2419200 604800\ncrane.dev. IN NS a.crane.dev.\ncrane.dev. IN NS b.crane.dev.\na.crane.dev. IN A 127.0.0.1\nb.crane.dev. IN A 127.0.0.1\n\n*.crane.dev. IN A DEST_IP\n"}]'
kubectl get cm -n kube-system coredns -oyaml | sed "s/DEST_IP/${DEST_IP}/" | kubectl replace -f -

# mount the crane.db key into the CoreDNS pods
kubectl patch deploy -n kube-system coredns --type='json' -p='[{"op": "add", "path": "/spec/template/spec/volumes/0/configMap/items/1", "value": {"key": "crane.db", "path": "crane.db"}}]'

# enable SSL passthrough on the destination's nginx ingress controller
kubectl patch deploy --context=${DEST_CLUSTER_NAME} -n ingress-nginx ingress-nginx-controller --type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/12", "value": "--enable-ssl-passthrough"}]'

# force a rollout
kubectl delete rs -n ingress-nginx --context=${DEST_CLUSTER_NAME} -l app.kubernetes.io/component=controller,app.kubernetes.io/instance=ingress-nginx
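A quick smoke test of the DNS and ingress wiring might look like this — a sketch: the pod name and busybox image are arbitrary, and the kubeconfig context is assumed to be named src after minikube update-context:

	kubectl --context=src run dns-check --rm -it --restart=Never --image=busybox:1.35 \
	  -- nslookup test.crane.dev   # should resolve to the destination cluster's IP via the patched CoreDNS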
