E2E: Local cluster testing (#5977)
* Cleanup validatecluster printout
* remove deprecated kubectl exec format
* Implement CreateLocalCluster function
* Update testing documentation

Signed-off-by: Derek Nola <derek.nola@suse.com>
dereknola authored Aug 15, 2022
1 parent 116c977 commit 75f8cfb
Showing 7 changed files with 114 additions and 37 deletions.
26 changes: 26 additions & 0 deletions tests/e2e/README.md
@@ -20,6 +20,32 @@ An E2E test consists of two parts:

See the [validate cluster test](../tests/e2e/validatecluster/validatecluster_test.go) as an example.


## Setup

To run the E2E tests, you must first install the following:
- Vagrant
- Libvirt
- Vagrant plugins

### Vagrant

Download the latest version (currently 2.2.19) of Vagrant [*from the website*](https://www.vagrantup.com/downloads). Do not use your distribution's built-in packages; they are often outdated or missing the Ruby library extensions needed to get certain plugins working.
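
After installing, a quick sanity check confirms the binary on your `PATH`:

```bash
# Expect a 2.2.x or newer version string
vagrant --version
```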

### Libvirt
Follow the OS-specific guides to install libvirt/qemu on your host; a sample Ubuntu invocation is sketched after the list:
- [openSUSE](https://documentation.suse.com/sles/15-SP1/html/SLES-all/cha-vt-installation.html)
- [ubuntu](https://ubuntu.com/server/docs/virtualization-libvirt)
- [debian](https://wiki.debian.org/KVM#Installation)
- [fedora](https://developer.fedoraproject.org/tools/virtualization/installing-libvirt-and-virt-install-on-fedora-linux.html)
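
As a rough sketch, the Ubuntu/Debian route usually amounts to the following; the package names are assumptions based on current Ubuntu releases, so defer to the guides above if they differ:

```bash
# Install qemu/KVM, the libvirt daemon, and client tools (Ubuntu/Debian names assumed)
sudo apt-get install -y qemu-kvm libvirt-daemon-system libvirt-clients
# Let your user manage VMs without sudo (log out and back in for the group to apply)
sudo usermod -aG libvirt "$USER"
# Sanity check: the daemon responds and lists running domains (none yet)
virsh list --all
```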

### Vagrant plugins
Install the necessary vagrant plugins with the following command:

```bash
vagrant plugin install vagrant-libvirt vagrant-scp vagrant-k3s vagrant-reload
```
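
To confirm the plugins registered correctly:

```bash
# All four plugins should appear in the output
vagrant plugin list
```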

## Running

Generally, E2E tests are run as a nightly Jenkins job for QA. They can still be run locally, but additional setup may be required. By default, all E2E tests are designed with `libvirt` as the underlying VM provider. Instructions for installing libvirt and its associated Vagrant plugin, `vagrant-libvirt`, can be found [here](https://github.com/vagrant-libvirt/vagrant-libvirt#installation). `VirtualBox` is also supported as a backup VM provider.
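
As a hedged example, a local run of a single suite looks roughly like this; the release version and timeout are illustrative, and any `E2E_*` variables you export are forwarded to `vagrant up` by the test harness:

```bash
# Illustrative invocation only; substitute a real k3s release version
E2E_RELEASE_VERSION=v1.24.3+k3s1 go test -v -timeout=30m ./tests/e2e/validatecluster/...
```
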
1 change: 1 addition & 0 deletions tests/e2e/docker/Vagrantfile
@@ -36,6 +36,7 @@ def provision(vm, role, role_num, node_num)
vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
k3s.args = %W[agent --server https://#{NETWORK_PREFIX}.100:6443 --flannel-iface=eth1 --docker]
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
end
end

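The new `config_mode` presumably controls the permissions the `vagrant-k3s` provisioner applies to the k3s config file it writes, which is the subject of the linked issue. A way to verify on a booted node (node name and path assumed):

```bash
# 644 means the config is world-readable, side-stepping the issue
vagrant ssh server-0 -c "stat -c '%a' /etc/rancher/k3s/config.yaml"
```
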
7 changes: 4 additions & 3 deletions tests/e2e/dualstack/Vagrantfile
@@ -26,12 +26,13 @@ def provision(vm, roles, role_num, node_num)
:libvirt__ipv6_address => "#{NETWORK6_PREFIX}::1",
:libvirt__ipv6_prefix => "64"

vagrant_defaults = '../vagrantdefaults.rb'
load vagrant_defaults if File.exists?(vagrant_defaults)
scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts"
vagrant_defaults = File.exists?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
load vagrant_defaults

defaultOSConfigure(vm)

vm.provision "IPv6 Setup", type: "shell", path: "../scripts/ipv6.sh", args: [node_ip4, node_ip6, vm.box]
vm.provision "IPv6 Setup", type: "shell", path: scripts_location +"/ipv6.sh", args: [node_ip4, node_ip6, vm.box.to_s]
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)

vm.provision "Ping Check", type: "shell", inline: "ping -c 2 k3s.io"
6 changes: 6 additions & 0 deletions tests/e2e/scripts/ipv6.sh
@@ -5,11 +5,17 @@ os=$3

sysctl -w net.ipv6.conf.all.disable_ipv6=0
sysctl -w net.ipv6.conf.eth1.accept_dad=0
sysctl -w net.ipv6.conf.eth1.accept_ra=0
sysctl -w net.ipv6.conf.eth1.forwarding=0

if [ -z "${os##*ubuntu*}" ]; then
netplan set ethernets.eth1.accept-ra=false
netplan set ethernets.eth1.addresses=["$ip4_addr"/24,"$ip6_addr"/64]
netplan apply
elif [ -z "${os##*alpine*}" ]; then
ip link set eth1 down
ip link set eth1 up
ip -6 addr add "$ip6_addr"/64 dev eth1
else
ip -6 addr add "$ip6_addr"/64 dev eth1
fi
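The script's positional arguments line up with the `args:` list in the dualstack Vagrantfile above: IPv4 address, IPv6 address, then the box string. A hypothetical standalone invocation (addresses and box made up):

```bash
# $1=ip4_addr  $2=ip6_addr  $3=os (the vagrant box name)
sudo ./ipv6.sh 10.10.10.100 fd11:decf:c0ff::100 generic/ubuntu2004
```
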
66 changes: 60 additions & 6 deletions tests/e2e/testutils.go
@@ -38,7 +38,8 @@ func CountOfStringInSlice(str string, pods []Pod) int {
return count
}

func CreateCluster(nodeOS string, serverCount, agentCount int) ([]string, []string, error) {
// genNodeEnvs generates the node and testing environment variables for vagrant up
func genNodeEnvs(nodeOS string, serverCount, agentCount int) ([]string, []string, string) {
serverNodeNames := make([]string, serverCount)
for i := 0; i < serverCount; i++ {
serverNodeNames[i] = "server-" + strconv.Itoa(i)
@@ -47,25 +47,74 @@ func CreateCluster(nodeOS string, serverCount, agentCount int) ([]string, []string, error) {
for i := 0; i < agentCount; i++ {
agentNodeNames[i] = "agent-" + strconv.Itoa(i)
}
nodeRoles := strings.Join(serverNodeNames, " ") + " " + strings.Join(agentNodeNames, " ")

nodeRoles := strings.Join(serverNodeNames, " ") + " " + strings.Join(agentNodeNames, " ")
nodeRoles = strings.TrimSpace(nodeRoles)

nodeBoxes := strings.Repeat(nodeOS+" ", serverCount+agentCount)
nodeBoxes = strings.TrimSpace(nodeBoxes)

nodeEnvs := fmt.Sprintf(`E2E_NODE_ROLES="%s" E2E_NODE_BOXES="%s"`, nodeRoles, nodeBoxes)

return serverNodeNames, agentNodeNames, nodeEnvs
}

func CreateCluster(nodeOS string, serverCount, agentCount int) ([]string, []string, error) {

serverNodeNames, agentNodeNames, nodeEnvs := genNodeEnvs(nodeOS, serverCount, agentCount)

var testOptions string
for _, env := range os.Environ() {
if strings.HasPrefix(env, "E2E_") {
testOptions += " " + env
}
}

cmd := fmt.Sprintf(`E2E_NODE_ROLES="%s" E2E_NODE_BOXES="%s" %s vagrant up &> vagrant.log`, nodeRoles, nodeBoxes, testOptions)
cmd := fmt.Sprintf(`%s %s vagrant up &> vagrant.log`, nodeEnvs, testOptions)
fmt.Println(cmd)
if _, err := RunCommand(cmd); err != nil {
fmt.Println("Error Creating Cluster", err)
return nil, nil, err
return nil, nil, fmt.Errorf("failed creating cluster: %s: %v", cmd, err)
}
return serverNodeNames, agentNodeNames, nil
}

// CreateLocalCluster creates a cluster using the locally built k3s binary. The vagrant-scp plugin must be installed for
// this function to work. The binary is deployed as an airgapped install of k3s on the VMs.
// This is intended only for local testing purposes when writing a new E2E test.
func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, []string, error) {

serverNodeNames, agentNodeNames, nodeEnvs := genNodeEnvs(nodeOS, serverCount, agentCount)

var testOptions string
for _, env := range os.Environ() {
if strings.HasPrefix(env, "E2E_") {
testOptions += " " + env
}
}
testOptions += " E2E_RELEASE_VERSION=skip"

cmd := fmt.Sprintf(`%s vagrant up --no-provision &> vagrant.log`, nodeEnvs)
if _, err := RunCommand(cmd); err != nil {
return nil, nil, fmt.Errorf("failed creating nodes: %s: %v", cmd, err)
}

nodeRoles := append(serverNodeNames, agentNodeNames...)

for _, node := range nodeRoles {
cmd = fmt.Sprintf(`vagrant scp ../../../dist/artifacts/k3s %s:/tmp/`, node)
if _, err := RunCommand(cmd); err != nil {
return nil, nil, fmt.Errorf("failed to scp k3s binary to %s: %v", node, err)
}
if _, err := RunCmdOnNode("sudo mv /tmp/k3s /usr/local/bin/", node); err != nil {
return nil, nil, err
}
}

cmd = fmt.Sprintf(`%s %s vagrant provision &>> vagrant.log`, nodeEnvs, testOptions)
if _, err := RunCommand(cmd); err != nil {
return nil, nil, fmt.Errorf("failed creating cluster: %s: %v", cmd, err)
}

return serverNodeNames, agentNodeNames, nil
}

@@ -240,7 +290,11 @@ func RestartCluster(nodeNames []string) error {
// RunCmdOnNode executes a command from within the given node
func RunCmdOnNode(cmd string, nodename string) (string, error) {
runcmd := "vagrant ssh -c \"" + cmd + "\" " + nodename
return RunCommand(runcmd)
out, err := RunCommand(runcmd)
if err != nil {
return out, fmt.Errorf("failed to run command %s on node %s: %v", cmd, nodename, err)
}
return out, nil
}

// RunCommand executes a command on the host
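For orientation, `CreateLocalCluster` automates roughly this shell sequence, sketched here for a single assumed `server-0` node; the Go code loops over all nodes and also forwards every exported `E2E_*` variable:

```bash
# 1) Boot the VMs without running the k3s provisioner
E2E_NODE_ROLES="server-0" E2E_NODE_BOXES="generic/ubuntu2004" vagrant up --no-provision

# 2) Push the locally built binary onto the node (requires vagrant-scp)
vagrant scp ../../../dist/artifacts/k3s server-0:/tmp/
vagrant ssh -c "sudo mv /tmp/k3s /usr/local/bin/" server-0

# 3) Provision with the release install skipped, so the copied binary is used
E2E_NODE_ROLES="server-0" E2E_NODE_BOXES="generic/ubuntu2004" E2E_RELEASE_VERSION=skip vagrant provision
```
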
4 changes: 2 additions & 2 deletions tests/e2e/upgradecluster/upgradecluster_test.go
@@ -234,7 +234,7 @@ var _ = Describe("Verify Upgrade", func() {

// Check data after re-creation
Eventually(func() (string, error) {
cmd = "kubectl exec volume-test cat /data/test --kubeconfig=" + kubeConfigFile
cmd := "kubectl exec volume-test --kubeconfig=" + kubeConfigFile + " -- cat /data/test"
return e2e.RunCommand(cmd)
}, "180s", "2s").Should(ContainSubstring("local-path-test"))
})
@@ -364,7 +364,7 @@ var _ = Describe("Verify Upgrade", func() {

It("After upgrade verify Local Path Provisioner storage ", func() {
Eventually(func() (string, error) {
cmd := "kubectl exec volume-test cat /data/test --kubeconfig=" + kubeConfigFile
cmd := "kubectl exec volume-test --kubeconfig=" + kubeConfigFile + " -- cat /data/test"
return e2e.RunCommand(cmd)
}, "180s", "2s").Should(ContainSubstring("local-path-test"))
})
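The two hunks above migrate off the deprecated `kubectl exec <pod> <cmd>` form. With the current syntax, kubectl flags such as `--kubeconfig` must precede the `--` separator, and everything after it is passed verbatim to the container:

```bash
# Deprecated form (what the old code built)
kubectl exec volume-test cat /data/test --kubeconfig="$KUBECONFIG"

# Current form: kubectl flags first, container command after --
kubectl exec volume-test --kubeconfig="$KUBECONFIG" -- cat /data/test
```
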
41 changes: 15 additions & 26 deletions tests/e2e/validatecluster/validatecluster_test.go
@@ -92,7 +92,6 @@ var _ = Describe("Verify Create", func() {
Eventually(func(g Gomega) {
res, err := e2e.RunCmdOnNode(cmd, nodeName)
g.Expect(err).NotTo(HaveOccurred())
fmt.Println(res)
Expect(res).Should(ContainSubstring("test-clusterip"))
}, "120s", "10s").Should(Succeed())
}
@@ -119,8 +118,7 @@ var _ = Describe("Verify Create", func() {
fmt.Println(cmd)
Eventually(func(g Gomega) {
res, err := e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
fmt.Println(res)
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
g.Expect(res).Should(ContainSubstring("test-nodeport"))
}, "240s", "5s").Should(Succeed())
}
@@ -140,16 +138,14 @@ var _ = Describe("Verify Create", func() {
Eventually(func(g Gomega) {
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
res, err := e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
}, "240s", "5s").Should(Succeed())

Eventually(func(g Gomega) {
cmd = "curl -L --insecure http://" + ip + ":" + port + "/name.html"
fmt.Println(cmd)
res, err := e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
fmt.Println(res)
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
}, "240s", "5s").Should(Succeed())
}
@@ -166,8 +162,7 @@ var _ = Describe("Verify Create", func() {

Eventually(func(g Gomega) {
res, err := e2e.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
fmt.Println(res)
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
g.Expect(res).Should(ContainSubstring("test-ingress"))
}, "240s", "5s").Should(Succeed())
}
@@ -196,16 +191,15 @@ var _ = Describe("Verify Create", func() {

Eventually(func(g Gomega) {
cmd := "kubectl get pods dnsutils --kubeconfig=" + kubeConfigFile
res, _ := e2e.RunCommand(cmd)
fmt.Println(res)
res, err := e2e.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
g.Expect(res).Should(ContainSubstring("dnsutils"))
}, "420s", "2s").Should(Succeed())

Eventually(func(g Gomega) {
cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
fmt.Println(cmd)
res, _ := e2e.RunCommand(cmd)
fmt.Println(res)
res, err := e2e.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
g.Expect(res).Should(ContainSubstring("kubernetes.default.svc.cluster.local"))
}, "420s", "2s").Should(Succeed())
})
@@ -217,54 +211,49 @@ var _ = Describe("Verify Create", func() {
Eventually(func(g Gomega) {
cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + kubeConfigFile
res, err := e2e.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
fmt.Println(res)
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
g.Expect(res).Should(ContainSubstring("local-path-pvc"))
g.Expect(res).Should(ContainSubstring("Bound"))
}, "420s", "2s").Should(Succeed())

Eventually(func(g Gomega) {
cmd := "kubectl get pod volume-test --kubeconfig=" + kubeConfigFile
res, err := e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
fmt.Println(res)
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
g.Expect(res).Should(ContainSubstring("volume-test"))
g.Expect(res).Should(ContainSubstring("Running"))
}, "420s", "2s").Should(Succeed())

cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec volume-test -- sh -c 'echo local-path-test > /data/test'"
_, err = e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
fmt.Println("Data stored in pvc: local-path-test")

cmd = "kubectl delete pod volume-test --kubeconfig=" + kubeConfigFile
res, err := e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
fmt.Println(res)
Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)

_, err = e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *hardened)
Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed")

Eventually(func(g Gomega) {
cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + kubeConfigFile
res, _ := e2e.RunCommand(cmd)
fmt.Println(res)
g.Expect(res).Should(ContainSubstring("local-path-provisioner"))
}, "420s", "2s").Should(Succeed())

Eventually(func(g Gomega) {
cmd := "kubectl get pod volume-test --kubeconfig=" + kubeConfigFile
res, err := e2e.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
fmt.Println(res)
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)

g.Expect(res).Should(ContainSubstring("volume-test"))
g.Expect(res).Should(ContainSubstring("Running"))
}, "420s", "2s").Should(Succeed())

Eventually(func(g Gomega) {
cmd = "kubectl exec volume-test cat /data/test --kubeconfig=" + kubeConfigFile
cmd := "kubectl exec volume-test --kubeconfig=" + kubeConfigFile + " -- cat /data/test"
res, err = e2e.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
fmt.Println("Data after re-creation", res)
g.Expect(res).Should(ContainSubstring("local-path-test"))
}, "180s", "2s").Should(Succeed())
