Address comments in submariner-io#149 (submariner-io#197)
* E2E: Refactor handling of pods

The concepts of TestPodConfig and TestPod are defined, letting us create
test pods from a single interface and automating the creation
of the UUID data to exchange.

As recent modifications have shown, the number of parameters keeps growing;
having a single fixture to create test pods will make it easier to write
and read E2E tests.

Depends-On: submariner-io#148

* Address comments in submariner-io#149

- renamed TestPod to NetworkPod
- renamed createListenerConnectorPair to runAndVerifyNetworkPod2ServicePair
- renamed other function names where appropriate

Signed-off-by: Tom Pantelis <tompantelis@gmail.com>
tpantelis authored Oct 30, 2019
1 parent ebe9b68 commit dbd474b
Showing 4 changed files with 177 additions and 146 deletions.
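In outline, a test written against the new fixture looks like the sketch below. It is reconstructed from the API this commit introduces in test/e2e/framework/network_pods.go (NewNetworkPod, NetworkPodConfig, CreateService, AwaitSuccessfulFinish); the function name and the GatewayNode scheduling choices are illustrative assumptions, not part of the commit.

// Sketch only — based on the API added in this commit; the name and
// scheduling values are illustrative.
func exampleNetworkPodTest(f *framework.Framework) {
	By("Creating a listener pod in cluster B")
	listenerPod := f.NewNetworkPod(&framework.NetworkPodConfig{
		Type:       framework.ListenerPod,
		Cluster:    framework.ClusterB,
		Scheduling: framework.GatewayNode,
	})

	By("Pointing a service ClusterIP to the listener pod")
	service := listenerPod.CreateService()

	By("Creating a connector pod in cluster A that dials the service")
	connectorPod := f.NewNetworkPod(&framework.NetworkPodConfig{
		Type:       framework.ConnectorPod,
		Cluster:    framework.ClusterA,
		Scheduling: framework.GatewayNode,
		RemoteIP:   service.Spec.ClusterIP,
	})

	// Each pod exits 0 on success; NewNetworkPod generated a UUID payload
	// (Config.Data) for each side, so verify the exchange both ways.
	listenerPod.AwaitSuccessfulFinish()
	connectorPod.AwaitSuccessfulFinish()
	Expect(listenerPod.TerminationMessage).To(ContainSubstring(connectorPod.Config.Data))
	Expect(connectorPod.TerminationMessage).To(ContainSubstring(listenerPod.Config.Data))
}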
11 changes: 3 additions & 8 deletions go.sum
@@ -12,8 +12,6 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
 github.com/bronze1man/goStrongswanVici v0.0.0-20190921045355-4c81bd8d0bd5 h1:Vf6H6edn93L8a9veAAWAJO+puYBr2MjYNgGNZ60s2aw=
 github.com/bronze1man/goStrongswanVici v0.0.0-20190921045355-4c81bd8d0bd5/go.mod h1:fWUtBEPt2yjrr3WFhOqvajM8JSEU8bEeBcoeSCsKRpc=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/coreos/go-iptables v0.4.0 h1:wh4UbVs8DhLUbpyq97GLJDKrQMjEDD63T1xE4CrsKzQ=
-github.com/coreos/go-iptables v0.4.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
 github.com/coreos/go-iptables v0.4.3 h1:jJg1aFuhCqWbgBl1VTqgTHG5faPM60A5JDMjQ2HYv+A=
 github.com/coreos/go-iptables v0.4.3/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -65,18 +63,15 @@ github.com/jpillora/backoff v0.0.0-20170918002102-8eab2debe79d/go.mod h1:2iMrUgb
 github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be h1:AHimNtVIpiBjPUhEF5KNCkrUyqTSA5zWUl8sQ2bfGBE=
 github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/kelseyhightower/envconfig v1.3.0 h1:IvRS4f2VcIQy6j4ORGIf9145T/AsUB+oY8LyvN8BXNM=
-github.com/kelseyhightower/envconfig v1.3.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
 github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=
 github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/onsi/ginkgo v0.0.0-20190716150225-054541502288 h1:IrDHY8COHGFX/oYjUAS2vB0qfHfQ09YzChm5RikI9Z8=
-github.com/onsi/ginkgo v0.0.0-20190716150225-054541502288/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
-github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.2 h1:uqH7bpe+ERSiDa34FDOF7RikN6RzXgduUF8yarlZp94=
+github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
 github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
78 changes: 29 additions & 49 deletions test/e2e/dataplane/tcp_pod_to_service.go
@@ -4,8 +4,6 @@ import (
 	"strings"

 	"github.com/submariner-io/submariner/test/e2e/framework"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/uuid"

 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -32,71 +30,53 @@ var _ = Describe("[dataplane] Basic Pod to Service tests across clusters without
 	})
 })

-func testPod2ServiceTCP(f *framework.Framework, leftScheduling framework.TestPodScheduling, rightScheduling framework.TestPodScheduling) {
+func testPod2ServiceTCP(f *framework.Framework, leftScheduling framework.NetworkPodScheduling, rightScheduling framework.NetworkPodScheduling) {

-	listenerUUID := string(uuid.NewUUID())
-	connectorUUID := string(uuid.NewUUID())
-
-	By("Creating a listener pod in cluster B, which will wait for a handshake over TCP")
-	listenerPod := f.CreateTCPCheckListenerPod(framework.ClusterB, rightScheduling, listenerUUID)
-
-	By("Pointing a service ClusterIP to the listener pod in cluster B")
-	service := f.CreateTCPService(framework.ClusterB, listenerPod.Labels[framework.TestAppLabel], framework.TestPort)
-	framework.Logf("Service for listener pod has ClusterIP: %v", service.Spec.ClusterIP)
-
-	By("Creating a connector pod in cluster A, which will attempt the specific UUID handshake over TCP")
-	connectorPod := f.CreateTCPCheckConnectorPod(framework.ClusterA, leftScheduling, service.Spec.ClusterIP, connectorUUID)
-
-	By("Waiting for the listener pod to exit with code 0, returning what listener sent")
-	exitStatusL, exitMessageL := f.WaitForPodFinishStatus(listenerPod, framework.ClusterB)
-	framework.Logf("Listener output:\n%s", keepLines(exitMessageL, 3))
-	Expect(exitStatusL).To(Equal(int32(0)))
-
-	By("Waiting for the connector pod to exit with code 0, returning what connector sent")
-	exitStatusC, exitMessageC := f.WaitForPodFinishStatus(connectorPod, framework.ClusterA)
-	framework.Logf("Connector output\n%s", keepLines(exitMessageC, 2))
-	Expect(exitStatusC).To(Equal(int32(0)))
+	listenerPod, connectorPod := runAndVerifyNetworkPod2ServicePair(f, leftScheduling, rightScheduling)

 	By("Verifying what the pods sent to each other contain the right UUIDs")
-	Expect(exitMessageL).Should(ContainSubstring(connectorUUID))
-	Expect(exitMessageC).Should(ContainSubstring(listenerUUID))
+	Expect(listenerPod.TerminationMessage).To(ContainSubstring(connectorPod.Config.Data))
+	Expect(connectorPod.TerminationMessage).To(ContainSubstring(listenerPod.Config.Data))
 }

-func testPod2ServiceTCPIPPreservation(f *framework.Framework, leftScheduling framework.TestPodScheduling, rightScheduling framework.TestPodScheduling) {
+func testPod2ServiceTCPIPPreservation(f *framework.Framework, leftScheduling framework.NetworkPodScheduling, rightScheduling framework.NetworkPodScheduling) {

-	// TODO(mangelajo): remove the repetition of this function, work being
-	// already done in PR: https://github.com/submariner-io/submariner/pull/149
-	listenerUUID := string(uuid.NewUUID())
-	connectorUUID := string(uuid.NewUUID())
+	listenerPod, connectorPod := runAndVerifyNetworkPod2ServicePair(f, rightScheduling, leftScheduling)
+
+	framework.Logf("Connector pod has IP: %s", connectorPod.Pod.Status.PodIP)
+	By("Verifying the output of listener pod which must contain the source IP")
+	Expect(listenerPod.TerminationMessage).To(ContainSubstring(connectorPod.Pod.Status.PodIP))
+}

+func runAndVerifyNetworkPod2ServicePair(f *framework.Framework, leftScheduling framework.NetworkPodScheduling, rightScheduling framework.NetworkPodScheduling) (*framework.NetworkPod, *framework.NetworkPod) {
 	By("Creating a listener pod in cluster B, which will wait for a handshake over TCP")
-	listenerPod := f.CreateTCPCheckListenerPod(framework.ClusterB, rightScheduling, listenerUUID)
+	listenerPod := f.NewNetworkPod(&framework.NetworkPodConfig{
+		Type:       framework.ListenerPod,
+		Cluster:    framework.ClusterB,
+		Scheduling: rightScheduling,
+	})

 	By("Pointing a service ClusterIP to the listener pod in cluster B")
-	service := f.CreateTCPService(framework.ClusterB, listenerPod.Labels[framework.TestAppLabel], framework.TestPort)
+	service := listenerPod.CreateService()
 	framework.Logf("Service for listener pod has ClusterIP: %v", service.Spec.ClusterIP)

 	By("Creating a connector pod in cluster A, which will attempt the specific UUID handshake over TCP")
-	connectorPod := f.CreateTCPCheckConnectorPod(framework.ClusterA, leftScheduling, service.Spec.ClusterIP, connectorUUID)
+	connectorPod := f.NewNetworkPod(&framework.NetworkPodConfig{
+		Type:       framework.ConnectorPod,
+		Cluster:    framework.ClusterA,
+		Scheduling: leftScheduling,
+		RemoteIP:   service.Spec.ClusterIP,
+	})

 	By("Waiting for the listener pod to exit with code 0, returning what listener sent")
-	exitStatusL, exitMessageL := f.WaitForPodFinishStatus(listenerPod, framework.ClusterB)
-	framework.Logf("Listener output:\n%s", keepLines(exitMessageL, 3))
-	Expect(exitStatusL).To(Equal(int32(0)))
+	listenerPod.AwaitSuccessfulFinish()
+	framework.Logf("Listener output:\n%s", keepLines(listenerPod.TerminationMessage, 3))

 	By("Waiting for the connector pod to exit with code 0, returning what connector sent")
-	exitStatusC, exitMessageC := f.WaitForPodFinishStatus(connectorPod, framework.ClusterA)
-	framework.Logf("Connector output\n%s", keepLines(exitMessageC, 2))
-	Expect(exitStatusC).To(Equal(int32(0)))
+	connectorPod.AwaitSuccessfulFinish()
+	framework.Logf("Connector output\n%s", keepLines(connectorPod.TerminationMessage, 2))

-	By("Retrieving updated connector pod information, including PodIP")
-	pc := f.ClusterClients[framework.ClusterA].CoreV1().Pods(f.Namespace)
-	connectorPod, err := pc.Get(connectorPod.Name, metav1.GetOptions{})
-	Expect(err).ShouldNot(HaveOccurred())
-
-	framework.Logf("Connector pod has IP: %s", connectorPod.Status.PodIP)
-	By("Verifying the output of listener pod which must contain the source IP")
-	Expect(exitMessageL).To(ContainSubstring(connectorPod.Status.PodIP))
+	return listenerPod, connectorPod
 }

 func keepLines(output string, n int) string {
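The body of keepLines is collapsed in this view. A plausible sketch, assuming it simply truncates the output to its first n lines (the actual implementation is not shown in this hunk):

// Hypothetical reconstruction — the real body is collapsed above.
func keepLines(output string, n int) string {
	lines := strings.Split(output, "\n")
	if len(lines) > n {
		lines = lines[:n]
	}
	return strings.Join(lines, "\n")
}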
169 changes: 145 additions & 24 deletions test/e2e/framework/network_pods.go
@@ -1,22 +1,150 @@
 package framework

 import (
+	"fmt"
 	"strconv"
+	"time"

 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/uuid"
+	"k8s.io/apimachinery/pkg/util/wait"

 	. "github.com/onsi/gomega"
 )

+type NetworkPodType int
+
+const (
+	InvalidPodType NetworkPodType = iota
+	ListenerPod
+	ConnectorPod
+)
+
+type NetworkPodScheduling int
+
+const (
+	InvalidScheduling NetworkPodScheduling = iota
+	GatewayNode
+	NonGatewayNode
+)
+
+type NetworkPodConfig struct {
+	Type       NetworkPodType
+	Cluster    ClusterIndex
+	Scheduling NetworkPodScheduling
+	Port       int
+	Data       string
+	RemoteIP   string
+	// TODO: namespace, once https://github.com/submariner-io/submariner/pull/141 is merged
+}
+
+type NetworkPod struct {
+	Pod                *v1.Pod
+	Config             *NetworkPodConfig
+	TerminationCode    int32
+	TerminationMessage string
+	framework          *Framework
+}
+
 const (
 	TestPort = 1234
 )

+func (f *Framework) NewNetworkPod(config *NetworkPodConfig) *NetworkPod {
+
+	// check if all necessary details are provided
+	Expect(config.Scheduling).ShouldNot(Equal(InvalidScheduling))
+	Expect(config.Type).ShouldNot(Equal(InvalidPodType))
+
+	// setup unset defaults
+	if config.Port == 0 {
+		config.Port = TestPort
+	}
+
+	if config.Data == "" {
+		config.Data = string(uuid.NewUUID())
+	}
+
+	networkPod := &NetworkPod{Config: config, framework: f, TerminationCode: -1}
+
+	switch config.Type {
+	case ListenerPod:
+		networkPod.buildTCPCheckListenerPod()
+	case ConnectorPod:
+		networkPod.buildTCPCheckConnectorPod()
+	}
+
+	return networkPod
+}
+
+func (np *NetworkPod) AwaitReady() {
+	var finalPod *v1.Pod
+	pc := np.framework.ClusterClients[np.Config.Cluster].CoreV1().Pods(np.framework.Namespace)
+	err := wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) {
+		pod, err := pc.Get(np.Pod.Name, metav1.GetOptions{})
+		if err != nil {
+			if IsTransientError(err) {
+				Logf("Transient failure when attempting to list pods: %v", err)
+				return false, nil // return nil to avoid PollImmediate from stopping
+			}
+			return false, err
+		}
+		if pod.Status.Phase != v1.PodRunning {
+			if pod.Status.Phase != v1.PodPending {
+				return false, fmt.Errorf("expected pod to be in phase \"Pending\" or \"Running\"")
+			}
+			return false, nil // pod is still pending
+		}
+		finalPod = pod
+		return true, nil // pod is running
+	})
+	Expect(err).NotTo(HaveOccurred())
+	np.Pod = finalPod
+}
+
+func (np *NetworkPod) AwaitSuccessfulFinish() {
+	pc := np.framework.ClusterClients[np.Config.Cluster].CoreV1().Pods(np.framework.Namespace)
+
+	err := wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) {
+		var err error
+		np.Pod, err = pc.Get(np.Pod.Name, metav1.GetOptions{})
+		if err != nil {
+			if IsTransientError(err) {
+				Logf("Transient failure when attempting to list pods: %v", err)
+				return false, nil // return nil to avoid PollImmediate from stopping
+			}
+			return false, err
+		}
+		switch np.Pod.Status.Phase {
+		case v1.PodSucceeded:
+			return true, nil
+		case v1.PodFailed:
+			return true, nil
+		default:
+			return false, nil
+		}
+	})
+
+	Expect(err).NotTo(HaveOccurred())
+
+	finished := np.Pod.Status.Phase == v1.PodSucceeded || np.Pod.Status.Phase == v1.PodFailed
+	Expect(finished).To(BeTrue())
+
+	np.TerminationCode = np.Pod.Status.ContainerStatuses[0].State.Terminated.ExitCode
+	np.TerminationMessage = np.Pod.Status.ContainerStatuses[0].State.Terminated.Message
+
+	Expect(np.TerminationCode).To(Equal(int32(0)))
+}
+
+func (np *NetworkPod) CreateService() *v1.Service {
+	return np.framework.CreateTCPService(np.Config.Cluster, np.Pod.Labels[TestAppLabel], np.Config.Port)
+}
+
 // create a test pod inside the current test namespace on the specified cluster.
 // The pod will listen on TestPort over TCP, send sendString over the connection,
 // and write the network response in the pod termination log, then exit with 0 status
-func (f *Framework) CreateTCPCheckListenerPod(cluster ClusterIndex, scheduling TestPodScheduling, sendString string) *v1.Pod {
+func (np *NetworkPod) buildTCPCheckListenerPod() {

 	tcpCheckListenerPod := v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
@@ -26,7 +154,7 @@ func (f *Framework) CreateTCPCheckListenerPod(cluster ClusterIndex, scheduling T
 			},
 		},
 		Spec: v1.PodSpec{
-			Affinity:      nodeAffinity(scheduling),
+			Affinity:      nodeAffinity(np.Config.Scheduling),
 			RestartPolicy: v1.RestartPolicyNever,
 			Containers: []v1.Container{
 				{
@@ -36,25 +164,26 @@ func (f *Framework) CreateTCPCheckListenerPod(cluster ClusterIndex, scheduling T
 					// resource environments from not sending at least some data before timeout.
 					Command: []string{"sh", "-c", "for i in $(seq 50); do echo listener says $SEND_STRING; done | nc -l -v -p $LISTEN_PORT -s 0.0.0.0 >/dev/termination-log 2>&1"},
 					Env: []v1.EnvVar{
-						{Name: "LISTEN_PORT", Value: strconv.Itoa(TestPort)},
-						{Name: "SEND_STRING", Value: sendString},
+						{Name: "LISTEN_PORT", Value: strconv.Itoa(np.Config.Port)},
+						{Name: "SEND_STRING", Value: np.Config.Data},
 					},
 				},
 			},
 		},
 	}

-	pc := f.ClusterClients[cluster].CoreV1().Pods(f.Namespace)
-	tcpListenerPod, err := pc.Create(&tcpCheckListenerPod)
+	pc := np.framework.ClusterClients[np.Config.Cluster].CoreV1().Pods(np.framework.Namespace)
+	var err error
+	np.Pod, err = pc.Create(&tcpCheckListenerPod)
 	Expect(err).NotTo(HaveOccurred())
-	return f.WaitForPodToBeReady(tcpListenerPod, cluster)
+	np.AwaitReady()
 }

 // create a test pod inside the current test namespace on the specified cluster.
 // The pod will connect to remoteIP:TestPort over TCP, send sendString over the
 // connection, and write the network response in the pod termination log, then
 // exit with 0 status
-func (f *Framework) CreateTCPCheckConnectorPod(cluster ClusterIndex, scheduling TestPodScheduling, remoteIP string, sendString string) *v1.Pod {
+func (np *NetworkPod) buildTCPCheckConnectorPod() {

 	tcpCheckConnectorPod := v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
@@ -64,7 +193,7 @@ func (f *Framework) CreateTCPCheckConnectorPod(cluster ClusterIndex, scheduling
 			},
 		},
 		Spec: v1.PodSpec{
-			Affinity:      nodeAffinity(scheduling),
+			Affinity:      nodeAffinity(np.Config.Scheduling),
 			RestartPolicy: v1.RestartPolicyNever,
 			Containers: []v1.Container{
 				{
@@ -74,30 +203,22 @@ func (f *Framework) CreateTCPCheckConnectorPod(cluster ClusterIndex, scheduling
 					// resource environments from not sending at least some data before timeout.
 					Command: []string{"sh", "-c", "for in in $(seq 50); do echo connector says $SEND_STRING; done | nc -v $REMOTE_IP $REMOTE_PORT -w 8 >/dev/termination-log 2>&1"},
 					Env: []v1.EnvVar{
-						{Name: "REMOTE_PORT", Value: strconv.Itoa(TestPort)},
-						{Name: "SEND_STRING", Value: sendString},
-						{Name: "REMOTE_IP", Value: remoteIP},
+						{Name: "REMOTE_PORT", Value: strconv.Itoa(np.Config.Port)},
+						{Name: "SEND_STRING", Value: np.Config.Data},
+						{Name: "REMOTE_IP", Value: np.Config.RemoteIP},
 					},
 				},
 			},
 		},
 	}

-	pc := f.ClusterClients[cluster].CoreV1().Pods(f.Namespace)
-	tcpCheckPod, err := pc.Create(&tcpCheckConnectorPod)
+	pc := np.framework.ClusterClients[np.Config.Cluster].CoreV1().Pods(np.framework.Namespace)
+	var err error
+	np.Pod, err = pc.Create(&tcpCheckConnectorPod)
 	Expect(err).NotTo(HaveOccurred())
-	return tcpCheckPod
 }

-type TestPodScheduling int
-
-const (
-	InvalidScheduling TestPodScheduling = iota
-	GatewayNode
-	NonGatewayNode
-)
-
-func nodeAffinity(scheduling TestPodScheduling) *v1.Affinity {
+func nodeAffinity(scheduling NetworkPodScheduling) *v1.Affinity {

 	const gatewayLabel = "submariner.io/gateway"

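The rest of nodeAffinity is collapsed in this view. A sketch of what the selector plausibly does, assuming GatewayNode requires the submariner.io/gateway label to be present and NonGatewayNode requires it to be absent — only the gatewayLabel constant is visible above, so the operator choice is an assumption:

// Hypothetical reconstruction — only gatewayLabel is visible in the hunk.
func nodeAffinity(scheduling NetworkPodScheduling) *v1.Affinity {
	const gatewayLabel = "submariner.io/gateway"

	var op v1.NodeSelectorOperator
	switch scheduling {
	case GatewayNode:
		op = v1.NodeSelectorOpExists // pin the pod to gateway nodes
	case NonGatewayNode:
		op = v1.NodeSelectorOpDoesNotExist // keep the pod off gateway nodes
	}

	return &v1.Affinity{
		NodeAffinity: &v1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
				NodeSelectorTerms: []v1.NodeSelectorTerm{{
					MatchExpressions: []v1.NodeSelectorRequirement{{
						Key:      gatewayLabel,
						Operator: op,
					}},
				}},
			},
		},
	}
}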
