ci: Improve TestValidateState calls from load package (#2484)
* ci: improve TestValidateState

* chore: address comments
jpayne3506 authored Jan 12, 2024
1 parent 12cfa78 commit f0a0a20
Showing 3 changed files with 72 additions and 3 deletions.
53 changes: 50 additions & 3 deletions test/integration/load/load_test.go
@@ -130,6 +130,46 @@ func TestValidateState(t *testing.T) {
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)
    defer cancel()

    if testConfig.ValidateStateFile {
        deployment := kubernetes.MustParseDeployment(noopDeploymentMap[testConfig.OSType])
        deploymentsClient := clientset.AppsV1().Deployments(namespace)

        // Ensure pods exist on the nodes so that state files can be validated properly; without pods the validation can report false positives.
        nodes, err := kubernetes.GetNodeListByLabelSelector(ctx, clientset, "kubernetes.io/os="+testConfig.OSType)
        require.NoError(t, err)
        nodeCount := len(nodes.Items)
        replicas := int32(nodeCount) * 2

        deploymentExists, err := kubernetes.DeploymentExists(ctx, deploymentsClient, deployment.Name)
        require.NoError(t, err)
        if !deploymentExists {
            t.Logf("Test deployment %s does not exist! Creating %d pods in the %s namespace", deployment.Name, replicas, namespace)
            // Create the namespace if it doesn't exist
            namespaceExists, err := kubernetes.NamespaceExists(ctx, clientset, namespace)
            require.NoError(t, err)
            if !namespaceExists {
                kubernetes.MustCreateNamespace(ctx, clientset, namespace)
            }

            kubernetes.MustCreateDeployment(ctx, deploymentsClient, deployment)
            kubernetes.MustScaleDeployment(ctx, deploymentsClient, deployment, clientset, namespace, podLabelSelector, int(replicas), false)
        } else {
            t.Log("Test deployment exists! Using existing setup")
            replicas, err = kubernetes.GetDeploymentAvailableReplicas(ctx, deploymentsClient, deployment.Name) // the deployment already exists, so reuse its available replica count
            require.NoError(t, err)
        }
        if replicas < int32(nodeCount) {
            t.Logf("Warning - current replica count %d is below the current %s node count of %d. Raising replicas to ensure there is a pod on every node.", replicas, testConfig.OSType, nodeCount)
            replicas = int32(nodeCount) * 2
            kubernetes.MustScaleDeployment(ctx, deploymentsClient, deployment, clientset, namespace, podLabelSelector, int(replicas), false)
        }
        t.Log("Ensuring deployment is in ready status")
        err = kubernetes.WaitForPodDeployment(ctx, clientset, namespace, deployment.Name, podLabelSelector, int(replicas))
        require.NoError(t, err)
    }

    validator, err := validate.CreateValidator(ctx, clientset, config, namespace, testConfig.CNIType, testConfig.RestartCase, testConfig.OSType)
    require.NoError(t, err)
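
For orientation, the testConfig fields exercised by this file are sketched below. This is an editorial sketch, not the load package's actual TestConfig: only the field names come from the code in this diff, and the types of CNIType and RestartCase are assumptions, since the diff only shows them being forwarded to validate.CreateValidator.

// Editorial sketch of the testConfig fields referenced in load_test.go;
// the real TestConfig in the load package may declare more, or differently typed, fields.
type loadTestConfigSketch struct {
    OSType            string // matched against the kubernetes.io/os node label
    CNIType           string // assumed string; forwarded to validate.CreateValidator
    RestartCase       bool   // assumed bool; forwarded to validate.CreateValidator
    ValidateStateFile bool   // gates the state-file validation block above
    Cleanup           bool   // gates validator.Cleanup(ctx) in the property tests below
}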

@@ -182,10 +222,12 @@ func TestValidCNSStateDuringScaleAndCNSRestartToTriggerDropgzInstall(t *testing.
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)
    defer cancel()

-   validator, err := validate.CreateValidator(ctx, clientset, config, namespace, testConfig.CNIType, testConfig.RestartCase, testConfig.OSType)
-   require.NoError(t, err)
+   // Provide an option to validate state files with a proper environment before running the test
+   if testConfig.ValidateStateFile {
+       t.Run("Validate state file", TestValidateState)
+   }

-   err = validator.Validate(ctx)
+   validator, err := validate.CreateValidator(ctx, clientset, config, namespace, testConfig.CNIType, testConfig.RestartCase, testConfig.OSType)
    require.NoError(t, err)

    deployment := kubernetes.MustParseDeployment(noopDeploymentMap[testConfig.OSType])
@@ -238,6 +280,7 @@ func TestValidCNSStateDuringScaleAndCNSRestartToTriggerDropgzInstall(t *testing.
        kubernetes.MustDeleteDeployment(ctx, deploymentsClient, deployment)
        err = kubernetes.WaitForPodsDelete(ctx, clientset, namespace, podLabelSelector)
        require.NoError(t, err, "error waiting for pods to delete")
        validator.Cleanup(ctx)
    }
}

@@ -258,6 +301,10 @@ func TestV4OverlayProperties(t *testing.T) {
    t.Log("Validating v4Overlay node labels")
    err = validator.ValidateV4OverlayControlPlane(ctx)
    require.NoError(t, err)

    if testConfig.Cleanup {
        validator.Cleanup(ctx)
    }
}

func TestDualStackProperties(t *testing.T) {
12 changes: 12 additions & 0 deletions test/internal/kubernetes/utils.go
@@ -418,6 +418,18 @@ func NamespaceExists(ctx context.Context, clientset *kubernetes.Clientset, names
    return true, nil
}

func DeploymentExists(ctx context.Context, deploymentsClient typedappsv1.DeploymentInterface, deploymentName string) (bool, error) {
    _, err := deploymentsClient.Get(ctx, deploymentName, metav1.GetOptions{})
    if err != nil {
        if apierrors.IsNotFound(err) {
            return false, nil
        }
        return false, errors.Wrapf(err, "failed to get deployment %s", deploymentName)
    }

    return true, nil
}

// return a label selector
func CreateLabelSelector(key string, selector *string) string {
    return fmt.Sprintf("%s=%s", key, *selector)
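
The new DeploymentExists helper can be exercised in isolation. The sketch below is not part of the commit; it assumes client-go's fake clientset is available, and the file, namespace, and deployment names are hypothetical. It checks that an existing deployment is reported and that NotFound is swallowed into (false, nil).

package kubernetes

import (
    "context"
    "testing"

    appsv1 "k8s.io/api/apps/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes/fake"
)

func TestDeploymentExists(t *testing.T) {
    ctx := context.Background()

    // Seed a fake clientset with a single deployment named "noop".
    clientset := fake.NewSimpleClientset(&appsv1.Deployment{
        ObjectMeta: metav1.ObjectMeta{Name: "noop", Namespace: "load-test"},
    })
    deploymentsClient := clientset.AppsV1().Deployments("load-test")

    // An existing deployment is reported without error.
    exists, err := DeploymentExists(ctx, deploymentsClient, "noop")
    if err != nil || !exists {
        t.Fatalf("expected noop to exist, got exists=%v err=%v", exists, err)
    }

    // A missing deployment yields (false, nil) rather than an error.
    exists, err = DeploymentExists(ctx, deploymentsClient, "missing")
    if err != nil || exists {
        t.Fatalf("expected missing to be absent, got exists=%v err=%v", exists, err)
    }
}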
10 changes: 10 additions & 0 deletions test/internal/kubernetes/utils_get.go
@@ -7,6 +7,7 @@ import (
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
)

func GetNodeList(ctx context.Context, clientset *kubernetes.Clientset) (*corev1.NodeList, error) {
@@ -51,3 +52,12 @@ func GetPodsIpsByNode(ctx context.Context, clientset *kubernetes.Clientset, name
    }
    return ips, nil
}

func GetDeploymentAvailableReplicas(ctx context.Context, deploymentsClient typedappsv1.DeploymentInterface, deploymentName string) (int32, error) {
    deployment, err := deploymentsClient.Get(ctx, deploymentName, metav1.GetOptions{})
    if err != nil {
        return -1, errors.Wrapf(err, "could not get deployment %s", deploymentName)
    }

    return deployment.Status.AvailableReplicas, nil
}
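
The load test pairs GetDeploymentAvailableReplicas with kubernetes.WaitForPodDeployment; as a standalone illustration of the new getter, the hypothetical helper below (not part of this commit) polls it until a desired replica count is reached or the context expires. The 5-second interval and helper name are assumptions.

package kubernetes

import (
    "context"
    "fmt"
    "time"

    typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
)

// waitForAvailableReplicas polls GetDeploymentAvailableReplicas until the
// deployment reports at least `want` available replicas or ctx is cancelled.
func waitForAvailableReplicas(ctx context.Context, deploymentsClient typedappsv1.DeploymentInterface, name string, want int32) error {
    for {
        available, err := GetDeploymentAvailableReplicas(ctx, deploymentsClient, name)
        if err != nil {
            return err
        }
        if available >= want {
            return nil
        }
        select {
        case <-ctx.Done():
            return fmt.Errorf("waiting for %d available replicas of %s: %w", want, name, ctx.Err())
        case <-time.After(5 * time.Second):
        }
    }
}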
