Skip to content

Commit

Permalink
Automate Invalid GKE config tests and update eks/aks/gke.yaml GH Action to not use a specific downstream cluster (#78)
Browse files Browse the repository at this point in the history

Signed-off-by: Parthvi <parthvi.vala@suse.com>
  • Loading branch information
valaparthvi authored Mar 19, 2024
1 parent 6b1d443 commit d451d4c
Show file tree
Hide file tree
Showing 4 changed files with 114 additions and 9 deletions.
5 changes: 2 additions & 3 deletions .github/workflows/aks.yaml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# This workflow calls the main workflow with custom variables
name: AKS-E2E
run-name: AKS ${{ inputs.downstream_k8s_version || '1.26.10' }} on Rancher v${{ inputs.rancher_version || '2.8-head' }} deployed on ${{ inputs.k3s_version || 'v1.27.9+k3s1' }}
run-name: AKS ${{ inputs.downstream_k8s_version }} on Rancher v${{ inputs.rancher_version || '2.8-head' }} deployed on ${{ inputs.k3s_version || 'v1.27.9+k3s1' }}

on:
schedule:
Expand Down Expand Up @@ -36,7 +36,6 @@ on:
type: boolean
downstream_k8s_version:
description: Downstream cluster K8s version to test
default: 1.26.10
rancher_installed:
description: Rancher details if already installed
default: 'hostname/password'
Expand All @@ -62,6 +61,6 @@ jobs:
run_support_matrix_importing_tests: ${{ contains(inputs.tests_to_run, 'support_matrix_importing') || (github.event_name == 'schedule' && false) }}
destroy_runner: ${{ inputs.destroy_runner == true || (github.event_name == 'schedule' && true) }}
runner_template: ${{ inputs.runner_template || 'hosted-prov-e2e-ci-runner-spot-n2-highmem-16-gl-template-v1' }}
downstream_k8s_version: ${{ inputs.downstream_k8s_version || '1.26.10' }}
downstream_k8s_version: ${{ inputs.downstream_k8s_version }}
rancher_installed: ${{ inputs.rancher_installed || 'hostname/password' }}
downstream_cluster_cleanup: ${{ inputs.downstream_cluster_cleanup == true || (github.event_name == 'schedule' && true) }}
5 changes: 2 additions & 3 deletions .github/workflows/eks.yaml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# This workflow calls the main workflow with custom variables
name: EKS-E2E
run-name: EKS ${{ inputs.downstream_k8s_version || '1.26' }} on Rancher v${{ inputs.rancher_version || '2.8-head' }} deployed on ${{ inputs.k3s_version || 'v1.27.9+k3s1' }}
run-name: EKS ${{ inputs.downstream_k8s_version }} on Rancher v${{ inputs.rancher_version || '2.8-head' }} deployed on ${{ inputs.k3s_version || 'v1.27.9+k3s1' }}

on:
schedule:
Expand Down Expand Up @@ -36,7 +36,6 @@ on:
type: boolean
downstream_k8s_version:
description: Downstream cluster K8s version to test
default: 1.26
rancher_installed:
description: Rancher details if already installed
default: 'hostname/password'
Expand All @@ -62,6 +61,6 @@ jobs:
run_support_matrix_importing_tests: ${{ contains(inputs.tests_to_run, 'support_matrix_importing') || (github.event_name == 'schedule' && false) }}
destroy_runner: ${{ inputs.destroy_runner == true || (github.event_name == 'schedule' && true) }}
runner_template: ${{ inputs.runner_template || 'hosted-prov-e2e-ci-runner-spot-n2-highmem-16-gl-template-v1' }}
downstream_k8s_version: ${{ inputs.downstream_k8s_version || '1.26' }}
downstream_k8s_version: ${{ inputs.downstream_k8s_version }}
rancher_installed: ${{ inputs.rancher_installed || 'hostname/password' }}
downstream_cluster_cleanup: ${{ inputs.downstream_cluster_cleanup == true || (github.event_name == 'schedule' && true) }}
5 changes: 2 additions & 3 deletions .github/workflows/gke.yaml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# This workflow calls the main workflow with custom variables
name: GKE-E2E
run-name: GKE ${{ inputs.downstream_k8s_version || '1.27.3-gke.100' }} on Rancher v${{ inputs.rancher_version || '2.8-head' }} deployed on ${{ inputs.k3s_version || 'v1.27.9+k3s1' }}
run-name: GKE ${{ inputs.downstream_k8s_version }} on Rancher v${{ inputs.rancher_version || '2.8-head' }} deployed on ${{ inputs.k3s_version || 'v1.27.9+k3s1' }}

on:
schedule:
Expand Down Expand Up @@ -36,7 +36,6 @@ on:
type: boolean
downstream_k8s_version:
description: Downstream cluster K8s version to test
default: 1.27.3-gke.100
rancher_installed:
description: Rancher details if already installed
default: 'hostname/password'
Expand All @@ -62,6 +61,6 @@ jobs:
run_support_matrix_importing_tests: ${{ contains(inputs.tests_to_run, 'support_matrix_importing') || (github.event_name == 'schedule' && false) }}
destroy_runner: ${{ inputs.destroy_runner == true || (github.event_name == 'schedule' && true) }}
runner_template: ${{ inputs.runner_template || 'hosted-prov-e2e-ci-runner-spot-n2-highmem-16-gl-template-v1' }}
downstream_k8s_version: ${{ inputs.downstream_k8s_version || '1.27.3-gke.100' }}
downstream_k8s_version: ${{ inputs.downstream_k8s_version }}
rancher_installed: ${{ inputs.rancher_installed || 'hostname/password' }}
downstream_cluster_cleanup: ${{ inputs.downstream_cluster_cleanup == true || (github.event_name == 'schedule' && true) }}
108 changes: 108 additions & 0 deletions hosted/gke/p0/p0_other_provisioning_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,108 @@
package p0_test

import (
"strings"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
management "github.com/rancher/shepherd/clients/rancher/generated/management/v3"
"github.com/rancher/shepherd/extensions/clusters/gke"
"github.com/rancher/shepherd/pkg/config"

"github.com/rancher/hosted-providers-e2e/hosted/gke/helper"
)

// P0OtherProvisioning covers negative provisioning scenarios for GKE hosted
// clusters: each spec feeds Rancher an intentionally invalid cluster config
// and asserts that provisioning is rejected, either synchronously (API error)
// or asynchronously (failure surfaced via cluster conditions).
var _ = Describe("P0OtherProvisioning", func() {
	// cluster holds the most recently attempted hosted cluster so the
	// Context-level AfterEach can clean it up when creation succeeded.
	var cluster *management.Cluster
	var (
		// originalConfig snapshots the on-disk GKE cluster config so it can
		// be restored after each spec mutates the shared config file.
		originalConfig = new(management.GKEClusterConfigSpec)
	)

	BeforeEach(func() {
		// Snapshot the pristine config before this spec mutates it.
		config.LoadConfig(gke.GKEClusterConfigConfigurationFileKey, originalConfig)

		gkeConfig := new(management.GKEClusterConfigSpec)

		config.LoadAndUpdateConfig(gke.GKEClusterConfigConfigurationFileKey, gkeConfig, func() {
			gkeConfig.ProjectID = project
			gkeConfig.Zone = zone
			labels := helper.GetLabels()
			gkeConfig.Labels = &labels
			gkeConfig.KubernetesVersion = &k8sVersion
			// Mutate through the slice index rather than the range variable:
			// `for _, np := range ...` yields a copy when NodePools is a
			// slice of values, so `np.Version = ...` would be silently
			// dropped. Indexing works for both value and pointer slices.
			for i := range gkeConfig.NodePools {
				gkeConfig.NodePools[i].Version = &k8sVersion
			}
		})
	})
	AfterEach(func() {
		// Restore the original config so later suites see the unmodified file.
		config.UpdateConfig(gke.GKEClusterConfigConfigurationFileKey, originalConfig)
	})

	Context("Provisioning a cluster with invalid config", func() {

		AfterEach(func() {
			if ctx.ClusterCleanup {
				if cluster != nil {
					err := helper.DeleteGKEHostCluster(cluster, ctx.RancherClient)
					Expect(err).To(BeNil())
					// Reset so a failure in a later spec cannot trigger a
					// second delete of an already-removed cluster.
					cluster = nil
				}
			}
		})

		It("should fail to provision a cluster when creating cluster with invalid name", func() {
			// The invalid name is rejected synchronously by the API, so the
			// error is expected on the create call itself.
			var err error
			cluster, err = gke.CreateGKEHostedCluster(ctx.RancherClient, "@!invalid-gke-name-@#", ctx.CloudCred.ID, false, false, false, false, map[string]string{})
			Expect(err).ToNot(BeNil())
			Expect(err.Error()).To(ContainSubstring("InvalidFormat"))
		})

		It("should fail to provision a cluster with invalid nodepool name", func() {
			gkeConfig := new(management.GKEClusterConfigSpec)
			config.LoadAndUpdateConfig(gke.GKEClusterConfigConfigurationFileKey, gkeConfig, func() {
				// Dereference through the index for the same range-copy
				// safety as in BeforeEach; the assignment rewrites the
				// pointed-to string in place, matching the original intent.
				for i := range gkeConfig.NodePools {
					*gkeConfig.NodePools[i].Name = "#@invalid-nodepoolname-$$$$"
				}
			})

			// Creation is accepted by Rancher; the invalid node pool name is
			// only rejected later by GKE, surfacing as a cluster condition.
			var err error
			cluster, err = gke.CreateGKEHostedCluster(ctx.RancherClient, clusterName, ctx.CloudCred.ID, false, false, false, false, map[string]string{})
			Expect(err).To(BeNil())

			Eventually(func() bool {
				clusterState, err := ctx.RancherClient.Management.Cluster.ByID(cluster.ID)
				Expect(err).To(BeNil())
				for _, condition := range clusterState.Conditions {
					if strings.Contains(condition.Message, "Invalid value for field \"node_pool.name\"") {
						return true
					}
				}
				return false
			}, "10s", "1s").Should(BeTrue())

		})

		It("should fail to provision a cluster with no nodepools", func() {
			gkeConfig := new(management.GKEClusterConfigSpec)
			config.LoadAndUpdateConfig(gke.GKEClusterConfigConfigurationFileKey, gkeConfig, func() {
				// Strip every node pool; GKE requires at least one node.
				gkeConfig.NodePools = nil
			})

			// As above, the rejection is asynchronous and shows up in the
			// cluster's conditions rather than on the create call.
			var err error
			cluster, err = gke.CreateGKEHostedCluster(ctx.RancherClient, clusterName, ctx.CloudCred.ID, false, false, false, false, map[string]string{})
			Expect(err).To(BeNil())

			Eventually(func() bool {
				clusterState, err := ctx.RancherClient.Management.Cluster.ByID(cluster.ID)
				Expect(err).To(BeNil())
				for _, condition := range clusterState.Conditions {
					if strings.Contains(condition.Message, "Cluster.initial_node_count must be greater than zero") {
						return true
					}
				}
				return false
			}, "10s", "1s").Should(BeTrue())

		})
	})

})

0 comments on commit d451d4c

Please sign in to comment.