diff --git a/hosted/eks/helper/helper_cluster.go b/hosted/eks/helper/helper_cluster.go
index 666a6d59..39c79775 100644
--- a/hosted/eks/helper/helper_cluster.go
+++ b/hosted/eks/helper/helper_cluster.go
@@ -363,32 +363,33 @@ func UpdatePublicAccessSources(cluster *management.Cluster, client *rancher.Clie
 	return cluster, nil
 }
 
-// UpdateClusterTags updates the tags of a EKS cluster
+// UpdateClusterTags updates the tags of an EKS cluster;
+// the given tag list will replace the existing tags; this is required to be able to remove tags using this function
 // if wait is set to true, it waits until the update is complete; if checkClusterConfig is true, it validates the update
 func UpdateClusterTags(cluster *management.Cluster, client *rancher.Client, tags map[string]string, checkClusterConfig bool) (*management.Cluster, error) {
 	upgradedCluster := cluster
-	maps.Copy(*upgradedCluster.EKSConfig.Tags, tags)
+	upgradedCluster.EKSConfig.Tags = &tags
 
 	cluster, err := client.Management.Cluster.Update(cluster, &upgradedCluster)
 	Expect(err).To(BeNil())
 
 	if checkClusterConfig {
+		// Check if the desired config is set correctly
+		for key, value := range tags {
+			Expect(*cluster.EKSConfig.Tags).Should(HaveKeyWithValue(key, value))
+		}
 		Eventually(func() bool {
-			// Check if the desired config is set correctly
-			for key, value := range tags {
-				Expect(*cluster.EKSConfig.Tags).Should(HaveKeyWithValue(key, value))
-			}
-
 			ginkgo.GinkgoLogr.Info("Waiting for the cluster tag changes to appear in EKSStatus.UpstreamSpec ...")
 			cluster, err = client.Management.Cluster.ByID(cluster.ID)
 			Expect(err).To(BeNil())
-			return helpers.CheckMapKeys(tags, *cluster.EKSStatus.UpstreamSpec.Tags)
+			return maps.Equal(tags, *cluster.EKSStatus.UpstreamSpec.Tags)
 		}, tools.SetTimeout(10*time.Minute), 15*time.Second).Should(BeTrue())
 	}
 	return cluster, nil
 }
 
 // UpdateNodegroupMetadata updates the tags & labels of a EKS Node groups
+// the given tags and labels will replace the existing counterparts
 // if wait is set to true, it waits until the update is complete; if checkClusterConfig is true, it validates the update
 func UpdateNodegroupMetadata(cluster *management.Cluster, client *rancher.Client, tags, labels map[string]string, checkClusterConfig bool) (*management.Cluster, error) {
 	upgradedCluster := cluster
@@ -418,7 +419,7 @@ func UpdateNodegroupMetadata(cluster *management.Cluster, client *rancher.Client
 		Expect(err).To(BeNil())
 
 		for _, ng := range cluster.EKSStatus.UpstreamSpec.NodeGroups {
-			if helpers.CheckMapKeys(tags, *ng.Tags) && helpers.CheckMapKeys(labels, *ng.Labels) {
+			if maps.Equal(tags, *ng.Tags) && maps.Equal(labels, *ng.Labels) {
 				return true
 			}
 		}
@@ -460,7 +461,7 @@ func ListEKSAllVersions(client *rancher.Client) (allVersions []string, err error
 
 // <==============================EKS CLI==============================>
 // Create AWS EKS cluster using EKS CLI
-func CreateEKSClusterOnAWS(region string, clusterName string, k8sVersion string, nodes string, tags map[string]string) error {
+func CreateEKSClusterOnAWS(region string, clusterName string, k8sVersion string, nodes string, tags map[string]string, extraArgs ...string) error {
 	currentKubeconfig := os.Getenv("KUBECONFIG")
 	defer os.Setenv("KUBECONFIG", currentKubeconfig)
 
@@ -468,7 +469,10 @@ func CreateEKSClusterOnAWS(region string, clusterName string, k8sVersion string,
 	formattedTags := k8slabels.SelectorFromSet(tags).String()
 	fmt.Println("Creating EKS cluster ...")
 
-	args := []string{"create", "cluster", "--region=" + region, "--name=" + clusterName, "--version=" + k8sVersion, "--nodegroup-name", "ranchernodes", "--nodes", nodes, "--managed", "--tags", formattedTags}
+	args := []string{"create", "cluster", "--region=" + region, "--name=" + clusterName, "--version=" + k8sVersion, "--nodegroup-name", "ranchernodes", "--nodes", nodes, "--tags", formattedTags}
+	if len(extraArgs) != 0 {
+		args = append(args, extraArgs...)
+	}
 	fmt.Printf("Running command: eksctl %v\n", args)
 	out, err := proc.RunW("eksctl", args...)
 	if err != nil {
@@ -494,6 +498,23 @@ func UpgradeEKSClusterOnAWS(region string, clusterName string, upgradeToVersion
 	return nil
 }
 
+// AddNodeGroupOnAWS adds a nodegroup to a cluster using EKS CLI
+func AddNodeGroupOnAWS(nodeName, clusterName, region string, extraArgs ...string) error {
+	fmt.Println("Adding nodegroup to EKS cluster ...")
+	args := []string{"create", "nodegroup", "--region=" + region, "--cluster", clusterName, "--name", nodeName}
+	if len(extraArgs) != 0 {
+		args = append(args, extraArgs...)
+	}
+	fmt.Printf("Running command: eksctl %v\n", args)
+	out, err := proc.RunW("eksctl", args...)
+	if err != nil {
+		return errors.Wrap(err, "Failed to add nodegroup: "+out)
+	}
+	fmt.Println("Added nodegroup: ", nodeName)
+	return nil
+
+}
+
 // Upgrade EKS cluster nodegroup using EKS CLI
 func UpgradeEKSNodegroupOnAWS(region string, clusterName string, ngName string, upgradeToVersion string) error {
 	fmt.Println("Upgrading EKS cluster nodegroup ...")
@@ -508,17 +529,27 @@ func UpgradeEKSNodegroupOnAWS(region string, clusterName string, ngName string,
 	return nil
 }
 
-func GetFromEKS(region string, clusterName string, cmd string, query string) (out string, err error) {
-	clusterArgs := []string{"eksctl", "get", "cluster", "--region=" + region, "--name=" + clusterName, "-ojson", "|", "jq", "-r"}
-	ngArgs := []string{"eksctl", "get", "nodegroup", "--region=" + region, "--cluster=" + clusterName, "-ojson", "|", "jq", "-r"}
+func GetFromEKS(region string, clusterName string, cmd string, query string, extraArgs ...string) (out string, err error) {
+	clusterArgs := []string{"eksctl", "get", "cluster", "--region=" + region, "--name=" + clusterName, "-ojson"}
+	ngArgs := []string{"eksctl", "get", "nodegroup", "--region=" + region, "--cluster=" + clusterName, "-ojson"}
+	queryArgs := []string{"|", "jq", "-r", query}
 
 	if cmd == "cluster" {
-		clusterArgs = append(clusterArgs, query)
+		// extraArgs must be appended before queryArgs
+		if len(extraArgs) != 0 {
+			clusterArgs = append(clusterArgs, extraArgs...)
+		}
+		clusterArgs = append(clusterArgs, queryArgs...)
 		cmd = strings.Join(clusterArgs, " ")
 	} else {
-		ngArgs = append(ngArgs, query)
+		// extraArgs must be appended before queryArgs
+		if len(extraArgs) != 0 {
+			ngArgs = append(ngArgs, extraArgs...)
+		}
+		ngArgs = append(ngArgs, queryArgs...)
 		cmd = strings.Join(ngArgs, " ")
 	}
 
+	fmt.Printf("Running command: %s\n", cmd)
 	out, err = proc.RunW("bash", "-c", cmd)
 	return strings.TrimSpace(out), err
diff --git a/hosted/eks/p1/p1_import_test.go b/hosted/eks/p1/p1_import_test.go
index 317a30d7..8238f70d 100644
--- a/hosted/eks/p1/p1_import_test.go
+++ b/hosted/eks/p1/p1_import_test.go
@@ -6,6 +6,7 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	management "github.com/rancher/shepherd/clients/rancher/generated/management/v3"
+	namegen "github.com/rancher/shepherd/pkg/namegenerator"
 
 	"github.com/rancher/hosted-providers-e2e/hosted/eks/helper"
 	"github.com/rancher/hosted-providers-e2e/hosted/helpers"
@@ -14,6 +15,24 @@ import (
 var _ = Describe("P1Import", func() {
 	var cluster *management.Cluster
 
+	BeforeEach(func() {
+		var err error
+		k8sVersion, err = helper.GetK8sVersion(ctx.RancherAdminClient, false)
+		Expect(err).To(BeNil())
+		GinkgoLogr.Info(fmt.Sprintf("Using kubernetes version %s for cluster %s", k8sVersion, clusterName))
+	})
+
+	AfterEach(func() {
+		if ctx.ClusterCleanup && (cluster != nil && cluster.ID != "") {
+			err := helper.DeleteEKSHostCluster(cluster, ctx.RancherAdminClient)
+			Expect(err).To(BeNil())
+			err = helper.DeleteEKSClusterOnAWS(region, clusterName)
+			Expect(err).To(BeNil())
+		} else {
+			fmt.Println("Skipping downstream cluster deletion: ", clusterName)
+		}
+	})
+
 	When("a cluster is imported for upgrade", func() {
 
 		BeforeEach(func() {
@@ -30,17 +49,6 @@ var _ = Describe("P1Import", func() {
 			Expect(err).To(BeNil())
 		})
 
-		AfterEach(func() {
-			if ctx.ClusterCleanup && (cluster != nil && cluster.ID != "") {
-				err := helper.DeleteEKSHostCluster(cluster, ctx.RancherAdminClient)
-				Expect(err).To(BeNil())
-				err = helper.DeleteEKSClusterOnAWS(region, clusterName)
-				Expect(err).To(BeNil())
-			} else {
-				fmt.Println("Skipping downstream cluster deletion: ", clusterName)
-			}
-		})
-
 		It("Upgrade version of node group only", func() {
 			testCaseID = 88
 			upgradeNodeKubernetesVersionGTCPCheck(cluster, ctx.RancherAdminClient)
@@ -58,31 +66,65 @@ var _ = Describe("P1Import", func() {
 		})
 	})
 
-	When("a cluster is imported", func() {
-
-		var _ = BeforeEach(func() {
-			var err error
-			k8sVersion, err = helper.GetK8sVersion(ctx.RancherAdminClient, false)
+	It("should successfully Import cluster with ONLY control plane", func() {
+		testCaseID = 94
+		err := helper.CreateEKSClusterOnAWS(region, clusterName, k8sVersion, "1", helpers.GetCommonMetadataLabels(), "--without-nodegroup")
+		Expect(err).To(BeNil())
+		cluster, err = helper.ImportEKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCredID, region)
+		Expect(err).To(BeNil())
+		Eventually(func() bool {
+			cluster, err = ctx.RancherAdminClient.Management.Cluster.ByID(cluster.ID)
 			Expect(err).To(BeNil())
-			GinkgoLogr.Info(fmt.Sprintf("Using kubernetes version %s for cluster %s", k8sVersion, clusterName))
+			return cluster.Transitioning == "error" && cluster.TransitioningMessage == "Cluster must have at least one managed nodegroup or one self-managed node."
+		}, "5m", "2s").Should(BeTrue())
+		cluster.EKSConfig = cluster.EKSStatus.UpstreamSpec
+		cluster, err = helper.AddNodeGroup(cluster, 1, ctx.RancherAdminClient, false, false)
+		Expect(err).To(BeNil())
+		cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient)
+		Expect(err).To(BeNil())
+
+		helpers.ClusterIsReadyChecks(cluster, ctx.RancherAdminClient, clusterName)
+	})
 
-		err = helper.CreateEKSClusterOnAWS(region, clusterName, k8sVersion, "1", helpers.GetCommonMetadataLabels())
+	It("successfully import EKS cluster with self-managed nodes", func() {
+		testCaseID = 107
+		err := helper.CreateEKSClusterOnAWS(region, clusterName, k8sVersion, "1", helpers.GetCommonMetadataLabels(), "--managed=false")
+		Expect(err).To(BeNil())
+		cluster, err = helper.ImportEKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCredID, region)
+		Expect(err).To(BeNil())
+		cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient)
+		Expect(err).To(BeNil())
+	})
+
+	When("a cluster with multiple nodegroups is imported", func() {
+		BeforeEach(func() {
+			err := helper.CreateEKSClusterOnAWS(region, clusterName, k8sVersion, "1", helpers.GetCommonMetadataLabels())
 			Expect(err).To(BeNil())
+			for i := 0; i < 2; i++ {
+				err = helper.AddNodeGroupOnAWS(namegen.AppendRandomString("ng"), clusterName, region)
+				Expect(err).To(BeNil())
+			}
 			cluster, err = helper.ImportEKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCredID, region)
 			Expect(err).To(BeNil())
 			cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient)
 			Expect(err).To(BeNil())
 		})
 
-		AfterEach(func() {
-			if ctx.ClusterCleanup && (cluster != nil && cluster.ID != "") {
-				err := helper.DeleteEKSHostCluster(cluster, ctx.RancherAdminClient)
-				Expect(err).To(BeNil())
-				err = helper.DeleteEKSClusterOnAWS(region, clusterName)
-				Expect(err).To(BeNil())
-			} else {
-				fmt.Println("Skipping downstream cluster deletion: ", clusterName)
-			}
+		It("should successfully Import cluster with at least 2 nodegroups", func() {
+			testCaseID = 105
+			helpers.ClusterIsReadyChecks(cluster, ctx.RancherAdminClient, clusterName)
+		})
+	})
+
+	When("a cluster is imported", func() {
+
+		var _ = BeforeEach(func() {
+			err := helper.CreateEKSClusterOnAWS(region, clusterName, k8sVersion, "1", helpers.GetCommonMetadataLabels())
+			Expect(err).To(BeNil())
+			cluster, err = helper.ImportEKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCredID, region)
+			Expect(err).To(BeNil())
+			cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient)
+			Expect(err).To(BeNil())
 		})
 
 		It("Delete & re-import cluster", func() {
@@ -121,25 +163,7 @@ var _ = Describe("P1Import", func() {
 
 		It("Update Tags and Labels", func() {
 			testCaseID = 81
-
-			var err error
-			tags := map[string]string{
-				"foo":        "bar",
-				"testCaseID": "98",
-			}
-			labels := map[string]string{
-				"testCaseID": "96",
-			}
-
-			By("Adding cluster tags", func() {
-				cluster, err = helper.UpdateClusterTags(cluster, ctx.RancherAdminClient, tags, true)
-				Expect(err).To(BeNil())
-			})
-
-			By("Adding Nodegroup tags & labels", func() {
-				cluster, err = helper.UpdateNodegroupMetadata(cluster, ctx.RancherAdminClient, tags, labels, true)
-				Expect(err).To(BeNil())
-			})
+			updateTagsAndLabels(cluster, ctx.RancherAdminClient)
 		})
 
 		Context("Reimporting/Editing a cluster with invalid config", func() {
diff --git a/hosted/eks/p1/p1_provisioning_test.go b/hosted/eks/p1/p1_provisioning_test.go
index d6924302..e49fb957 100644
--- a/hosted/eks/p1/p1_provisioning_test.go
+++ b/hosted/eks/p1/p1_provisioning_test.go
@@ -10,9 +10,10 @@ import (
 	"github.com/rancher/shepherd/extensions/clusters/eks"
 	namegen "github.com/rancher/shepherd/pkg/namegenerator"
 
+	"k8s.io/utils/pointer"
+
 	"github.com/rancher/hosted-providers-e2e/hosted/eks/helper"
 	"github.com/rancher/hosted-providers-e2e/hosted/helpers"
-	"k8s.io/utils/pointer"
 )
 
 var _ = Describe("P1Provisioning", func() {
@@ -25,16 +26,16 @@ var _ = Describe("P1Provisioning", func() {
 		GinkgoLogr.Info(fmt.Sprintf("While provisioning, using kubernetes version %s for cluster %s", k8sVersion, clusterName))
 	})
 
-	Context("Provisioning/Editing a cluster with invalid config", func() {
+	AfterEach(func() {
+		if ctx.ClusterCleanup && (cluster != nil && cluster.ID != "") {
+			err := helper.DeleteEKSHostCluster(cluster, ctx.RancherAdminClient)
+			Expect(err).To(BeNil())
+		} else {
+			fmt.Println("Skipping downstream cluster deletion: ", clusterName)
+		}
+	})
 
-		AfterEach(func() {
-			if ctx.ClusterCleanup && (cluster != nil && cluster.ID != "") {
-				if cluster != nil {
-					err := helper.DeleteEKSHostCluster(cluster, ctx.RancherAdminClient)
-					Expect(err).To(BeNil())
-				}
-			}
-		})
+	Context("Provisioning/Editing a cluster with invalid config", func() {
 
 		It("should error out to provision a cluster with no nodegroups", func() {
 			testCaseID = 141
@@ -135,6 +136,32 @@ var _ = Describe("P1Provisioning", func() {
 		})
 	})
 
+	It("should successfully Provision EKS from Rancher with Enabled GPU feature", func() {
+		testCaseID = 274
+		var gpuNodeName = "gpuenabled"
+		createFunc := func(clusterConfig *eks.ClusterConfig) {
+			nodeGroups := *clusterConfig.NodeGroupsConfig
+			gpuNG := nodeGroups[0]
+			gpuNG.Gpu = pointer.Bool(true)
+			gpuNG.NodegroupName = &gpuNodeName
+			gpuNG.InstanceType = pointer.String("p2.xlarge")
+			nodeGroups = append(nodeGroups, gpuNG)
+			clusterConfig.NodeGroupsConfig = &nodeGroups
+		}
+		var err error
+		cluster, err = helper.CreateEKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCredID, k8sVersion, region, createFunc)
+		Expect(err).To(BeNil())
+
+		cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient)
+		Expect(err).To(BeNil())
+
+		helpers.ClusterIsReadyChecks(cluster, ctx.RancherAdminClient, clusterName)
+		var amiID string
+		amiID, err = helper.GetFromEKS(region, clusterName, "nodegroup", ".[].ImageID", "--name", gpuNodeName)
+		Expect(err).To(BeNil())
+		Expect(amiID).To(Equal("AL2_x86_64_GPU"))
+	})
+
 	When("a cluster is created for upgrade", func() {
 
 		BeforeEach(func() {
@@ -151,15 +178,6 @@ var _ = Describe("P1Provisioning", func() {
 			Expect(err).To(BeNil())
 		})
 
-		AfterEach(func() {
-			if ctx.ClusterCleanup && (cluster != nil && cluster.ID != "") {
-				err := helper.DeleteEKSHostCluster(cluster, ctx.RancherAdminClient)
-				Expect(err).To(BeNil())
-			} else {
-				fmt.Println("Skipping downstream cluster deletion: ", clusterName)
-			}
-		})
-
 		It("Upgrade version of node group only", func() {
 			testCaseID = 126
 			upgradeNodeKubernetesVersionGTCPCheck(cluster, ctx.RancherAdminClient)
@@ -187,15 +205,6 @@ var _ = Describe("P1Provisioning", func() {
 			Expect(err).To(BeNil())
 		})
 
-		AfterEach(func() {
-			if ctx.ClusterCleanup && (cluster != nil && cluster.ID != "") {
-				err := helper.DeleteEKSHostCluster(cluster, ctx.RancherAdminClient)
-				Expect(err).To(BeNil())
-			} else {
-				fmt.Println("Skipping downstream cluster deletion: ", clusterName)
-			}
-		})
-
 		It("Update cluster logging types", func() {
 			testCaseID = 128
 
@@ -214,26 +223,7 @@ var _ = Describe("P1Provisioning", func() {
 
 		It("Update Tags and Labels", func() {
 			testCaseID = 131
-
-			var err error
-			tags := map[string]string{
-				"foo":        "bar",
-				"testCaseID": "144",
-			}
-
-			labels := map[string]string{
-				"testCaseID": "142",
-			}
-
-			By("Adding cluster tags", func() {
-				cluster, err = helper.UpdateClusterTags(cluster, ctx.RancherAdminClient, tags, true)
-				Expect(err).To(BeNil())
-			})
-
-			By("Adding Nodegroup tags & labels", func() {
-				cluster, err = helper.UpdateNodegroupMetadata(cluster, ctx.RancherAdminClient, tags, labels, true)
-				Expect(err).To(BeNil())
-			})
+			updateTagsAndLabels(cluster, ctx.RancherAdminClient)
 		})
 	})
 })
diff --git a/hosted/eks/p1/p1_suite_test.go b/hosted/eks/p1/p1_suite_test.go
index 4f4770d4..20c4322f 100644
--- a/hosted/eks/p1/p1_suite_test.go
+++ b/hosted/eks/p1/p1_suite_test.go
@@ -15,6 +15,7 @@ limitations under the License.
 package p1_test
 
 import (
+	"maps"
 	"strconv"
 	"strings"
 	"testing"
@@ -313,3 +314,65 @@ func upgradeCPAndAddNgCheck(cluster *management.Cluster, client *rancher.Client)
 		return true
 	}, "5m", "15s").Should(BeTrue())
 }
+
+// Automate Qase 81 and 131
+func updateTagsAndLabels(cluster *management.Cluster, client *rancher.Client) {
+	var err error
+	tags := map[string]string{
+		"foo":        "bar",
+		"testCaseID": "144-97-143",
+	}
+
+	labels := map[string]string{
+		"testCaseID": "142-99-145",
+	}
+
+	originalClusterTags := *cluster.EKSConfig.Tags
+	// updatedTags must contain both the original and the new tags
+	updatedTags := make(map[string]string)
+	maps.Copy(updatedTags, originalClusterTags)
+	maps.Copy(updatedTags, tags)
+
+	By("Adding cluster tags", func() {
+		cluster, err = helper.UpdateClusterTags(cluster, client, updatedTags, true)
+		Expect(err).To(BeNil())
+	})
+
+	By("Removing cluster tags", func() {
+		cluster, err = helper.UpdateClusterTags(cluster, client, originalClusterTags, true)
+		Expect(err).To(BeNil())
+		for key, value := range tags {
+			Expect(*cluster.EKSConfig.Tags).ToNot(HaveKeyWithValue(key, value))
+		}
+	})
+
+	originalNGLabels := *cluster.EKSConfig.NodeGroups[0].Labels
+	// updatedNGLabels must contain both the original and the new labels
+	updatedNGLabels := make(map[string]string)
+	maps.Copy(updatedNGLabels, originalNGLabels)
+	maps.Copy(updatedNGLabels, labels)
+
+	originalNGTags := *cluster.EKSConfig.NodeGroups[0].Tags
+	// updatedNGTags must contain both the original and the new tags
+	updatedNGTags := make(map[string]string)
+	maps.Copy(updatedNGTags, originalNGTags)
+	maps.Copy(updatedNGTags, tags)
+
+	By("Adding Nodegroup tags & labels", func() {
+		cluster, err = helper.UpdateNodegroupMetadata(cluster, client, updatedNGTags, updatedNGLabels, true)
+		Expect(err).To(BeNil())
+	})
+
+	By("Removing Nodegroup tags & labels", func() {
+		cluster, err = helper.UpdateNodegroupMetadata(cluster, client, originalNGTags, originalNGLabels, true)
+		Expect(err).To(BeNil())
+		for _, ng := range cluster.EKSConfig.NodeGroups {
+			for key, value := range tags {
+				Expect(*ng.Tags).ToNot(HaveKeyWithValue(key, value))
+			}
+			for key, value := range labels {
+				Expect(*ng.Labels).ToNot(HaveKeyWithValue(key, value))
+			}
+		}
+	})
+}
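The helper change above relies on replace-then-compare semantics: assigning the whole tag map (instead of maps.Copy-merging into the existing one) is what allows a shorter map to drop keys, and maps.Equal (instead of a key-subset check) is what confirms the upstream spec converged to exactly the desired map. A minimal standalone sketch of that idea, using only the standard library; the variable names below are illustrative and not taken from the helper:

package main

import (
	"fmt"
	"maps"
)

func main() {
	// Tags already present on the cluster, and tags a test wants to add temporarily.
	original := map[string]string{"owner": "hosted-providers-e2e"}
	extra := map[string]string{"foo": "bar", "testCaseID": "144-97-143"}

	// Merge-style update (old behaviour): keys can be added or overwritten,
	// but never removed, so a later "removal" update has nothing to delete against.
	merged := maps.Clone(original)
	maps.Copy(merged, extra)

	// Replace-style update (new behaviour): the desired map is taken wholesale,
	// so sending the original map again effectively removes the extra tags.
	desired := maps.Clone(original)

	fmt.Println(maps.Equal(merged, desired))   // false: the merged map still carries the extra tags
	fmt.Println(maps.Equal(original, desired)) // true: the removal has converged
}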
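The variadic extraArgs parameters keep existing call sites unchanged while letting individual specs pass eksctl flags straight through. A hedged usage sketch against the helper signatures in this diff; region, cluster name, version and nodegroup name are placeholders, and only flags already exercised by the tests above are used:

package main

import (
	"log"

	"github.com/rancher/hosted-providers-e2e/hosted/eks/helper"
)

func main() {
	labels := map[string]string{"owner": "hosted-providers-e2e"}

	// Control-plane-only cluster, as in the "ONLY control plane" import test.
	if err := helper.CreateEKSClusterOnAWS("us-west-2", "example-cluster", "1.29", "1", labels, "--without-nodegroup"); err != nil {
		log.Fatal(err)
	}

	// Attach an additional nodegroup with eksctl defaults.
	if err := helper.AddNodeGroupOnAWS("extra-ng", "example-cluster", "us-west-2"); err != nil {
		log.Fatal(err)
	}

	// Query a single nodegroup's AMI type, as in the GPU provisioning test.
	amiType, err := helper.GetFromEKS("us-west-2", "example-cluster", "nodegroup", ".[].ImageID", "--name", "extra-ng")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("AMI type:", amiType)
}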