From 867c73fe21b99307aff35d452f85524f6c2198d5 Mon Sep 17 00:00:00 2001 From: Yusuke KUOKA Date: Sat, 22 Dec 2018 18:55:37 +0900 Subject: [PATCH] feat: Nodegroup as a resource `eksctl` now allows you to manage any number of nodegroups other than the initial nodegroup. Changes: - `eksctl create nodegroup --cluster CLUSTER_NAME [NODEGROUP_NAME]` is added Creates an additional nodegroup. The nodegroup name is randomly generated when omitted. - `eksctl get nodegroup --cluster CLUSTER_NAME` is added Lists all the nodegroups including the initial one and the additional ones. - `eksctl delete nodegroup --cluster CLUSTER_NAME NODEGROUP_NAME` is added Deletes a nodegroup by name. - `eksctl create cluster` has been changed to accept an optional `--nodegroup NODEGROUP_NAME` that specifies the nodegroup name. - `eksctl delete cluster CLUSTER_NAME` has been changed to recursively delete all the nodegroups including additional ones. - `eksctl scale nodegroup --cluster CLUSTER_NAME NODEGROUP_NAME` has been changed to accept the target nodegroup name as the second argument Checklist: - [x] Code compiles correctly (i.e `make build`) - [x] Added tests that cover your change (if possible) - [x] All tests passing (i.e. `make test`) - Added/modified documentation as required (such as the README) - Added yourself to the `humans.txt` file Acknowledgements: This is a successor of #281 and #332. All the original credits goes to Richard Case who has started #281. Thanks a lot, Richard! 
Signed-off-by: Yusuke Kuoka --- Makefile | 2 + README.md | 36 +++- humans.txt | 1 + integration/creategetdelete_test.go | 80 ++++++++- integration/integration_test.go | 2 + pkg/ami/auto_resolver_test.go | 12 +- pkg/az/az_test.go | 9 +- pkg/cfn/builder/api_test.go | 87 +++++++--- pkg/cfn/builder/nodegroup.go | 25 ++- pkg/cfn/manager/api.go | 19 ++- pkg/cfn/manager/manager_suite_test.go | 11 ++ pkg/cfn/manager/nodegroup.go | 141 +++++++++++++--- pkg/cfn/manager/nodegroup_test.go | 141 ++++++++++++++++ pkg/cfn/manager/tasks.go | 38 ++++- pkg/cfn/manager/tasks_test.go | 102 ++++++++++++ pkg/cfn/manager/template.go | 20 +++ pkg/cfn/manager/template_test.go | 98 +++++++++++ pkg/cfn/manager/waiters.go | 10 ++ pkg/ctl/cmdutils/nodegroup.go | 36 ++++ pkg/ctl/create/cluster.go | 26 +-- pkg/ctl/create/create.go | 1 + pkg/ctl/create/nodegroup.go | 157 ++++++++++++++++++ pkg/ctl/delete/delete.go | 1 + pkg/ctl/delete/delete.go.orig | 33 ++++ pkg/ctl/delete/nodegroup.go | 98 +++++++++++ pkg/ctl/get/get.go | 1 + pkg/ctl/get/get.go.orig | 38 +++++ pkg/ctl/get/nodegroup.go | 104 ++++++++++++ pkg/ctl/scale/nodegroup.go | 13 +- pkg/ctl/scale/nodegroup.go.orig | 79 +++++++++ pkg/eks/api/api.go | 12 +- pkg/eks/api/api_suite_test.go | 11 ++ pkg/eks/api/node_labels.go | 44 +++++ pkg/eks/api/node_labels_test.go | 34 ++++ pkg/eks/auth.go | 2 +- pkg/eks/eks_test.go | 11 +- pkg/eks/nodegroup.go | 35 ++++ pkg/nodebootstrap/assets.go | 8 +- pkg/nodebootstrap/assets/10-eksclt.al2.conf | 3 +- pkg/nodebootstrap/assets/bootstrap.ubuntu.sh | 1 + pkg/nodebootstrap/userdata.go | 17 +- pkg/nodebootstrap/userdata_al2.go | 14 +- pkg/nodebootstrap/userdata_ubuntu.go | 10 +- .../{ => mockprovider}/mock_provider.go | 2 +- pkg/utils/nodegroup_name.go | 23 +++ 45 files changed, 1510 insertions(+), 138 deletions(-) create mode 100644 pkg/cfn/manager/manager_suite_test.go create mode 100644 pkg/cfn/manager/nodegroup_test.go create mode 100644 pkg/cfn/manager/tasks_test.go create mode 100644 
pkg/cfn/manager/template.go create mode 100644 pkg/cfn/manager/template_test.go create mode 100644 pkg/ctl/cmdutils/nodegroup.go create mode 100644 pkg/ctl/create/nodegroup.go create mode 100644 pkg/ctl/delete/delete.go.orig create mode 100644 pkg/ctl/delete/nodegroup.go create mode 100644 pkg/ctl/get/get.go.orig create mode 100644 pkg/ctl/get/nodegroup.go create mode 100644 pkg/ctl/scale/nodegroup.go.orig create mode 100644 pkg/eks/api/api_suite_test.go create mode 100644 pkg/eks/api/node_labels.go create mode 100644 pkg/eks/api/node_labels_test.go rename pkg/testutils/{ => mockprovider}/mock_provider.go (99%) create mode 100644 pkg/utils/nodegroup_name.go diff --git a/Makefile b/Makefile index 8dbf291941a..5104a2df9c1 100644 --- a/Makefile +++ b/Makefile @@ -46,6 +46,7 @@ lint: ## Run linter over the codebase ci: test lint ## Target for CI system to invoke to run tests and linting TEST_CLUSTER ?= integration-test-dev +TEST_NODEGROUP ?= integration-test-dev .PHONY: integration-test-dev integration-test-dev: build ## Run the integration tests without cluster teardown. For use when developing integration tests. @./eksctl utils write-kubeconfig \ @@ -55,6 +56,7 @@ integration-test-dev: build ## Run the integration tests without cluster teardow $(TEST_ARGS) \ -args \ -eksctl.cluster=$(TEST_CLUSTER) \ + -eksctl.nodegroup=$(TEST_NODEGROUP) \ -eksctl.create=false \ -eksctl.delete=false \ -eksctl.kubeconfig=$(HOME)/.kube/eksctl/clusters/$(TEST_CLUSTER) diff --git a/README.md b/README.md index b72f6561c39..26ba8a78f03 100644 --- a/README.md +++ b/README.md @@ -168,12 +168,36 @@ To delete a cluster, run: eksctl delete cluster --name= [--region=] ``` +### Creating nodegroup + +You can add one or more nodegroups in addition to the initial nodegroup created along with the cluster. 
+ +To create an additional nodegroup, run: + +``` +eksctl create nodegroup --cluster= +``` + +### Listing nodegroups + +To list the details about a nodegroup or all of the nodegroups, use: + +``` +eksctl get nodegroup --cluster= [] +``` + ### Scaling nodegroup -The initial nodegroup can be scaled by using the `eksctl scale nodegroup` command. For example, to scale to 5 nodes: +A nodegroup can be scaled by using the `eksctl scale nodegroup` command: + +``` +eksctl scale nodegroup --cluster= --nodes= +``` + +For example, to scale the nodegroup `ng-abcd1234` to 5 nodes: ``` -eksctl scale nodegroup --name= --nodes=5 +eksctl scale nodegroup --cluster= --nodes=5 ng-abcd1234 ``` If the desired number of nodes is greater than the current maximum set on the ASG then the maximum value will be increased to match the number of requested nodes. And likewise for the minimum. @@ -182,6 +206,14 @@ Scaling a nodegroup works by modifying the nodegroup CloudFormation stack via a > NOTE: Scaling a nodegroup down/in (i.e. reducing the number of nodes) may result in errors as we rely purely on changes to the ASG. This means that the node(s) being removed/terminated aren't explicitly drained. This may be an area for improvement in the future. 
+### Deleting nodegroup + +To delete a nodegroup, run: + +``` +eksctl delete nodegroup --cluster= +``` + ### VPC Networking By default, `eksctl create cluster` instatiates a dedicated VPC, in order to avoid interference with any existing resources for a diff --git a/humans.txt b/humans.txt index 2e5acca1e09..b6d76fddc42 100644 --- a/humans.txt +++ b/humans.txt @@ -24,6 +24,7 @@ Michael Seiwald @mseiwald Anton Gruebel @gruebel Bryan Peterson @lazyshot Josue Abreu @gotjosh +Yusuke Kuoka @mumoshu /* Thanks */ diff --git a/integration/creategetdelete_test.go b/integration/creategetdelete_test.go index 7c288e841b3..6097fdefcdb 100644 --- a/integration/creategetdelete_test.go +++ b/integration/creategetdelete_test.go @@ -68,6 +68,7 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { }) Describe("when creating a cluster with 1 node", func() { + firstNgName := "ng-0" It("should not return an error", func() { if !doCreate { fmt.Fprintf(GinkgoWriter, "will use existing cluster %s", clusterName) @@ -83,6 +84,7 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { args := []string{"create", "cluster", "--name", clusterName, "--tags", "eksctl.cluster.k8s.io/v1alpha1/description=eksctl integration test", + "--nodegroup", firstNgName, "--node-type", "t2.medium", "--nodes", "1", "--region", region, @@ -108,7 +110,7 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { It("should have the required cloudformation stacks", func() { Expect(awsSession).To(HaveExistingStack(fmt.Sprintf("eksctl-%s-cluster", clusterName))) - Expect(awsSession).To(HaveExistingStack(fmt.Sprintf("eksctl-%s-nodegroup-%d", clusterName, 0))) + Expect(awsSession).To(HaveExistingStack(fmt.Sprintf("eksctl-%s-nodegroup-%s", clusterName, firstNgName))) }) It("should have created a valid kubectl config file", func() { @@ -184,12 +186,13 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { }) }) - Context("and scale the cluster", func() 
{ + Context("and scale the initial nodegroup", func() { It("should not return an error", func() { args := []string{"scale", "nodegroup", - "--name", clusterName, + "--cluster", clusterName, "--region", region, "--nodes", "2", + firstNgName, } command := exec.Command(eksctlPath, args...) @@ -216,6 +219,75 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { }) }) + Context("and add the second nodegroup", func() { + It("should not return an error", func() { + if nodegroupName == "" { + nodegroupName = "secondng" + } + + args := []string{"create", "nodegroup", + "--cluster", clusterName, + "--region", region, + "--nodes", "1", + nodegroupName, + } + + command := exec.Command(eksctlPath, args...) + cmdSession, err := gexec.Start(command, GinkgoWriter, GinkgoWriter) + + if err != nil { + Fail(fmt.Sprintf("error starting process: %v", err), 1) + } + + cmdSession.Wait(scaleTimeout) + Expect(cmdSession.ExitCode()).Should(Equal(0)) + }) + + It("should make it 3 nodes total", func() { + test, err := newKubeTest() + Expect(err).ShouldNot(HaveOccurred()) + defer test.Close() + + test.WaitForNodesReady(3, scaleTimeout) + + nodes := test.ListNodes(metav1.ListOptions{}) + + Expect(len(nodes.Items)).To(Equal(3)) + }) + + Context("and delete the second nodegroup", func() { + It("should not return an error", func() { + args := []string{"delete", "nodegroup", + "--cluster", clusterName, + "--region", region, + nodegroupName, + } + + command := exec.Command(eksctlPath, args...) 
+ cmdSession, err := gexec.Start(command, GinkgoWriter, GinkgoWriter) + + if err != nil { + Fail(fmt.Sprintf("error starting process: %v", err), 1) + } + + cmdSession.Wait(deleteTimeout) + Expect(cmdSession.ExitCode()).Should(Equal(0)) + }) + + It("should make it 2 nodes total", func() { + test, err := newKubeTest() + Expect(err).ShouldNot(HaveOccurred()) + defer test.Close() + + test.WaitForNodesReady(2, scaleTimeout) + + nodes := test.ListNodes(metav1.ListOptions{}) + + Expect(len(nodes.Items)).To(Equal(2)) + }) + }) + }) + Context("and deleting the cluster", func() { It("should not return an error", func() { if !doDelete { @@ -255,7 +327,7 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { } Expect(awsSession).ToNot(HaveExistingStack(fmt.Sprintf("eksctl-%s-cluster", clusterName))) - Expect(awsSession).ToNot(HaveExistingStack(fmt.Sprintf("eksctl-%s-nodegroup-%d", clusterName, 0))) + Expect(awsSession).ToNot(HaveExistingStack(fmt.Sprintf("eksctl-%s-nodegroup-ng-%d", clusterName, 0))) }) }) }) diff --git a/integration/integration_test.go b/integration/integration_test.go index 6c72b336634..4a26aeb925a 100644 --- a/integration/integration_test.go +++ b/integration/integration_test.go @@ -24,6 +24,7 @@ var ( // Flags to help with the development of the integration tests clusterName string + nodegroupName string doCreate bool doDelete bool kubeconfigPath string @@ -36,6 +37,7 @@ func init() { // Flags to help with the development of the integration tests flag.StringVar(&clusterName, "eksctl.cluster", "", "Cluster name (default: generate one)") + flag.StringVar(&nodegroupName, "eksctl.nodegroup", "", "Nodegroup name (default: generate one)") flag.BoolVar(&doCreate, "eksctl.create", true, "Skip the creation tests. 
Useful for debugging the tests") flag.BoolVar(&doDelete, "eksctl.delete", true, "Skip the cleanup after the tests have run") flag.StringVar(&kubeconfigPath, "eksctl.kubeconfig", "", "Path to kubeconfig (default: create it a temporary file)") diff --git a/pkg/ami/auto_resolver_test.go b/pkg/ami/auto_resolver_test.go index cce3ec48053..c6a327c14f7 100644 --- a/pkg/ami/auto_resolver_test.go +++ b/pkg/ami/auto_resolver_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/mock" . "github.com/weaveworks/eksctl/pkg/ami" "github.com/weaveworks/eksctl/pkg/eks" - "github.com/weaveworks/eksctl/pkg/testutils" + "github.com/weaveworks/eksctl/pkg/testutils/mockprovider" ) type returnAmi struct { @@ -22,7 +22,7 @@ var _ = Describe("AMI Auto Resolution", func() { Describe("When resolving an AMI to use", func() { var ( - p *testutils.MockProvider + p *mockprovider.MockProvider err error region string version string @@ -166,8 +166,8 @@ var _ = Describe("AMI Auto Resolution", func() { }) }) -func createProviders() (*eks.ClusterProvider, *testutils.MockProvider) { - p := testutils.NewMockProvider() +func createProviders() (*eks.ClusterProvider, *mockprovider.MockProvider) { + p := mockprovider.NewMockProvider() c := &eks.ClusterProvider{ Provider: p, @@ -176,7 +176,7 @@ func createProviders() (*eks.ClusterProvider, *testutils.MockProvider) { return c, p } -func addMockDescribeImages(p *testutils.MockProvider, expectedNamePattern string, amiId string, amiState string, createdDate string) { +func addMockDescribeImages(p *mockprovider.MockProvider, expectedNamePattern string, amiId string, amiState string, createdDate string) { p.MockEC2().On("DescribeImages", mock.MatchedBy(func(input *ec2.DescribeImagesInput) bool { for _, filter := range input.Filters { @@ -200,7 +200,7 @@ func addMockDescribeImages(p *testutils.MockProvider, expectedNamePattern string }, nil) } -func addMockDescribeImagesMultiple(p *testutils.MockProvider, expectedNamePattern string, returnAmis []returnAmi) 
{ +func addMockDescribeImagesMultiple(p *mockprovider.MockProvider, expectedNamePattern string, returnAmis []returnAmi) { images := make([]*ec2.Image, len(returnAmis)) for index, ami := range returnAmis { images[index] = &ec2.Image{ diff --git a/pkg/az/az_test.go b/pkg/az/az_test.go index ac86600a752..7972f7ddbf4 100644 --- a/pkg/az/az_test.go +++ b/pkg/az/az_test.go @@ -5,21 +5,20 @@ import ( . "github.com/weaveworks/eksctl/pkg/az" "github.com/weaveworks/eksctl/pkg/eks" - "github.com/weaveworks/eksctl/pkg/testutils" - "github.com/aws/aws-sdk-go/aws" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/stretchr/testify/mock" "github.com/aws/aws-sdk-go/service/ec2" + "github.com/weaveworks/eksctl/pkg/testutils/mockprovider" ) var _ = Describe("AZ", func() { Describe("When calling SelectZones", func() { var ( - p *testutils.MockProvider + p *mockprovider.MockProvider err error ) @@ -249,8 +248,8 @@ var _ = Describe("AZ", func() { }) }) -func createProviders() (*eks.ClusterProvider, *testutils.MockProvider) { - p := testutils.NewMockProvider() +func createProviders() (*eks.ClusterProvider, *mockprovider.MockProvider) { + p := mockprovider.NewMockProvider() c := &eks.ClusterProvider{ Provider: p, diff --git a/pkg/cfn/builder/api_test.go b/pkg/cfn/builder/api_test.go index 00135d2d9bd..cb35ce8cf2f 100644 --- a/pkg/cfn/builder/api_test.go +++ b/pkg/cfn/builder/api_test.go @@ -18,8 +18,8 @@ import ( "github.com/weaveworks/eksctl/pkg/cloudconfig" "github.com/weaveworks/eksctl/pkg/eks/api" "github.com/weaveworks/eksctl/pkg/nodebootstrap" - "github.com/weaveworks/eksctl/pkg/testutils" "github.com/weaveworks/eksctl/pkg/vpc" + "github.com/weaveworks/eksctl/pkg/testutils/mockprovider" ) const ( @@ -36,7 +36,7 @@ const ( type Template struct { Description string - Resources map[string]struct { + Resources map[string]struct { Properties struct { Tags []struct { Key interface{} @@ -44,7 +44,7 @@ type Template struct { PropagateAtLaunch string } - UserData string + 
UserData string PolicyDocument struct { Statement []struct { Action []string @@ -176,22 +176,28 @@ var _ = Describe("CloudFormation template builder API", func() { testAZs := []string{"us-west-2b", "us-west-2a", "us-west-2c"} - newClusterConfig := func() *api.ClusterConfig { + newClusterConfigAndNodegroup := func() (*api.ClusterConfig, *api.NodeGroup) { cfg := api.NewClusterConfig() ng := cfg.NewNodeGroup() cfg.Metadata.Region = "us-west-2" cfg.Metadata.Name = clusterName cfg.AvailabilityZones = testAZs + ng.Name = "ng-abcd1234" ng.InstanceType = "t2.medium" ng.AMIFamily = "AmazonLinux2" *cfg.VPC.CIDR = api.DefaultCIDR() + return cfg, ng + } + + newClusterConfig := func() *api.ClusterConfig { + cfg, _ := newClusterConfigAndNodegroup() return cfg } - p := testutils.NewMockProvider() + p := mockprovider.NewMockProvider() { joinCompare := func(input *ec2.DescribeSubnetsInput, compare string) bool { @@ -250,6 +256,7 @@ var _ = Describe("CloudFormation template builder API", func() { AMI: "", AMIFamily: "AmazonLinux2", InstanceType: "t2.medium", + Name: "ng-abcd1234", PrivateNetworking: false, }, }, @@ -300,9 +307,10 @@ var _ = Describe("CloudFormation template builder API", func() { }) Describe("AutoNameTag", func() { - cfg := newClusterConfig() + cfg, ng := newClusterConfigAndNodegroup() + cfg.CertificateAuthorityData = []byte("MyCA") - rs := NewNodeGroupResourceSet(cfg, "eksctl-test-123-cluster", 0) + rs := NewEmbeddedNodeGroupResourceSet(cfg, "eksctl-test-123-cluster", ng) err := rs.AddAllResources() It("should add all resources without errors", func() { @@ -347,9 +355,13 @@ var _ = Describe("CloudFormation template builder API", func() { cfg.Metadata.Name = clusterName cfg.AvailabilityZones = testAZs ng.InstanceType = "t2.medium" + ng.Name = "ng-abcd1234" - rs := NewNodeGroupResourceSet(cfg, "eksctl-test-123-cluster", 0) - rs.AddAllResources() + rs := NewEmbeddedNodeGroupResourceSet(cfg, "eksctl-test-123-cluster", ng) + err := rs.AddAllResources() + It("should add 
all resources without errors", func() { + Expect(err).ShouldNot(HaveOccurred()) + }) template, err := rs.RenderJSON() It("should serialise JSON without errors", func() { @@ -365,7 +377,7 @@ var _ = Describe("CloudFormation template builder API", func() { Expect(len(obj.Resources)).ToNot(Equal(0)) Expect(len(obj.Resources["NodeGroup"].Properties.Tags)).To(Equal(2)) Expect(obj.Resources["NodeGroup"].Properties.Tags[0].Key).To(Equal("Name")) - Expect(obj.Resources["NodeGroup"].Properties.Tags[0].Value).To(Equal(clusterName + "-0-Node")) + Expect(obj.Resources["NodeGroup"].Properties.Tags[0].Value).To(Equal(clusterName + "-ng-abcd1234-Node")) Expect(obj.Resources["NodeGroup"].Properties.Tags[0].PropagateAtLaunch).To(Equal("true")) Expect(obj.Resources["NodeGroup"].Properties.Tags[1].Key).To(Equal("kubernetes.io/cluster/" + clusterName)) Expect(obj.Resources["NodeGroup"].Properties.Tags[1].Value).To(Equal("owned")) @@ -374,7 +386,8 @@ var _ = Describe("CloudFormation template builder API", func() { }) Describe("NodeGroupAutoScaling", func() { - cfg := newClusterConfig() + cfg, ng := newClusterConfigAndNodegroup() + cfg.CertificateAuthorityData = []byte("MyCA") cfg.Addons = api.ClusterAddons{ WithIAM: api.AddonIAM{ @@ -382,8 +395,11 @@ var _ = Describe("CloudFormation template builder API", func() { }, } - rs := NewNodeGroupResourceSet(cfg, "eksctl-test-123-cluster", 0) - rs.AddAllResources() + rs := NewEmbeddedNodeGroupResourceSet(cfg, "eksctl-test-123-cluster", ng) + err := rs.AddAllResources() + It("should add all resources without errors", func() { + Expect(err).ShouldNot(HaveOccurred()) + }) template, err := rs.RenderJSON() It("should serialise JSON without errors", func() { @@ -419,13 +435,17 @@ var _ = Describe("CloudFormation template builder API", func() { cfg.Metadata.Region = "us-west-2" cfg.Metadata.Name = clusterName cfg.AvailabilityZones = testAZs + cfg.CertificateAuthorityData = []byte("MyCA") ng.AllowSSH = true ng.InstanceType = "t2.medium" 
ng.PrivateNetworking = true ng.AMIFamily = "AmazonLinux2" - rs := NewNodeGroupResourceSet(cfg, "eksctl-test-private-ng", 0) - rs.AddAllResources() + rs := NewEmbeddedNodeGroupResourceSet(cfg, "eksctl-test-private-ng", ng) + err := rs.AddAllResources() + It("should add all resources without errors", func() { + Expect(err).ShouldNot(HaveOccurred()) + }) template, err := rs.RenderJSON() It("should serialise JSON without errors", func() { @@ -477,18 +497,23 @@ var _ = Describe("CloudFormation template builder API", func() { cfg.Metadata.Region = "us-west-2" cfg.Metadata.Name = clusterName cfg.AvailabilityZones = testAZs + cfg.CertificateAuthorityData = []byte("MyCA") ng.AllowSSH = true ng.InstanceType = "t2.medium" ng.PrivateNetworking = false ng.AMIFamily = "AmazonLinux2" - rs := NewNodeGroupResourceSet(cfg, "eksctl-test-public-ng", 0) - rs.AddAllResources() + rs := NewEmbeddedNodeGroupResourceSet(cfg, "eksctl-test-public-ng", ng) + err := rs.AddAllResources() + It("should add all resources without errors", func() { + Expect(err).ShouldNot(HaveOccurred()) + }) template, err := rs.RenderJSON() It("should serialise JSON without errors", func() { Expect(err).ShouldNot(HaveOccurred()) }) + obj := Template{} It("should parse JSON withon errors", func() { err := json.Unmarshal(template, &obj) @@ -567,6 +592,7 @@ var _ = Describe("CloudFormation template builder API", func() { }, }, } + cfg.CertificateAuthorityData = []byte("MyCA") ng.AvailabilityZones = []string{testAZs[1]} ng.AllowSSH = false ng.InstanceType = "t2.medium" @@ -577,8 +603,11 @@ var _ = Describe("CloudFormation template builder API", func() { Expect(ng.AvailabilityZones).To(Equal([]string{"us-west-2a"})) }) - rs := NewNodeGroupResourceSet(cfg, "eksctl-test-public-ng", 0) - rs.AddAllResources() + rs := NewEmbeddedNodeGroupResourceSet(cfg, "eksctl-test-public-ng", ng) + err := rs.AddAllResources() + It("should add all resources without errors", func() { + Expect(err).ShouldNot(HaveOccurred()) + }) template, err 
:= rs.RenderJSON() It("should serialise JSON without errors", func() { @@ -649,7 +678,7 @@ var _ = Describe("CloudFormation template builder API", func() { } Describe("UserData - AmazonLinux2", func() { - cfg := newClusterConfig() + cfg, ng := newClusterConfigAndNodegroup() var c *cloudconfig.CloudConfig @@ -660,8 +689,11 @@ var _ = Describe("CloudFormation template builder API", func() { cfg.CertificateAuthorityData = caCertData cfg.NodeGroups[0].InstanceType = "m5.large" - rs := NewNodeGroupResourceSet(cfg, "eksctl-test-123-cluster", 0) - rs.AddAllResources() + rs := NewEmbeddedNodeGroupResourceSet(cfg, "eksctl-test-123-cluster", ng) + err = rs.AddAllResources() + It("should add all resources without errors", func() { + Expect(err).ShouldNot(HaveOccurred()) + }) template, err := rs.RenderJSON() It("should serialise JSON without errors", func() { @@ -691,6 +723,7 @@ var _ = Describe("CloudFormation template builder API", func() { Expect(strings.Split(kubeletEnv.Content, "\n")).To(Equal([]string{ "MAX_PODS=29", "CLUSTER_DNS=10.100.0.10", + "NODE_LABELS=", })) kubeletDropInUnit := getFile(c, "/etc/systemd/system/kubelet.service.d/10-eksclt.al2.conf") @@ -713,7 +746,7 @@ var _ = Describe("CloudFormation template builder API", func() { }) Describe("UserData - Ubuntu1804", func() { - cfg := newClusterConfig() + cfg, ng := newClusterConfigAndNodegroup() var c *cloudconfig.CloudConfig @@ -726,8 +759,11 @@ var _ = Describe("CloudFormation template builder API", func() { cfg.NodeGroups[0].AMIFamily = "Ubuntu1804" cfg.NodeGroups[0].InstanceType = "m5.large" - rs := NewNodeGroupResourceSet(cfg, "eksctl-test-123-cluster", 0) - rs.AddAllResources() + rs := NewEmbeddedNodeGroupResourceSet(cfg, "eksctl-test-123-cluster", ng) + err = rs.AddAllResources() + It("should add all resources without errors", func() { + Expect(err).ShouldNot(HaveOccurred()) + }) template, err := rs.RenderJSON() It("should serialise JSON without errors", func() { @@ -763,6 +799,7 @@ var _ = 
Describe("CloudFormation template builder API", func() { Expect(strings.Split(kubeletEnv.Content, "\n")).To(Equal([]string{ "MAX_PODS=29", "CLUSTER_DNS=172.20.0.10", + "NODE_LABELS=", })) kubeconfig := getFile(c, "/etc/eksctl/kubeconfig.yaml") diff --git a/pkg/cfn/builder/nodegroup.go b/pkg/cfn/builder/nodegroup.go index 81827ba03e6..4e17ce040d4 100644 --- a/pkg/cfn/builder/nodegroup.go +++ b/pkg/cfn/builder/nodegroup.go @@ -13,7 +13,6 @@ import ( // NodeGroupResourceSet stores the resource information of the node group type NodeGroupResourceSet struct { rs *resourceSet - id int clusterSpec *api.ClusterConfig spec *api.NodeGroup clusterStackName string @@ -24,15 +23,25 @@ type NodeGroupResourceSet struct { userData *gfn.Value } -// NewNodeGroupResourceSet returns a resource set for the new node group -func NewNodeGroupResourceSet(spec *api.ClusterConfig, clusterStackName string, id int) *NodeGroupResourceSet { +// NewEmbeddedNodeGroupResourceSet returns a resource set for a node group embedded in a cluster config +func NewEmbeddedNodeGroupResourceSet(spec *api.ClusterConfig, clusterStackName string, ng *api.NodeGroup) *NodeGroupResourceSet { return &NodeGroupResourceSet{ rs: newResourceSet(), - id: id, clusterStackName: clusterStackName, - nodeGroupName: fmt.Sprintf("%s-%d", spec.Metadata.Name, id), + nodeGroupName: ng.Name, clusterSpec: spec, - spec: spec.NodeGroups[id], + spec: ng, + } +} + +// NewNodeGroupResourceSet returns a resource set for a node group +func NewNodeGroupResourceSet(spec *api.ClusterConfig, clusterStackName string, ng *api.NodeGroup) *NodeGroupResourceSet { + return &NodeGroupResourceSet{ + rs: newResourceSet(), + clusterStackName: clusterStackName, + nodeGroupName: ng.Name, + clusterSpec: spec, + spec: ng, } } @@ -46,7 +55,7 @@ func (n *NodeGroupResourceSet) AddAllResources() error { n.vpc = makeImportValue(n.clusterStackName, cfnOutputClusterVPC) - userData, err := nodebootstrap.NewUserData(n.clusterSpec, n.id) + userData, err := 
nodebootstrap.NewUserData(n.clusterSpec, n.spec) if err != nil { return err } @@ -154,7 +163,7 @@ func (n *NodeGroupResourceSet) addResourcesForNodeGroup() error { "MaxSize": fmt.Sprintf("%d", n.spec.MaxSize), "VPCZoneIdentifier": vpcZoneIdentifier, "Tags": []map[string]interface{}{ - {"Key": "Name", "Value": fmt.Sprintf("%s-Node", n.nodeGroupName), "PropagateAtLaunch": "true"}, + {"Key": "Name", "Value": fmt.Sprintf("%s-%s-Node", n.clusterSpec.Metadata.Name, n.nodeGroupName), "PropagateAtLaunch": "true"}, {"Key": "kubernetes.io/cluster/" + n.clusterSpec.Metadata.Name, "Value": "owned", "PropagateAtLaunch": "true"}, }, }, diff --git a/pkg/cfn/manager/api.go b/pkg/cfn/manager/api.go index 74f9464b69d..296c392268a 100644 --- a/pkg/cfn/manager/api.go +++ b/pkg/cfn/manager/api.go @@ -17,8 +17,8 @@ const ( // ClusterNameTag defines the tag of the clsuter name ClusterNameTag = "eksctl.cluster.k8s.io/v1alpha1/cluster-name" - // NodeGroupIDTag defines the tag of the ndoe group id - NodeGroupIDTag = "eksctl.cluster.k8s.io/v1alpha1/nodegroup-id" + // NodeGroupNameTag defines the tag of the node group name + NodeGroupNameTag = "eksctl.cluster.k8s.io/v1alpha1/nodegroup-name" ) var ( @@ -237,6 +237,21 @@ func (c *StackCollection) WaitDeleteStack(name string) error { return c.doWaitUntilStackIsDeleted(i) } +// WaitDeleteStackTask kills a stack by name and waits for DELETED status +// When nil is returned, the `errs` channel must receive an `error` object or `nil`. 
+func (c *StackCollection) WaitDeleteStackTask(name string, errs chan error) error { + i, err := c.DeleteStack(name) + if err != nil { + return err + } + + logger.Info("waiting for stack %q to get deleted", *i.StackName) + + c.waitUntilStackIsDeleted(i, errs) + + return nil +} + // DescribeStacks describes the existing stacks func (c *StackCollection) DescribeStacks(name string) ([]*Stack, error) { stacks, err := c.ListStacks(fmt.Sprintf("^(eksctl|EKS)-%s-((cluster|nodegroup-\\d+)|(VPC|ServiceRole|DefaultNodeGroup))$", name)) diff --git a/pkg/cfn/manager/manager_suite_test.go b/pkg/cfn/manager/manager_suite_test.go new file mode 100644 index 00000000000..8fe6a9ae29d --- /dev/null +++ b/pkg/cfn/manager/manager_suite_test.go @@ -0,0 +1,11 @@ +package manager + +import ( + "testing" + + "github.com/weaveworks/eksctl/pkg/testutils" +) + +func TestCFNManager(t *testing.T) { + testutils.RegisterAndRun(t, "cfn manager Suite") +} diff --git a/pkg/cfn/manager/nodegroup.go b/pkg/cfn/manager/nodegroup.go index c7fb47819ad..3eab5d7e117 100644 --- a/pkg/cfn/manager/nodegroup.go +++ b/pkg/cfn/manager/nodegroup.go @@ -3,11 +3,11 @@ package manager import ( "bytes" "fmt" + "time" - "github.com/aws/aws-sdk-go/aws" cfn "github.com/aws/aws-sdk-go/service/cloudformation" - "github.com/kris-nova/logger" "github.com/pkg/errors" + "github.com/kris-nova/logger" "github.com/tidwall/gjson" "github.com/tidwall/sjson" "github.com/weaveworks/eksctl/pkg/cfn/builder" @@ -18,23 +18,58 @@ const ( desiredCapacityPath = "Resources.NodeGroup.Properties.DesiredCapacity" maxSizePath = "Resources.NodeGroup.Properties.MaxSize" minSizePath = "Resources.NodeGroup.Properties.MinSize" + instanceTypePath = "Resources.NodeLaunchConfig.Properties.InstanceType" + imageIDPath = "Resources.NodeLaunchConfig.Properties.ImageId" ) -func (c *StackCollection) makeNodeGroupStackName(id int) string { - return fmt.Sprintf("eksctl-%s-nodegroup-%d", c.spec.Metadata.Name, id) +// NodeGroupSummary represents a summary of a 
nodegroup stack +type NodeGroupSummary struct { + StackName string + Cluster string + Name string + MaxSize int + MinSize int + DesiredCapacity int + InstanceType string + ImageID string + CreationTime *time.Time +} + +// MakeNodeGroupStackName generates the name of the node group identified by its ID, isolated by the cluster this StackCollection operates on +func (c *StackCollection) MakeNodeGroupStackName(name string) string { + return fmt.Sprintf("eksctl-%s-nodegroup-%s", c.spec.Metadata.Name, name) +} + +// CreateEmbeddedNodeGroup creates the nodegroup embedded in the cluster spec +func (c *StackCollection) CreateEmbeddedNodeGroup(errs chan error, data interface{}) error { + ng := data.(*api.NodeGroup) + name := c.MakeNodeGroupStackName(ng.Name) + logger.Info("creating nodegroup stack %q", name) + stack := builder.NewEmbeddedNodeGroupResourceSet(c.spec, c.makeClusterStackName(), ng) + if err := stack.AddAllResources(); err != nil { + return err + } + + c.tags = append(c.tags, newTag(NodeGroupNameTag, fmt.Sprintf("%s", ng.Name))) + + for k, v := range ng.Tags { + c.tags = append(c.tags, newTag(k, v)) + } + + return c.CreateStack(name, stack, nil, errs) } // CreateNodeGroup creates the nodegroup func (c *StackCollection) CreateNodeGroup(errs chan error, data interface{}) error { ng := data.(*api.NodeGroup) - name := c.makeNodeGroupStackName(ng.ID) + name := c.MakeNodeGroupStackName(ng.Name) logger.Info("creating nodegroup stack %q", name) - stack := builder.NewNodeGroupResourceSet(c.spec, c.makeClusterStackName(), ng.ID) + stack := builder.NewNodeGroupResourceSet(c.spec, c.makeClusterStackName(), ng) if err := stack.AddAllResources(); err != nil { return err } - c.tags = append(c.tags, newTag(NodeGroupIDTag, fmt.Sprintf("%d", ng.ID))) + c.tags = append(c.tags, newTag(NodeGroupNameTag, fmt.Sprintf("%s", ng.Name))) for k, v := range ng.Tags { c.tags = append(c.tags, newTag(k, v)) @@ -44,7 +79,7 @@ func (c *StackCollection) CreateNodeGroup(errs chan error, data 
interface{}) err } func (c *StackCollection) listAllNodeGroups() ([]string, error) { - stacks, err := c.ListStacks(fmt.Sprintf("^eksctl-%s-nodegroup-\\d$", c.spec.Metadata.Name)) + stacks, err := c.ListStacks(fmt.Sprintf("^eksctl-%s-nodegroup-.+$", c.spec.Metadata.Name)) if err != nil { return nil, err } @@ -53,7 +88,7 @@ func (c *StackCollection) listAllNodeGroups() ([]string, error) { if *s.StackStatus == cfn.StackStatusDeleteComplete { continue } - stackNames = append(stackNames, *s.StackName) + stackNames = append(stackNames, getNodeGroupName(s.Tags)) } logger.Debug("nodegroups = %v", stackNames) return stackNames, nil @@ -63,32 +98,33 @@ func (c *StackCollection) listAllNodeGroups() ([]string, error) { func (c *StackCollection) DeleteNodeGroup(errs chan error, data interface{}) error { defer close(errs) name := data.(string) - _, err := c.DeleteStack(name) - return err + stack := c.MakeNodeGroupStackName(name) + _, err := c.DeleteStack(stack) + errs <- err + return nil } // WaitDeleteNodeGroup waits until the nodegroup is deleted func (c *StackCollection) WaitDeleteNodeGroup(errs chan error, data interface{}) error { - defer close(errs) name := data.(string) - return c.WaitDeleteStack(name) + stack := c.MakeNodeGroupStackName(name) + return c.WaitDeleteStackTask(stack, errs) } // ScaleInitialNodeGroup will scale the first nodegroup (ID: 0) func (c *StackCollection) ScaleInitialNodeGroup() error { - return c.ScaleNodeGroup(0) + return c.ScaleNodeGroup(c.spec.NodeGroups[0]) } // ScaleNodeGroup will scale an existing nodegroup -func (c *StackCollection) ScaleNodeGroup(id int) error { - ng := c.spec.NodeGroups[id] +func (c *StackCollection) ScaleNodeGroup(ng *api.NodeGroup) error { clusterName := c.makeClusterStackName() c.spec.ClusterStackName = clusterName - name := c.makeNodeGroupStackName(id) + name := c.MakeNodeGroupStackName(ng.Name) logger.Info("scaling nodegroup stack %q in cluster %s", name, clusterName) // Get current stack - template, err := 
c.getStackTemplate(name) + template, err := c.GetStackTemplate(name) if err != nil { return errors.Wrapf(err, "error getting stack template %s", name) } @@ -136,15 +172,72 @@ func (c *StackCollection) ScaleNodeGroup(id int) error { return c.UpdateStack(name, "scale-nodegroup", descriptionBuffer.String(), []byte(template), nil) } -func (c *StackCollection) getStackTemplate(stackName string) (string, error) { - input := &cfn.GetTemplateInput{ - StackName: aws.String(stackName), +// GetNodeGroupSummaries returns a list of summaries for the nodegroups of a cluster +func (c *StackCollection) GetNodeGroupSummaries() ([]*NodeGroupSummary, error) { + stacks, err := c.ListStacks(fmt.Sprintf("^(eksctl|EKS)-%s-nodegroup-.+$", c.spec.Metadata.Name), cfn.StackStatusCreateComplete) + if err != nil { + return nil, errors.Wrap(err, "getting nodegroup stacks") + } + + summaries := []*NodeGroupSummary{} + for _, stack := range stacks { + logger.Info("stack %s\n", *stack.StackName) + logger.Debug("stack = %#v", stack) + + summary, err := c.mapStackToNodeGroupSummary(stack) + if err != nil { + return nil, errors.New("error mapping stack to node group summary") + } + + summaries = append(summaries, summary) } - output, err := c.provider.CloudFormation().GetTemplate(input) + return summaries, nil +} + +func (c *StackCollection) mapStackToNodeGroupSummary(stack *Stack) (*NodeGroupSummary, error) { + template, err := c.GetStackTemplate(*stack.StackName) if err != nil { - return "", err + return nil, errors.Wrapf(err, "error getting Cloudformation template for stack %s", *stack.StackName) } - return *output.TemplateBody, nil + cluster := getClusterName(stack.Tags) + name := getNodeGroupName(stack.Tags) + maxSize := gjson.Get(template, maxSizePath) + minSize := gjson.Get(template, minSizePath) + desired := gjson.Get(template, desiredCapacityPath) + instanceType := gjson.Get(template, instanceTypePath) + imageID := gjson.Get(template, imageIDPath) + + summary := &NodeGroupSummary{ + 
StackName: *stack.StackName, + Cluster: cluster, + Name: name, + MaxSize: int(maxSize.Int()), + MinSize: int(minSize.Int()), + DesiredCapacity: int(desired.Int()), + InstanceType: instanceType.String(), + ImageID: imageID.String(), + CreationTime: stack.CreationTime, + } + + return summary, nil +} + +func getNodeGroupName(tags []*cfn.Tag) string { + for _, tag := range tags { + if *tag.Key == NodeGroupNameTag { + return *tag.Value + } + } + return "" +} + +func getClusterName(tags []*cfn.Tag) string { + for _, tag := range tags { + if *tag.Key == ClusterNameTag { + return *tag.Value + } + } + return "" } diff --git a/pkg/cfn/manager/nodegroup_test.go b/pkg/cfn/manager/nodegroup_test.go new file mode 100644 index 00000000000..4fd7f558edb --- /dev/null +++ b/pkg/cfn/manager/nodegroup_test.go @@ -0,0 +1,141 @@ +package manager + +import ( + "github.com/aws/aws-sdk-go/aws" + cfn "github.com/aws/aws-sdk-go/service/cloudformation" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/stretchr/testify/mock" + "github.com/weaveworks/eksctl/pkg/eks/api" + "errors" + "github.com/weaveworks/eksctl/pkg/testutils/mockprovider" +) + +var _ = Describe("StackCollection NodeGroup", func() { + var ( + cc *api.ClusterConfig + sc *StackCollection + + p *mockprovider.MockProvider + ) + + testAZs := []string{"us-west-2b", "us-west-2a", "us-west-2c"} + + newClusterConfig := func(clusterName string) *api.ClusterConfig { + cfg := api.NewClusterConfig() + ng := cfg.NewNodeGroup() + + cfg.Metadata.Region = "us-west-2" + cfg.Metadata.Name = clusterName + cfg.AvailabilityZones = testAZs + ng.InstanceType = "t2.medium" + ng.AMIFamily = "AmazonLinux2" + + *cfg.VPC.CIDR = api.DefaultCIDR() + + return cfg + } + + Describe("GetNodeGroupSummaries", func() { + Context("With a cluster name", func() { + var ( + clusterName string + err error + out []*NodeGroupSummary + ) + + JustBeforeEach(func() { + p = mockprovider.NewMockProvider() + + cc = newClusterConfig(clusterName) + + sc = 
NewStackCollection(p, cc) + + p.MockCloudFormation().On("GetTemplate", mock.MatchedBy(func(input *cfn.GetTemplateInput) bool { + return input.StackName != nil && *input.StackName == "eksctl-test-cluster-nodegroup-12345" + })).Return(&cfn.GetTemplateOutput{ + TemplateBody: aws.String("TEMPLATE_BODY"), + }, nil) + + p.MockCloudFormation().On("GetTemplate", mock.Anything).Return(nil, errors.New("GetTemplate failed")) + + p.MockCloudFormation().On("ListStacksPages", mock.MatchedBy(func(input *cfn.ListStacksInput) bool { + return input.StackStatusFilter != nil && len(input.StackStatusFilter) > 0 && *input.StackStatusFilter[0] == cfn.StackStatusCreateComplete + }), mock.Anything).Run(func(args mock.Arguments) { + consume := args[1].(func(p *cfn.ListStacksOutput, last bool) (shouldContinue bool)) + out := &cfn.ListStacksOutput{ + StackSummaries: []*cfn.StackSummary{ + { + StackName: aws.String("eksctl-test-cluster-nodegroup-12345"), + }, + }, + } + cont := consume(out, true) + if !cont { + panic("unexpected return value from the paging function: shouldContinue was false. 
It becomes false only when subsequent DescribeStacks call(s) fail, which isn't expected in this test scenario") + } + }).Return(nil) + + p.MockCloudFormation().On("ListStacksPages", mock.Anything).Return(nil, errors.New("ListStacksPage failed")) + + p.MockCloudFormation().On("DescribeStacks", mock.MatchedBy(func(input *cfn.DescribeStacksInput) bool { + return input.StackName != nil && *input.StackName == "eksctl-test-cluster-nodegroup-12345" + })).Return(&cfn.DescribeStacksOutput{ + Stacks: []*cfn.Stack{ + { + StackName: aws.String("eksctl-test-cluster-nodegroup-12345"), + StackId: aws.String("eksctl-test-cluster-nodegroup-12345-id"), + }, + }, + }, nil) + + p.MockCloudFormation().On("DescribeStacks", mock.Anything).Return(nil, errors.New("DescribeStacks failed")) + }) + + Context("With no matching stacks", func() { + BeforeEach(func() { + clusterName = "test-cluster-non-existent" + }) + + JustBeforeEach(func() { + out, err = sc.GetNodeGroupSummaries() + }) + + It("should not error", func() { + Expect(err).NotTo(HaveOccurred()) + }) + + It("should not have called AWS CloudFormation GetTemplate", func() { + Expect(p.MockCloudFormation().AssertNumberOfCalls(GinkgoT(), "GetTemplate", 0)).To(BeTrue()) + }) + + It("the output should equal the expectation", func() { + Expect(out).To(HaveLen(0)) + }) + }) + + Context("With matching stacks", func() { + BeforeEach(func() { + clusterName = "test-cluster" + }) + + JustBeforeEach(func() { + out, err = sc.GetNodeGroupSummaries() + }) + + It("should not error", func() { + Expect(err).NotTo(HaveOccurred()) + }) + + It("should have called AWS CloudFormation GetTemplate once", func() { + Expect(p.MockCloudFormation().AssertNumberOfCalls(GinkgoT(), "GetTemplate", 1)).To(BeTrue()) + }) + + It("the output should equal the expectation", func() { + Expect(out).To(HaveLen(1)) + Expect(out[0].StackName).To(Equal("eksctl-test-cluster-nodegroup-12345")) + }) + }) + }) + }) +}) diff --git a/pkg/cfn/manager/tasks.go b/pkg/cfn/manager/tasks.go 
index cb169a95c48..3268909f6fb 100644 --- a/pkg/cfn/manager/tasks.go +++ b/pkg/cfn/manager/tasks.go @@ -39,6 +39,18 @@ func Run(passError func(error), tasks ...task) { wg.Wait() } +// RunTask runs a single task with a proper error handling +func (s *StackCollection) RunTask(call func(chan error, interface{}) error, data interface{}) []error { + errs := []error{} + appendErr := func(err error) { + errs = append(errs, err) + } + if Run(appendErr, task{call: call, data: data}); len(errs) > 0 { + return errs + } + return nil +} + // CreateClusterWithNodeGroups runs all tasks required to create // the stacks (a cluster and one or more nodegroups); any errors // will be returned as a slice as soon as one of the tasks or group @@ -55,7 +67,31 @@ func (s *StackCollection) CreateClusterWithNodeGroups() []error { createAllNodeGroups := []task{} for i := range s.spec.NodeGroups { t := task{ - call: s.CreateNodeGroup, + call: s.CreateEmbeddedNodeGroup, + data: s.spec.NodeGroups[i], + } + createAllNodeGroups = append(createAllNodeGroups, t) + } + if Run(appendErr, createAllNodeGroups...); len(errs) > 0 { + return errs + } + + return nil +} + +// CreateNodeGroups runs all tasks required to create the node groups; +// any errors will be returned as a slice as soon as one of the tasks +// or group of tasks is completed +func (s *StackCollection) CreateNodeGroups() []error { + errs := []error{} + appendErr := func(err error) { + errs = append(errs, err) + } + + createAllNodeGroups := []task{} + for i := range s.spec.NodeGroups { + t := task{ + call: s.CreateEmbeddedNodeGroup, data: s.spec.NodeGroups[i], } createAllNodeGroups = append(createAllNodeGroups, t) diff --git a/pkg/cfn/manager/tasks_test.go b/pkg/cfn/manager/tasks_test.go new file mode 100644 index 00000000000..7a0c53581c3 --- /dev/null +++ b/pkg/cfn/manager/tasks_test.go @@ -0,0 +1,102 @@ +package manager + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/weaveworks/eksctl/pkg/eks/api" + "errors" + "github.com/weaveworks/eksctl/pkg/testutils/mockprovider" +) + +var _ = Describe("StackCollection Tasks", func() { + var ( + cc *api.ClusterConfig + sc *StackCollection + + p *mockprovider.MockProvider + + call func(chan error, interface{}) error + ) + + testAZs := []string{"us-west-2b", "us-west-2a", "us-west-2c"} + + newClusterConfig := func(clusterName string) *api.ClusterConfig { + cfg := api.NewClusterConfig() + ng := cfg.NewNodeGroup() + + cfg.Metadata.Region = "us-west-2" + cfg.Metadata.Name = clusterName + cfg.AvailabilityZones = testAZs + ng.InstanceType = "t2.medium" + ng.AMIFamily = "AmazonLinux2" + + *cfg.VPC.CIDR = api.DefaultCIDR() + + return cfg + } + + Describe("RunTask", func() { + Context("With a cluster name", func() { + var ( + clusterName string + errs []error + + sucecssfulData []string + ) + + BeforeEach(func() { + clusterName = "test-cluster" + + p = mockprovider.NewMockProvider() + + cc = newClusterConfig(clusterName) + + sc = NewStackCollection(p, cc) + + sucecssfulData = []string{} + + call = func(errs chan error, data interface{}) error { + s := data.(string) + if s == "fail" { + return errors.New("call failed") + } + + go func() { + defer close(errs) + + sucecssfulData = append(sucecssfulData, s) + + errs <- nil + }() + + return nil + } + }) + + Context("With an unsuccessful call", func() { + JustBeforeEach(func() { + errs = sc.RunTask(call, "fail") + }) + + It("should error", func() { + Expect(errs).To(HaveLen(1)) + Expect(errs[0]).To(HaveOccurred()) + }) + }) + + Context("With a successful call", func() { + JustBeforeEach(func() { + errs = sc.RunTask(call, "ok") + }) + + It("should not error", func() { + Expect(errs).To(HaveLen(0)) + }) + + It("should have made a side-effect with the successful data", func() { + Expect(sucecssfulData).To(Equal([]string{"ok"})) + }) + }) + }) + }) +}) diff --git a/pkg/cfn/manager/template.go b/pkg/cfn/manager/template.go 
new file mode 100644 index 00000000000..9b8f2b0f383 --- /dev/null +++ b/pkg/cfn/manager/template.go @@ -0,0 +1,20 @@ +package manager + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudformation" +) + +// GetStackTemplate gets the Cloudformation template for a stack +func (c *StackCollection) GetStackTemplate(stackName string) (string, error) { + input := &cloudformation.GetTemplateInput{ + StackName: aws.String(stackName), + } + + output, err := c.provider.CloudFormation().GetTemplate(input) + if err != nil { + return "", err + } + + return *output.TemplateBody, nil +} diff --git a/pkg/cfn/manager/template_test.go b/pkg/cfn/manager/template_test.go new file mode 100644 index 00000000000..a2c6ca10713 --- /dev/null +++ b/pkg/cfn/manager/template_test.go @@ -0,0 +1,98 @@ +package manager + +import ( + "github.com/aws/aws-sdk-go/aws" + cfn "github.com/aws/aws-sdk-go/service/cloudformation" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/stretchr/testify/mock" + "github.com/weaveworks/eksctl/pkg/eks/api" + "errors" + "github.com/weaveworks/eksctl/pkg/testutils/mockprovider" +) + +var _ = Describe("StackCollection Template", func() { + var ( + cc *api.ClusterConfig + sc *StackCollection + + p *mockprovider.MockProvider + ) + + testAZs := []string{"us-west-2b", "us-west-2a", "us-west-2c"} + + newClusterConfig := func(clusterName string) *api.ClusterConfig { + cfg := api.NewClusterConfig() + ng := cfg.NewNodeGroup() + + cfg.Metadata.Region = "us-west-2" + cfg.Metadata.Name = clusterName + cfg.AvailabilityZones = testAZs + ng.InstanceType = "t2.medium" + ng.AMIFamily = "AmazonLinux2" + + *cfg.VPC.CIDR = api.DefaultCIDR() + + return cfg + } + + Describe("GetTemplate", func() { + Context("With a cluster name", func() { + var ( + clusterName string + err error + out string + ) + + BeforeEach(func() { + clusterName = "test-cluster" + + p = mockprovider.NewMockProvider() + + cc = newClusterConfig(clusterName) + + sc = 
NewStackCollection(p, cc) + + p.MockCloudFormation().On("GetTemplate", mock.MatchedBy(func(input *cfn.GetTemplateInput) bool { + return input.StackName != nil && *input.StackName == "foobar" + })).Return(&cfn.GetTemplateOutput{ + TemplateBody: aws.String("TEMPLATE_BODY"), + }, nil) + + p.MockCloudFormation().On("GetTemplate", mock.Anything).Return(nil, errors.New("GetTemplate failed")) + }) + + Context("With a non-existing stack name", func() { + JustBeforeEach(func() { + out, err = sc.GetStackTemplate("non_existing_stack") + }) + + It("should error", func() { + Expect(err).To(HaveOccurred()) + }) + + It("should have called AWS CloudFormation service once", func() { + Expect(p.MockCloudFormation().AssertNumberOfCalls(GinkgoT(), "GetTemplate", 1)).To(BeTrue()) + }) + }) + + Context("With an existing stack name", func() { + JustBeforeEach(func() { + out, err = sc.GetStackTemplate("foobar") + }) + + It("should not error", func() { + Expect(err).NotTo(HaveOccurred()) + }) + + It("should have called AWS CloudFormation service once", func() { + Expect(p.MockCloudFormation().AssertNumberOfCalls(GinkgoT(), "GetTemplate", 1)).To(BeTrue()) + }) + + It("the output should equal the expectation", func() { + Expect(out).To(Equal("TEMPLATE_BODY")) + }) + }) + }) + }) +}) diff --git a/pkg/cfn/manager/waiters.go b/pkg/cfn/manager/waiters.go index b1c566d27e4..0c071252422 100644 --- a/pkg/cfn/manager/waiters.go +++ b/pkg/cfn/manager/waiters.go @@ -261,6 +261,16 @@ func (c *StackCollection) doWaitUntilStackIsDeleted(i *Stack) error { ) } +func (c *StackCollection) waitUntilStackIsDeleted(i *Stack, errs chan error) { + defer close(errs) + + if err := c.doWaitUntilStackIsDeleted(i); err != nil { + errs <- err + return + } + errs <- nil +} + func (c *StackCollection) doWaitUntilStackIsUpdated(i *Stack) error { return c.waitWithAcceptors(i, makeAcceptors( diff --git a/pkg/ctl/cmdutils/nodegroup.go b/pkg/ctl/cmdutils/nodegroup.go new file mode 100644 index 00000000000..d39a7ef4a39 --- 
/dev/null +++ b/pkg/ctl/cmdutils/nodegroup.go @@ -0,0 +1,36 @@ +package cmdutils + +import ( + "github.com/spf13/pflag" + "github.com/weaveworks/eksctl/pkg/ami" + "github.com/weaveworks/eksctl/pkg/eks/api" +) + +const ( + defaultNodeType = "m5.large" + defaultSSHPublicKey = "~/.ssh/id_rsa.pub" +) + +// AddCommonCreateNodeGroupFlags adds common flags for creating a node group +func AddCommonCreateNodeGroupFlags(fs *pflag.FlagSet, p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.NodeGroup) { + fs.StringVarP(&ng.InstanceType, "node-type", "t", defaultNodeType, "node instance type") + fs.IntVarP(&ng.DesiredCapacity, "nodes", "N", api.DefaultNodeCount, "total number of nodes (for a static ASG)") + + // TODO: https://github.com/weaveworks/eksctl/issues/28 + fs.IntVarP(&ng.MinSize, "nodes-min", "m", 0, "minimum nodes in ASG") + fs.IntVarP(&ng.MaxSize, "nodes-max", "M", 0, "maximum nodes in ASG") + + fs.IntVarP(&ng.VolumeSize, "node-volume-size", "", 0, "Node volume size (in GB)") + fs.IntVar(&ng.MaxPodsPerNode, "max-pods-per-node", 0, "maximum number of pods per node (set automatically if unspecified)") + + fs.BoolVar(&ng.AllowSSH, "ssh-access", false, "control SSH access for nodes") + fs.StringVar(&ng.SSHPublicKeyPath, "ssh-public-key", defaultSSHPublicKey, "SSH public key to use for nodes (import from local path, or use existing EC2 key pair)") + + fs.StringVar(&ng.AMI, "node-ami", ami.ResolverStatic, "Advanced use cases only. If 'static' is supplied (default) then eksctl will use static AMIs; if 'auto' is supplied then eksctl will automatically set the AMI based on version/region/instance type; if any other value is supplied it will override the AMI to use for the nodes. Use with extreme care.") + fs.StringVar(&ng.AMIFamily, "node-ami-family", ami.ImageFamilyAmazonLinux2, "Advanced use cases only. 
If 'AmazonLinux2' is supplied (default), then eksctl will use the offical AWS EKS AMIs (Amazon Linux 2); if 'Ubuntu1804' is supplied, then eksctl will use the offical Canonical EKS AMIs (Ubuntu 18.04).") + + fs.BoolVarP(&ng.PrivateNetworking, "node-private-networking", "P", false, "whether to make nodegroup networking private") + + fs.Var(&ng.Labels, "node-labels", `labels to set for the nodegroup, e.g. "partition=backend,nodeclass=hugememory"`) + fs.StringSliceVar(&ng.AvailabilityZones, "node-zones", nil, "(iherited from the cluster if unspecified)") +} diff --git a/pkg/ctl/create/cluster.go b/pkg/ctl/create/cluster.go index cf9f157cd25..13ecac7a410 100644 --- a/pkg/ctl/create/cluster.go +++ b/pkg/ctl/create/cluster.go @@ -9,7 +9,6 @@ import ( "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/pflag" - "github.com/weaveworks/eksctl/pkg/ami" "github.com/weaveworks/eksctl/pkg/ctl/cmdutils" "github.com/weaveworks/eksctl/pkg/eks" "github.com/weaveworks/eksctl/pkg/eks/api" @@ -19,11 +18,6 @@ import ( "github.com/weaveworks/eksctl/pkg/vpc" ) -const ( - defaultNodeType = "m5.large" - defaultSSHPublicKey = "~/.ssh/id_rsa.pub" -) - var ( writeKubeconfig bool kubeconfigPath string @@ -65,24 +59,8 @@ func createClusterCmd(g *cmdutils.Grouping) *cobra.Command { }) group.InFlagSet("Initial nodegroup", func(fs *pflag.FlagSet) { - fs.IntVarP(&ng.DesiredCapacity, "nodes", "N", api.DefaultNodeCount, "total number of nodes (desired capacity of ASG)") - - // TODO: https://github.com/weaveworks/eksctl/issues/28 - fs.IntVarP(&ng.MinSize, "nodes-min", "m", 0, "minimum nodes in ASG (leave unset for a static nodegroup)") - fs.IntVarP(&ng.MaxSize, "nodes-max", "M", 0, "maximum nodes in ASG (leave unset for a static nodegroup)") - - fs.StringVarP(&ng.InstanceType, "node-type", "t", defaultNodeType, "node instance type") - - fs.IntVarP(&ng.VolumeSize, "node-volume-size", "", 0, "Node volume size (in GB)") - fs.IntVar(&ng.MaxPodsPerNode, "max-pods-per-node", 0, 
"maximum number of pods per node (set automatically if unspecified)") - - fs.StringVar(&ng.AMI, "node-ami", ami.ResolverStatic, "Advanced use cases only. If 'static' is supplied (default) then eksctl will use static AMIs; if 'auto' is supplied then eksctl will automatically set the AMI based on version/region/instance type; if any other value is supplied it will override the AMI to use for the nodes. Use with extreme care.") - fs.StringVar(&ng.AMIFamily, "node-ami-family", ami.ImageFamilyAmazonLinux2, "Advanced use cases only. If 'AmazonLinux2' is supplied (default), then eksctl will use the offical AWS EKS AMIs (Amazon Linux 2); if 'Ubuntu1804' is supplied, then eksctl will use the offical Canonical EKS AMIs (Ubuntu 18.04).") - - fs.BoolVar(&ng.AllowSSH, "ssh-access", false, "control SSH access for nodes") - fs.StringVar(&ng.SSHPublicKeyPath, "ssh-public-key", defaultSSHPublicKey, "SSH public key to use for nodes (import from local path, or use existing EC2 key pair)") - - fs.BoolVarP(&ng.PrivateNetworking, "node-private-networking", "P", false, "whether to make initial nodegroup networking private") + fs.StringVar(&ng.Name, "nodegroup", "", "Name of the nodegroup. 
Defaults to \"ng-\"") + cmdutils.AddCommonCreateNodeGroupFlags(fs, p, cfg, ng) }) group.InFlagSet("Cluster add-ons", func(fs *pflag.FlagSet) { diff --git a/pkg/ctl/create/create.go b/pkg/ctl/create/create.go index 1e930456692..b0b611abd87 100644 --- a/pkg/ctl/create/create.go +++ b/pkg/ctl/create/create.go @@ -19,6 +19,7 @@ func Command(g *cmdutils.Grouping) *cobra.Command { } cmd.AddCommand(createClusterCmd(g)) + cmd.AddCommand(createNodeGroupCmd(g)) return cmd } diff --git a/pkg/ctl/create/nodegroup.go b/pkg/ctl/create/nodegroup.go new file mode 100644 index 00000000000..2f6ead8680e --- /dev/null +++ b/pkg/ctl/create/nodegroup.go @@ -0,0 +1,157 @@ +package create + +import ( + "fmt" + "os" + + awseks "github.com/aws/aws-sdk-go/service/eks" + "github.com/kubicorn/kubicorn/pkg/logger" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "github.com/weaveworks/eksctl/pkg/ctl/cmdutils" + "github.com/weaveworks/eksctl/pkg/eks" + "github.com/weaveworks/eksctl/pkg/eks/api" + "strings" + "github.com/weaveworks/eksctl/pkg/utils" +) + +func createNodeGroupCmd(g *cmdutils.Grouping) *cobra.Command { + p := &api.ProviderConfig{} + cfg := api.NewClusterConfig() + ng := cfg.NewNodeGroup() + + cmd := &cobra.Command{ + Use: "nodegroup", + Short: "Create a nodegroup", + Run: func(_ *cobra.Command, args []string) { + name := cmdutils.GetNameArg(args) + if name != "" { + ng.Name = name + } + if err := doAddNodeGroup(p, cfg, ng); err != nil { + logger.Critical("%s\n", err.Error()) + os.Exit(1) + } + }, + } + + group := g.New(cmd) + + group.InFlagSet("General", func(fs *pflag.FlagSet) { + fs.StringVar(&cfg.Metadata.Name, "cluster", "", "Name of the EKS cluster to add the nodegroup to") + cmdutils.AddRegionFlag(fs, p) + cmdutils.AddCFNRoleARNFlag(fs, p) + fs.StringVar(&cfg.Metadata.Version, "version", api.LatestVersion, fmt.Sprintf("Kubernetes version (valid options: %s)", strings.Join(api.SupportedVersions(), ","))) + }) + + group.InFlagSet("Nodegroup", 
func(fs *pflag.FlagSet) { + fs.StringVarP(&ng.Name, "name", "n", "", "Name of the nodegroup. Defaults to \"ng-\"") + cmdutils.AddCommonCreateNodeGroupFlags(fs, p, cfg, ng) + }) + + cmdutils.AddCommonFlagsForAWS(group, p) + + group.AddTo(cmd) + + return cmd +} + +func doAddNodeGroup(p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.NodeGroup) error { + ctl := eks.New(p, cfg) + meta := cfg.Metadata + + if !ctl.IsSupportedRegion() { + return cmdutils.ErrUnsupportedRegion(p) + } + logger.Info("using region %s", meta.Region) + + if err := ctl.CheckAuth(); err != nil { + return err + } + + if cfg.Metadata.Name == "" { + return errors.New("--cluster must be specified. run `eksctl get cluster` to show existing clusters") + } + + if ng.SSHPublicKeyPath == "" { + return fmt.Errorf("--ssh-public-key must be non-empty string") + } + + //TODO: do we need to do the AZ stuff from create???? + + if err := ctl.EnsureAMI(meta.Version, ng); err != nil { + return err + } + + if err := ctl.LoadSSHPublicKey(cfg.Metadata.Name, ng); err != nil { + return err + } + + logger.Debug("cfg = %#v", cfg) + + //TODO: is this check needed???? 
+ // Check the cluster exists and is active + eksCluster, err := ctl.DescribeControlPlane(cfg.Metadata) + if err != nil { + return err + } + if *eksCluster.Status != awseks.ClusterStatusActive { + return fmt.Errorf("cluster %s status is %s, it needs to be active to add a nodegroup", *eksCluster.Name, *eksCluster.Status) + } + logger.Info("found cluster %s", *eksCluster.Name) + logger.Debug("cluster = %#v", eksCluster) + + // Populate cfg with the endpoint, CA data, and so on obtained from the described control-plane + // So that we won't end up rendering an incomplete userdata missing those things + if err = ctl.GetCredentials(*eksCluster, cfg); err != nil { + return err + } + + { + stackManager := ctl.NewStackManager(cfg) + if ng.Name == "" { + ng.Name = utils.NodegroupName() + } + logger.Info("will create a Cloudformation stack for nodegroup %s for cluster %s", ng.Name, cfg.Metadata.Name) + errs := stackManager.RunTask(stackManager.CreateNodeGroup, ng) + if len(errs) > 0 { + logger.Info("%d error(s) occurred and nodegroup hasn't been created properly, you may wish to check CloudFormation console", len(errs)) + logger.Info("to cleanup resources, run 'eksctl delete nodegroup %s --region=%s --name=%s'", ng.Name, cfg.Metadata.Region, cfg.Metadata.Name) + for _, err := range errs { + if err != nil { + logger.Critical("%s\n", err.Error()) + } + } + return fmt.Errorf("failed to create nodegroup %s for cluster %q", ng.Name, cfg.Metadata.Name) + } + } + + { // post-creation action + clientConfigBase, err := ctl.NewClientConfig(cfg) + if err != nil { + return err + } + + clientConfig := clientConfigBase.WithExecAuthenticator() + + clientSet, err := clientConfig.NewClientSet() + if err != nil { + return err + } + + // authorise nodes to join + if err = ctl.AddNodeGroupToAuthConfigMap(clientSet, ng); err != nil { + return err + } + + // wait for nodes to join + if err = ctl.WaitForNodes(clientSet, ng); err != nil { + return err + } + } + logger.Success("EKS cluster %q in 
%q region has a new nodegroup with name %s", cfg.Metadata.Name, cfg.Metadata.Region, ng.Name) + + return nil + +} diff --git a/pkg/ctl/delete/delete.go b/pkg/ctl/delete/delete.go index d4d799b3832..d097939b911 100644 --- a/pkg/ctl/delete/delete.go +++ b/pkg/ctl/delete/delete.go @@ -23,6 +23,7 @@ func Command(g *cmdutils.Grouping) *cobra.Command { } cmd.AddCommand(deleteClusterCmd(g)) + cmd.AddCommand(deleteNodeGroupCmd()) return cmd } diff --git a/pkg/ctl/delete/delete.go.orig b/pkg/ctl/delete/delete.go.orig new file mode 100644 index 00000000000..57d33b4fd02 --- /dev/null +++ b/pkg/ctl/delete/delete.go.orig @@ -0,0 +1,33 @@ +package delete + +import ( + "github.com/kris-nova/logger" + "github.com/spf13/cobra" + "github.com/weaveworks/eksctl/pkg/ctl/cmdutils" +) + +var ( + waitDelete bool +) + +// Command will create the `delete` commands +func Command(g *cmdutils.Grouping) *cobra.Command { + cmd := &cobra.Command{ + Use: "delete", + Short: "Delete resource(s)", + Run: func(c *cobra.Command, _ []string) { + if err := c.Help(); err != nil { + logger.Debug("ignoring error %q", err.Error()) + } + }, + } + +<<<<<<< HEAD + cmd.AddCommand(deleteClusterCmd()) + cmd.AddCommand(deleteNodeGroupCmd()) +======= + cmd.AddCommand(deleteClusterCmd(g)) +>>>>>>> origin/master + + return cmd +} diff --git a/pkg/ctl/delete/nodegroup.go b/pkg/ctl/delete/nodegroup.go new file mode 100644 index 00000000000..da945b12db2 --- /dev/null +++ b/pkg/ctl/delete/nodegroup.go @@ -0,0 +1,98 @@ +package delete + +import ( + "fmt" + "os" + + "github.com/kubicorn/kubicorn/pkg/logger" + "github.com/spf13/cobra" + "github.com/weaveworks/eksctl/pkg/ctl/cmdutils" + "github.com/weaveworks/eksctl/pkg/eks" + "github.com/weaveworks/eksctl/pkg/eks/api" + "errors" + "github.com/spf13/pflag" +) + +func deleteNodeGroupCmd() *cobra.Command { + p := &api.ProviderConfig{} + cfg := api.NewClusterConfig() + var nodegroupName string + + cmd := &cobra.Command{ + Use: "nodegroup NAME", + Short: "Delete a nodegroup", + 
Args: cobra.MinimumNArgs(1), + RunE: func(_ *cobra.Command, args []string) error { + name := cmdutils.GetNameArg(args) + if name != "" { + nodegroupName = name + } + if err := doDeleteNodeGroup(p, cfg, nodegroupName); err != nil { + logger.Critical("%s\n", err.Error()) + os.Exit(1) + } + return nil + }, + } + + group := &cmdutils.NamedFlagSetGroup{} + + group.InFlagSet("General", func(fs *pflag.FlagSet) { + fs.StringVarP(&cfg.Metadata.Name, "cluster", "n", "", "EKS cluster name (required)") + cmdutils.AddRegionFlag(fs, p) + fs.BoolVarP(&waitDelete, "wait", "w", false, "Wait for deletion of all resources before exiting") + }) + + cmdutils.AddCommonFlagsForAWS(group, p) + + group.AddTo(cmd) + + return cmd +} + +func doDeleteNodeGroup(p *api.ProviderConfig, cfg *api.ClusterConfig, name string) error { + ctl := eks.New(p, cfg) + + if err := ctl.CheckAuth(); err != nil { + return err + } + + if cfg.Metadata.Name == "" { + return errors.New("`--cluster` must be set") + } + + logger.Info("deleting EKS nodegroup %q-nodegroup-%s", cfg.Metadata.Name, name) + + var deletedResources []string + + handleIfError := func(err error, name string) bool { + if err != nil { + logger.Debug("continue despite error: %v", err) + return true + } + logger.Debug("deleted %q", name) + deletedResources = append(deletedResources, name) + return false + } + + // We can remove all 'DeprecatedDelete*' calls in 0.2.0 + + stackManager := ctl.NewStackManager(cfg) + + { + err := stackManager.WaitDeleteNodeGroup(nil, name) + errs := []error{err} + if len(errs) > 0 { + logger.Info("%d error(s) occurred while deleting nodegroup(s)", len(errs)) + for _, err := range errs { + if err != nil { + logger.Critical("%s\n", err.Error()) + } + } + handleIfError(fmt.Errorf("failed to delete nodegroup(s)"), "nodegroup(s)") + } + logger.Debug("all nodegroups were deleted") + } + + return nil +} diff --git a/pkg/ctl/get/get.go b/pkg/ctl/get/get.go index 525a7e93202..cb59fb93abc 100644 --- a/pkg/ctl/get/get.go +++ 
b/pkg/ctl/get/get.go @@ -28,6 +28,7 @@ func Command(g *cmdutils.Grouping) *cobra.Command { } cmd.AddCommand(getClusterCmd(g)) + cmd.AddCommand(getNodegroupCmd()) return cmd } diff --git a/pkg/ctl/get/get.go.orig b/pkg/ctl/get/get.go.orig new file mode 100644 index 00000000000..b03bd01651c --- /dev/null +++ b/pkg/ctl/get/get.go.orig @@ -0,0 +1,38 @@ +package get + +import ( + "github.com/kris-nova/logger" + "github.com/spf13/cobra" + "github.com/weaveworks/eksctl/pkg/ctl/cmdutils" +) + +const ( + defaultChunkSize = 100 +) + +var ( + chunkSize int + output string +) + +// Command will create the `get` commands +func Command(g *cmdutils.Grouping) *cobra.Command { + cmd := &cobra.Command{ + Use: "get", + Short: "Get resource(s)", + Run: func(c *cobra.Command, _ []string) { + if err := c.Help(); err != nil { + logger.Debug("ignoring error %q", err.Error()) + } + }, + } + +<<<<<<< HEAD + cmd.AddCommand(getClusterCmd()) + cmd.AddCommand(getNodegroupCmd()) +======= + cmd.AddCommand(getClusterCmd(g)) +>>>>>>> origin/master + + return cmd +} diff --git a/pkg/ctl/get/nodegroup.go b/pkg/ctl/get/nodegroup.go new file mode 100644 index 00000000000..b972cd59096 --- /dev/null +++ b/pkg/ctl/get/nodegroup.go @@ -0,0 +1,104 @@ +package get + +import ( + "os" + "strconv" + "time" + + "github.com/weaveworks/eksctl/pkg/cfn/manager" + + "github.com/kubicorn/kubicorn/pkg/logger" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/weaveworks/eksctl/pkg/ctl/cmdutils" + "github.com/weaveworks/eksctl/pkg/eks" + "github.com/weaveworks/eksctl/pkg/eks/api" + "github.com/weaveworks/eksctl/pkg/printers" +) + +func getNodegroupCmd() *cobra.Command { + p := &api.ProviderConfig{} + cfg := api.NewClusterConfig() + + cmd := &cobra.Command{ + Use: "nodegroup", + Short: "Get nodegroups(s)", + Aliases: []string{"nodegroups"}, + Run: func(_ *cobra.Command, args []string) { + if err := doGetNodegroups(p, cfg, cmdutils.GetNameArg(args)); err != nil { + logger.Critical("%s\n", err.Error()) + 
os.Exit(1) + } + }, + } + + fs := cmd.Flags() + + fs.StringVarP(&cfg.Metadata.Name, "cluster", "n", "", "EKS cluster name") + + fs.StringVarP(&p.Region, "region", "r", "", "AWS region") + fs.StringVarP(&p.Profile, "profile", "p", "", "AWS credentials profile to use (overrides the AWS_PROFILE environment variable)") + + fs.StringVarP(&output, "output", "o", "table", "Specifies the output format. Choose from table,json,yaml. Defaults to table.") + return cmd +} + +func doGetNodegroups(p *api.ProviderConfig, cfg *api.ClusterConfig, name string) error { + ctl := eks.New(p, cfg) + + if err := ctl.CheckAuth(); err != nil { + return err + } + + if cfg.Metadata.Name == "" { + return errors.New("--cluster must not be omitted") + } + + manager := ctl.NewStackManager(cfg) + summaries, err := manager.GetNodeGroupSummaries() + if err != nil { + return errors.Wrap(err, "getting nodegroup stack summaries") + } + + printer, err := printers.NewPrinter(output) + if err != nil { + return err + } + + if output == "table" { + addSummaryTableColumns(printer.(*printers.TablePrinter)) + } + + if err := printer.PrintObj("nodegroups", summaries, os.Stdout); err != nil { + return err + } + + return nil +} + +func addSummaryTableColumns(printer *printers.TablePrinter) { + printer.AddColumn("CLUSTER", func(s *manager.NodeGroupSummary) string { + return s.Cluster + }) + printer.AddColumn("NODEGROUP", func(s *manager.NodeGroupSummary) string { + return s.Name + }) + printer.AddColumn("CREATED", func(s *manager.NodeGroupSummary) string { + return s.CreationTime.Format(time.RFC3339) + }) + printer.AddColumn("MIN SIZE", func(s *manager.NodeGroupSummary) string { + return strconv.Itoa(s.MinSize) + }) + printer.AddColumn("MAX SIZE", func(s *manager.NodeGroupSummary) string { + return strconv.Itoa(s.MaxSize) + }) + printer.AddColumn("DESIRED CAPACITY", func(s *manager.NodeGroupSummary) string { + return strconv.Itoa(s.DesiredCapacity) + }) + printer.AddColumn("INSTANCE TYPE", func(s 
*manager.NodeGroupSummary) string { + return s.InstanceType + }) + printer.AddColumn("IMAGE ID", func(s *manager.NodeGroupSummary) string { + return s.ImageID + }) +} diff --git a/pkg/ctl/scale/nodegroup.go b/pkg/ctl/scale/nodegroup.go index 1a8b694fb64..85985e81830 100644 --- a/pkg/ctl/scale/nodegroup.go +++ b/pkg/ctl/scale/nodegroup.go @@ -18,20 +18,25 @@ func scaleNodeGroupCmd(g *cmdutils.Grouping) *cobra.Command { ng := cfg.NewNodeGroup() cmd := &cobra.Command{ - Use: "nodegroup", + Use: "nodegroup NAME", Short: "Scale a nodegroup", - Run: func(_ *cobra.Command, args []string) { + RunE: func(_ *cobra.Command, args []string) error { + name := cmdutils.GetNameArg(args) + if name != "" { + ng.Name = name + } if err := doScaleNodeGroup(p, cfg, ng); err != nil { logger.Critical("%s\n", err.Error()) os.Exit(1) } + return nil }, } group := g.New(cmd) group.InFlagSet("General", func(fs *pflag.FlagSet) { - fs.StringVarP(&cfg.Metadata.Name, "name", "n", "", "EKS cluster name") + fs.StringVarP(&cfg.Metadata.Name, "cluster", "n", "", "EKS cluster name") fs.IntVarP(&ng.DesiredCapacity, "nodes", "N", -1, "total number of nodes (scale to this number)") @@ -62,7 +67,7 @@ func doScaleNodeGroup(p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.Nod } stackManager := ctl.NewStackManager(cfg) - err := stackManager.ScaleInitialNodeGroup() + err := stackManager.ScaleNodeGroup(ng) if err != nil { return fmt.Errorf("failed to scale nodegroup for cluster %q, error %v", cfg.Metadata.Name, err) } diff --git a/pkg/ctl/scale/nodegroup.go.orig b/pkg/ctl/scale/nodegroup.go.orig new file mode 100644 index 00000000000..154f9b21329 --- /dev/null +++ b/pkg/ctl/scale/nodegroup.go.orig @@ -0,0 +1,79 @@ +package scale + +import ( + "fmt" + "os" + + "github.com/kris-nova/logger" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "github.com/weaveworks/eksctl/pkg/ctl/cmdutils" + "github.com/weaveworks/eksctl/pkg/eks" + "github.com/weaveworks/eksctl/pkg/eks/api" + 
"github.com/weaveworks/eksctl/pkg/ctl/cmdutils" +) + +func scaleNodeGroupCmd(g *cmdutils.Grouping) *cobra.Command { + p := &api.ProviderConfig{} + cfg := api.NewClusterConfig() + ng := cfg.NewNodeGroup() + + cmd := &cobra.Command{ + Use: "nodegroup NAME", + Short: "Scale a nodegroup", + RunE: func(_ *cobra.Command, args []string) error { + name := cmdutils.GetNameArg(args) + if name != "" { + ng.Name = name + } + if err := doScaleNodeGroup(p, cfg, ng); err != nil { + logger.Critical("%s\n", err.Error()) + os.Exit(1) + } + return nil + }, + } + + group := g.New(cmd) + +<<<<<<< HEAD + fs.StringVarP(&cfg.Metadata.Name, "cluster", "n", "", "EKS cluster name") +======= + group.InFlagSet("General", func(fs *pflag.FlagSet) { + fs.StringVarP(&cfg.Metadata.Name, "name", "n", "", "EKS cluster name") +>>>>>>> origin/master + + fs.IntVarP(&ng.DesiredCapacity, "nodes", "N", -1, "total number of nodes (scale to this number)") + + fs.StringVarP(&p.Region, "region", "r", "", "AWS region") + fs.StringVarP(&p.Profile, "profile", "p", "", "AWS creditials profile to use (overrides the AWS_PROFILE environment variable)") + + fs.DurationVar(&p.WaitTimeout, "timeout", api.DefaultWaitTimeout, "max wait time in any polling operations") + }) + + return cmd +} + +func doScaleNodeGroup(p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.NodeGroup) error { + ctl := eks.New(p, cfg) + + if err := ctl.CheckAuth(); err != nil { + return err + } + + if cfg.Metadata.Name == "" { + return fmt.Errorf("no cluster name supplied. Use the --name= flag") + } + + if ng.DesiredCapacity < 0 { + return fmt.Errorf("number of nodes must be 0 or greater. 
Use the --nodes/-N flag") + } + + stackManager := ctl.NewStackManager(cfg) + err := stackManager.ScaleNodeGroup(ng) + if err != nil { + return fmt.Errorf("failed to scale nodegroup for cluster %q, error %v", cfg.Metadata.Name, err) + } + + return nil +} diff --git a/pkg/eks/api/api.go b/pkg/eks/api/api.go index ba2379f19aa..c3e141cfe70 100644 --- a/pkg/eks/api/api.go +++ b/pkg/eks/api/api.go @@ -8,6 +8,7 @@ import ( "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/aws/aws-sdk-go/service/eks/eksiface" "github.com/aws/aws-sdk-go/service/sts/stsiface" + "github.com/weaveworks/eksctl/pkg/utils" ) const ( @@ -166,11 +167,13 @@ func (c *ClusterConfig) AppendAvailabilityZone(newAZ string) { c.AvailabilityZones = append(c.AvailabilityZones, newAZ) } -// NewNodeGroup crears new nodegroup inside cluster config, +// NewNodeGroup creates new nodegroup inside cluster config, // it returns pointer to the nodegroup for convenience func (c *ClusterConfig) NewNodeGroup() *NodeGroup { + name := utils.NodegroupName() ng := &NodeGroup{ - ID: len(c.NodeGroups), + Name: name, + PrivateNetworking: false, } @@ -182,7 +185,7 @@ func (c *ClusterConfig) NewNodeGroup() *NodeGroup { // NodeGroup holds all configuration attributes that are // specific to a nodegroup type NodeGroup struct { - ID int + Name string AMI string AMIFamily string @@ -197,6 +200,7 @@ type NodeGroup struct { VolumeSize int + Labels NodeLabels MaxPodsPerNode int PolicyARNs []string @@ -229,4 +233,6 @@ type ( PolicyAutoScaling bool PolicyExternalDNS bool } + // NodeLabels labels nodes via kubelet's --node-labels flag + NodeLabels map[string]string ) diff --git a/pkg/eks/api/api_suite_test.go b/pkg/eks/api/api_suite_test.go new file mode 100644 index 00000000000..b0f0fba6a2c --- /dev/null +++ b/pkg/eks/api/api_suite_test.go @@ -0,0 +1,11 @@ +package api + +import ( + "testing" + + "github.com/weaveworks/eksctl/pkg/testutils" +) + +func TestCFNManager(t *testing.T) { + testutils.RegisterAndRun(t, "eks api 
Suite") +} diff --git a/pkg/eks/api/node_labels.go b/pkg/eks/api/node_labels.go new file mode 100644 index 00000000000..3a03289384f --- /dev/null +++ b/pkg/eks/api/node_labels.go @@ -0,0 +1,44 @@ +package api + +import ( + "errors" + "fmt" + "strings" +) + +func (f NodeLabels) String() string { + s := "" + for k, v := range f { + if s != "" { + s += "," + } + s += k + "=" + v + } + return s +} + +// Set parses the value and add the resulting key-value pairs as node labels. +// Each value is expected to include one or more key-value pairs, formatted as `KEY_1=VAL_1,KEY_2=VAL_2,...`. +// That is, the key and the value must be delimited by an equal sign, while each pair must be delimited by a comma. +func (f *NodeLabels) Set(value string) error { + if *f == nil { + *f = map[string]string{} + } + kvs := strings.Split(value, ",") + for i := range kvs { + kv := strings.Split(kvs[i], "=") + if len(kv) != 2 { + return fmt.Errorf("node label must be formatted K=V, but it was: %s", kvs[i]) + } + if kv[0] == "" { + return errors.New("key must not be omitted in --node-labels") + } + (*f)[kv[0]] = kv[1] + } + return nil +} + +// Type returns the name of the type as a string +func (f *NodeLabels) Type() string { + return "NodeLabels" +} diff --git a/pkg/eks/api/node_labels_test.go b/pkg/eks/api/node_labels_test.go new file mode 100644 index 00000000000..e8666e9e093 --- /dev/null +++ b/pkg/eks/api/node_labels_test.go @@ -0,0 +1,34 @@ +package api + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "strings" +) + +var _ = Describe("StackCollection Template", func() { + var ( + nodeLabels NodeLabels + ) + + BeforeEach(func() { + nodeLabels = NodeLabels{} + }) + + Describe("Type", func() { + It("should have the fixed, expected value", func() { + Expect(nodeLabels.Type()).To(Equal("NodeLabels")) + }) + }) + + Describe("Set", func() { + BeforeEach(func() { + nodeLabels.Set("k1=v1,k2=v2") + nodeLabels.Set("k2=v2_,k3=v3") + }) + + It("should parse and merge key-value pairs", func() { + Expect(strings.Split(nodeLabels.String(), ",")).To(ConsistOf("k3=v3", "k2=v2_", "k1=v1")) + }) + }) +}) diff --git a/pkg/eks/auth.go b/pkg/eks/auth.go index a63d08ec9f1..7ea156e464a 100644 --- a/pkg/eks/auth.go +++ b/pkg/eks/auth.go @@ -25,7 +25,7 @@ import ( func (c *ClusterProvider) getKeyPairName(clusterName string, ng *api.NodeGroup, fingerprint *string) string { keyNameParts := []string{"eksctl", clusterName} if ng != nil { - keyNameParts = append(keyNameParts, fmt.Sprintf("ng%d", ng.ID)) + keyNameParts = append(keyNameParts, fmt.Sprintf("nodegroup-%s", ng.Name)) } if fingerprint != nil { keyNameParts = append(keyNameParts, *fingerprint) diff --git a/pkg/eks/eks_test.go b/pkg/eks/eks_test.go index 9efec7e6feb..462df739d8f 100644 --- a/pkg/eks/eks_test.go +++ b/pkg/eks/eks_test.go @@ -15,12 +15,13 @@ import ( "github.com/stretchr/testify/mock" . 
"github.com/weaveworks/eksctl/pkg/eks" "github.com/weaveworks/eksctl/pkg/testutils" + "github.com/weaveworks/eksctl/pkg/testutils/mockprovider" ) var _ = Describe("Eks", func() { var ( c *ClusterProvider - p *testutils.MockProvider + p *mockprovider.MockProvider output string ) @@ -38,7 +39,7 @@ var _ = Describe("Eks", func() { BeforeEach(func() { clusterName = "test-cluster" - p = testutils.NewMockProvider() + p = mockprovider.NewMockProvider() c = &ClusterProvider{ Provider: p, @@ -122,7 +123,7 @@ var _ = Describe("Eks", func() { clusterName = "test-cluster" logger.Level = 1 - p = testutils.NewMockProvider() + p = mockprovider.NewMockProvider() c = &ClusterProvider{ Provider: p, @@ -189,7 +190,7 @@ var _ = Describe("Eks", func() { chunkSize = 1 callNumber = 0 - p = testutils.NewMockProvider() + p = mockprovider.NewMockProvider() c = &ClusterProvider{ Provider: p, @@ -229,7 +230,7 @@ var _ = Describe("Eks", func() { BeforeEach(func() { chunkSize = 100 - p = testutils.NewMockProvider() + p = mockprovider.NewMockProvider() c = &ClusterProvider{ Provider: p, diff --git a/pkg/eks/nodegroup.go b/pkg/eks/nodegroup.go index 97e433501c2..bfa6c19633b 100644 --- a/pkg/eks/nodegroup.go +++ b/pkg/eks/nodegroup.go @@ -55,6 +55,41 @@ func (c *ClusterProvider) CreateNodeGroupAuthConfigMap(clientSet *clientset.Clie return nil } +// AddNodeGroupToAuthConfigMap updates the auth config map to include the node group +func (c *ClusterProvider) AddNodeGroupToAuthConfigMap(clientSet *clientset.Clientset, ng *api.NodeGroup) error { + cm, err := clientSet.CoreV1().ConfigMaps("kube-system").Get("aws-auth", metav1.GetOptions{}) + if err != nil { + return errors.Wrapf(err, "failed getting auth ConfigMap for %s", ng.Name) + } + + mapRoles := []map[string]interface{}{} + + if err := yaml.Unmarshal([]byte(cm.Data["mapRoles"]), &mapRoles); err != nil { + return err + } + + m := make(map[string]interface{}) + m["rolearn"] = ng.InstanceRoleARN + m["username"] = "system:node:{{EC2PrivateDNSName}}" 
+ m["groups"] = []string{ + "system:bootstrappers", + "system:nodes", + } + mapRoles = append(mapRoles, m) + + mapRolesBytes, err := yaml.Marshal(mapRoles) + if err != nil { + return err + } + + cm.Data["mapRoles"] = string(mapRolesBytes) + + if _, err := clientSet.CoreV1().ConfigMaps("kube-system").Update(cm); err != nil { + return errors.Wrapf(err, "updating auth ConfigMap for %s", ng.Name) + } + return nil +} + func isNodeReady(node *corev1.Node) bool { for _, c := range node.Status.Conditions { if c.Type == corev1.NodeReady && c.Status == corev1.ConditionTrue { diff --git a/pkg/nodebootstrap/assets.go b/pkg/nodebootstrap/assets.go index 2eb90a0e21f..bc102bebcbb 100644 --- a/pkg/nodebootstrap/assets.go +++ b/pkg/nodebootstrap/assets.go @@ -71,7 +71,7 @@ func (fi bindataFileInfo) Sys() interface{} { return nil } -var __10EkscltAl2Conf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x92\x41\x6b\xdb\x4e\x10\xc5\xef\xfa\x14\x0b\xc9\xe1\xff\x07\xaf\x14\x3b\x6e\x0e\x01\x1d\x4c\xad\x84\x80\x6b\x87\x28\xa1\x85\xb6\x98\xf5\xee\xd8\x4c\xbd\x9a\x5d\x66\x57\xb6\x53\x93\xef\x5e\x64\x4b\xa9\x4b\x42\xe9\x4d\x9a\xdf\xe8\xbd\x99\x37\x3a\x13\xb0\x0e\x3a\x5a\x19\x3c\x68\x5c\xa2\x16\xe1\x39\x44\xa8\x8c\x30\xec\xbc\x44\x12\x35\x61\x14\x4b\xc7\x62\x5d\x2f\xc0\x42\xec\x1d\x5e\x46\x95\xfa\xe9\x48\x4c\x90\xea\x9d\x18\x88\xff\x46\x93\xc1\xff\x49\xf2\xb5\x04\xde\xa0\x86\xef\xc9\x99\x98\x38\xad\xac\xa8\x20\x2a\xa3\xa2\x12\x5e\xb1\xaa\x20\x02\x87\x6b\xf1\x50\xdc\xde\xcd\xa6\x3d\x31\xfa\x5c\xce\xc7\xc5\xcd\xe8\x69\xf2\x38\x3f\xd6\x92\x82\x36\xc8\x8e\x2a\xa0\x78\x83\x16\xf2\x0c\xa2\xce\x8e\x23\x66\x9d\x56\x0a\xb4\x49\xce\xc4\xad\x75\x0b\x65\x85\x22\x23\x42\x54\x11\xf5\x1f\x1e\x9f\x46\x5f\xe6\xf7\xb3\x71\xd9\x13\x1f\x27\x4f\xe5\x63\xf1\x30\x1f\x4f\xcb\xbf\xca\xb7\xfb\xb5\xea\xc7\xf1\xc9\x91\x7c\x47\x7c\x3a\x1b\x17\xf3\xbb\xfb\x7f\x92\xb3\x8d\xd0\x41\x34\x29\x76\xa0\xcb\xa8\x38\xe6\x27\x8f\x59\x1d\x38\x5b\x20\x75\x1f\x88\x6f\x89\x10\x52\x92\x33\x20\xd1\xe7\xe7\xfb\xd6\xec\xa5\x05\xda\xd6\
x21\x02\x4b\x43\x21\x3f\xdf\x9f\x2c\xd7\x35\x54\x6a\x27\xbd\x33\x0d\xed\x42\xe8\x90\xb2\xd6\x6d\xa5\x67\xdc\xa0\x85\x15\x98\x3c\x72\x0d\x2d\xf3\xce\x48\xa4\x25\x2b\xa9\x1d\x45\x85\x04\x2c\xb1\x52\x2b\xc8\xaf\x2e\x06\xc3\x8b\x7e\x7f\x78\x39\xfc\x30\x48\xcd\x9a\x53\xd0\x9c\x9e\xef\xdf\x5e\xef\x25\x55\x87\xdf\x42\x6d\x43\xaa\x5d\xd5\x24\x91\x79\x55\x07\x90\xaa\x32\x57\xc3\xeb\xcb\xb4\xff\xba\x84\xab\x8d\xf4\xec\x36\x68\x80\x73\xb5\x0d\x1d\x20\x94\x0b\x24\x69\x90\xf3\xcc\xf9\x98\x69\xc2\x26\x9d\x13\xac\x1d\x2d\x8f\xbc\x49\xbb\xe1\x04\x31\x35\x5d\xc7\xeb\xf0\x5c\x53\xc4\x0a\x72\xe3\xf4\x1a\xb8\x8b\x15\xe2\xd6\xf1\x5a\x7a\x5b\xaf\x90\x72\x4d\xd8\x02\x86\x15\x1e\x72\x6d\x82\x3f\xcd\xa5\x39\x4b\x63\x89\xab\x37\xe7\x3d\x96\xd3\x67\x55\xd9\xdf\xee\xef\x35\x5a\x88\x2d\x4a\x7f\x04\x47\xc9\xaf\x00\x00\x00\xff\xff\xa2\x51\x55\xb0\x76\x03\x00\x00") +var __10EkscltAl2Conf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x92\x51\x6b\xdb\x30\x14\x85\xdf\xfd\x2b\x04\xed\xc3\x06\x91\xdd\xa6\x59\x1f\x0a\x7e\xf0\x16\xb7\x14\xbc\xb6\xd4\x2d\x1b\x6c\x23\xdc\x48\x37\xe1\x2e\xf2\x95\x91\xe4\xa4\x5d\xc8\x7f\x1f\x8e\xed\x2e\xa3\x65\xec\xcd\xd6\x77\x7d\x8e\xee\x39\x3e\x12\xb8\xf2\x2a\x18\xe9\x6b\x54\xb4\x20\x25\xfc\xb3\x0f\x58\x69\xa1\x9d\xad\x25\xb1\x68\x98\x82\x58\x58\x27\x56\xcd\x1c\x0d\x86\xd1\xfe\x25\xab\xe0\x97\x65\x51\x10\x37\x4f\x62\x2c\xde\x65\xc5\xf8\x7d\x14\x7d\x2b\xd1\xad\x49\xe1\x8f\xe8\x48\x14\x56\x81\x11\x15\x06\xd0\x10\x40\xd4\xe0\xa0\xc2\x80\xce\x5f\x88\xfb\xfc\xea\xfa\xf6\x66\x24\xb2\x2f\xe5\x6c\x9a\x5f\x66\x8f\xc5\xc3\xac\x3b\x8b\x72\x5e\x93\xb3\x5c\x21\x87\x4b\x32\x98\x26\x18\x54\xd2\x5d\x31\x19\xb4\x62\xe4\x75\x74\x24\xae\x8c\x9d\x83\x11\xc0\x5a\xf8\x00\x81\xd4\x5f\x1e\x9f\xb3\xaf\xb3\xbb\xdb\x69\x39\x12\x9f\x8a\xc7\xf2\x21\xbf\x9f\x4d\x6f\xca\x91\xb8\xb9\x9d\xe6\xb3\x22\xfb\x98\x17\xe5\x3f\xbd\xfa\x65\x7b\xab\x6e\x17\xb6\x2c\xdf\x70\xda\x4b\x5e\xdf\xfd\x97\x9c\x69\x85\xf6\xa2\x51\xfe\x84\xaa\x0c\xe0\x42\x7a\xf0\x98\x34\xde\x25\x73\xe2\xe1\x03\xf1\x3d\x12\x42\x4a\xb6\
x1a\x25\xd5\xe9\xf1\xb6\x37\xdb\xf5\x40\x99\xc6\x07\x74\x52\xb3\x4f\x8f\xb7\x07\x9b\x0e\x03\x15\x3c\xc9\xda\xea\x96\x0e\x89\xec\x0e\x45\x0d\xcc\xd1\xf8\x41\xb8\x0b\x66\x18\x00\x63\xec\x46\xd6\x8e\xd6\x64\x70\x89\x3a\x0d\xae\xc1\x9e\xd5\x56\x4b\xe2\x85\x03\xa9\x2c\x07\x20\x46\x27\xa9\x82\x25\xa6\xe7\x27\xe3\xc9\xc9\xe9\xe9\xe4\x6c\xf2\x61\x1c\xeb\x95\x8b\x51\xb9\xf8\x78\xfb\xba\xeb\x5d\x0c\xfb\x9f\x08\x36\x3e\x56\xb6\x6a\xa3\x4a\x6a\x68\x3c\x4a\xa8\xf4\xf9\xe4\xe2\x2c\x3e\x7d\xd9\xd2\x36\x5a\xd6\xce\xae\x49\xa3\x4b\x61\xe3\x07\xc0\x24\xe7\xc4\x52\x93\x4b\x13\x5b\x87\x44\x31\xb5\xf1\x1d\x60\x65\x79\xd1\xf1\xb6\x8e\x96\x33\x86\x58\x0f\x13\x2f\x97\x77\x0d\x07\xaa\x30\xd5\x56\xad\xd0\x0d\x11\x61\xd8\x58\xb7\x92\xb5\x69\x96\xc4\xa9\x62\xea\x81\xc3\x25\xed\x83\x6f\x43\x3c\xcc\xa5\xed\xad\xb5\xa4\xe5\xab\xfe\xbb\xe3\xf8\x19\x2a\xf3\xc7\xfd\xad\x41\x83\xa1\x47\xf1\x4f\x6f\x39\xfa\x1d\x00\x00\xff\xff\x8a\xde\x69\x62\xa4\x03\x00\x00") func _10EkscltAl2ConfBytes() ([]byte, error) { return bindataRead( @@ -86,7 +86,7 @@ func _10EkscltAl2Conf() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "10-eksclt.al2.conf", size: 886, mode: os.FileMode(420), modTime: time.Unix(1, 0)} + info := bindataFileInfo{name: "10-eksclt.al2.conf", size: 932, mode: os.FileMode(420), modTime: time.Unix(1, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -111,7 +111,7 @@ func bootstrapAl2Sh() (*asset, error) { return a, nil } -var _bootstrapUbuntuSh = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x94\x61\x6b\x23\x37\x13\xc7\xdf\xef\xa7\x98\xc7\x67\x78\x12\x38\xad\x93\x5c\x7a\x2f\x0e\xb6\x34\x9c\xd3\x72\x34\x8d\x8f\x24\xc7\x15\x4a\x31\x63\x69\xd6\x2b\xac\xd5\x2c\xd2\xc8\xdb\x5c\xc8\x77\x2f\xf2\x7a\x7d\x4e\xa0\x2d\x7e\x63\xe9\xff\x9b\xd9\xd1\xcc\x5f\x7a\xf3\xbf\xd9\xca\xfa\xd9\x0a\x63\x03\x8a\x52\x51\x90\x6e\x18\x26\xb7\x8b\xf9\xf5\xf2\xd3\xe7\x6a\x7a\xd2\x70\x14\x8f\x2d\x81\xb2\xa7\x13\xf8\x11\x66\x24\x7a\x46\x9b\xa8\xc5\xcd\x36\x69\x45\x8e\xa4\x74\xac\xd1\x95\xe4\xb7\x45\x11\x3d\x76\x80\xce\x62\x84\xbd\xaa\x68\x13\xcb\xfd\xff\x71\xef\x35\xa6\xc5\x1d\x30\x2d\x6e\xdc\x1b\xb0\x28\xdc\x1d\x27\x2b\xe2\x63\x14\x6a\x33\x17\x28\x92\xa8\x1a\xad\x23\x53\x14\x27\x05\xc0\x1b\x78\x58\xcc\x17\x1f\x40\x1a\x8a\x04\xb1\xe1\xe4\x0c\xac\x08\x1c\xf3\x86\x0c\xa0\x00\x6d\x29\x3c\x82\xd8\x96\xc6\xa4\x10\x05\x83\x44\x48\xdd\xdb\x5d\x86\xbe\xb1\xba\x01\x1b\xa1\x6f\x50\xa0\x27\x30\x0c\xd6\xc3\xd5\xcd\x05\x9c\x1c\xb4\x15\x46\x32\xc0\x1e\x3a\x87\xd6\xc3\x50\x93\x19\x12\xa0\x37\xd0\x12\x7a\x01\xe1\xfc\xf1\x8e\x83\xe0\xca\x51\x5e\xb6\x1c\x65\xa4\xc1\xd8\x28\x81\xe3\xe9\x5b\x58\x25\x01\x2b\xff\x8f\xbb\x78\xcf\x02\xda\x11\x06\x68\xb8\xcf\x41\x8e\xd1\xec\x8f\x54\x07\x6e\xbf\x17\x9e\xfb\xd3\x5b\x69\x38\x09\x34\xb8\xb5\x7e\xbd\x4b\x20\x0c\x3a\x45\xe1\xd6\x46\xca\x71\x03\x68\x25\x92\xab\x0b\x80\xc8\x29\x68\xfa\x8f\x51\xfe\x2b\xf6\x8f\x40\x4b\x82\x06\x05\x07\x37\x00\xd4\x0e\xd7\xb1\xca\x93\x01\x98\xa0\x31\x81\x62\xac\xce\xca\xdd\x6f\x32\xec\x7a\x36\xa4\x6c\x57\x4d\x9f\xf6\xae\x7b\xde\x0b\xda\xa5\x28\x14\x94\xf1\xb1\x9a\x3e\x7d\xbc\xf9\x72\xff\x70\x7d\xb7\x9c\xdf\xde\x8f\x40\x8b\x7f\xa9\x8e\x4d\x56\x7f\xbb\xfa\x7d\xf9\x79\x31\x3f\x48\x98\xa4\x21\x2f\x56\xa3\x58\xf6\x4a\x78\x43\x5e\xf5\xb4\x6a\x98\x37\x95\x84\x44\x47\x1c\x07\xfb\x6d\xc0\x5a\x36\x54\x7d\x1d\xa8\x11\x70\x8e\x7b\xd5\x05\xbb\xb5\x8e\xd6\x64\x8e\x83\x3b\x36\xca\xfa\x3a\xa0\xd2\xec\x05\xad\xa7\xa0\x6c\x8b\x6b\xaa\xde\x9f\x5d\x5c\x9e\x9d\x9f\x5f\xbe\xbb\xfc\xe1\xa2\x34\x9b\x50\x92\x0e
\xe5\xf4\xe9\xea\xeb\xfd\x72\x7e\xfd\xf3\xd5\x97\x9b\x87\xe5\xdd\xf5\x2f\x9f\x16\xb7\xcf\x25\xb6\xf8\x8d\x3d\xf6\xb1\xd4\xdc\xe6\x36\xce\x3a\x4c\x91\x14\xb6\xe6\xfd\xe5\x87\x77\xe5\xf9\xa1\x1b\x9c\x8c\xea\x02\x6f\xad\xa1\x50\x61\x1f\x5f\xb7\x89\x5b\xb4\xbe\xda\x2f\x87\x49\x8e\x88\xb7\x6a\x65\xbd\x32\x36\x54\x33\xee\x64\xa6\xbd\xcd\xf7\xfe\x48\xd6\xec\xeb\x41\xcf\xd3\xcc\xba\x27\x29\xcd\x48\x1c\xce\x17\x92\xcf\x77\xa7\x32\xac\x37\x14\xc6\x11\x92\xf4\x1c\x36\xaa\x73\x69\x9d\x4b\xf0\x76\x8c\x5b\x07\x4e\x9d\x32\xc1\x6e\x29\x54\xc3\xaa\x1e\x0b\x0f\xb4\xb6\xbb\xca\xb3\x03\x8e\xfb\xba\xbb\xfe\xec\x6b\xbb\xae\x5e\x9b\x6f\xd8\x2e\x1f\xb1\x1d\xcf\x56\x13\x4a\x0a\xa4\xd6\x28\x14\xab\x3b\x16\x14\xfa\x75\xb0\xe9\x3d\x85\x2d\x85\x8f\x14\xc4\xd6\xd9\x09\x2f\x3e\x82\x9e\xfd\x63\xcb\x29\xaa\xec\x81\xaa\x46\x17\xe9\xd0\x51\x4b\x5e\x94\x46\x55\x5b\x47\x2f\x6a\xd0\x58\xea\x20\x99\x3b\xcd\xf6\x1e\x1e\xa8\xef\x0f\x5b\x7e\x9f\x60\x32\x7d\xda\xd9\xfe\x8f\x9f\xfe\x7c\x9e\x14\xa7\xc5\xf8\x8c\x61\x78\xc1\x15\x7f\x07\x00\x00\xff\xff\x0e\x70\xdd\x0b\x7c\x05\x00\x00") +var _bootstrapUbuntuSh = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x94\x51\x6f\xdb\x46\x0c\xc7\xdf\xf5\x29\x38\x37\xc0\x1a\xa0\x27\x27\x69\xd6\x87\x02\x1a\xe6\xd5\xde\x50\xcc\x8b\x8b\x24\x45\x07\x0c\x83\x41\xdd\x51\xd6\xc1\xa7\xa3\x70\x47\x59\x4b\x83\x7c\xf7\xe1\x2c\xcb\x75\x02\x6c\x83\x5f\x7c\xfc\xff\x48\xf1\x48\x1e\x5f\x7d\x37\x2d\xad\x9f\x96\x18\x6b\x50\xd4\x65\x19\xe9\x9a\x61\x72\xb3\x9a\x2f\xd6\x1f\x3f\x15\x67\xaf\x6b\x8e\xe2\xb1\x21\x50\xf6\x7c\x02\x3f\xc2\x94\x44\x4f\x69\x1b\xb5\xb8\xe9\xb6\x2b\xc9\x91\xe4\x8e\x35\xba\x9c\xfc\x2e\xcb\xa2\xc7\x16\xd0\x59\x8c\x70\x50\x15\x6d\x63\x7e\xf8\x3f\xda\x5e\x62\x5a\xdc\x11\xd3\xe2\x46\xdb\x80\x45\xe1\xf6\x34\x58\x16\x1f\xa2\x50\x93\xb8\x40\x91\x44\x55\x68\x1d\x99\x2c\x7b\x9d\x01\xbc\x82\xfb\xd5\x7c\xf5\x1e\xa4\xa6\x48\x10\x6b\xee\x9c\x81\x92\xc0\x31\x6f\xc9\x00\x0a\xd0\x8e\xc2\x03\x88\x6d\x68\x0c\x0a\x51\x30\x48\x84\xae\x7d\xb3\x8f\xd0\xd7\x56\xd7\x60\x23\xf4\x35\x0a\xf4\x04\x86\xc1\x7a\x98\x2d\xaf\xe0\xf5\x51\x2b\x31\x92\x01\xf6\xd0\x3a\xb4\x1e\x86\x9c\xcc\x10\x00\xbd\x81\x86\xd0\x0b\x08\xa7\x8f\xb7\x1c\x04\x4b\x47\xe9\xd8\x70\x94\x91\x06\x63\xa3\x04\x8e\xe7\x6f\xa0\xec\x04\xac\x7c\x1f\xf7\xfe\x9e\x05\xb4\x23\x0c\x50\x73\x9f\x9c\x1c\xa3\x39\x5c\xa9\x0a\xdc\x7c\x4b\x3c\xd5\xa7\xb7\x52\x73\x27\x50\xe3\xce\xfa\xcd\x3e\x80\x30\xe8\x2e\x0a\x37\x36\x52\xf2\x1b\x40\x2b\x91\x5c\x95\x01\x44\xee\x82\xa6\xff\x69\xe5\x7f\x62\xff\x0a\x34\x24\x68\x50\x70\x98\x06\x80\xca\xe1\x26\x16\xa9\x33\x00\x13\x34\x26\x50\x8c\xc5\x45\xbe\xff\x4d\x06\xab\x67\x43\xca\xb6\xc5\xd9\xe3\x61\xea\x9e\x0e\x82\x76\x5d\x14\x0a\xca\xf8\x58\x9c\x3d\x7e\x58\x7e\xbe\xbb\x5f\xdc\xae\xe7\x37\x77\x23\xd0\xe0\xdf\xaa\x65\x93\xd4\xdf\x67\x7f\xac\x3f\xad\xe6\x47\x69\x1f\xd4\x61\x49\x2e\x8e\x81\x97\xb3\x9f\x17\xcb\x23\x80\x9d\xd4\xe4\xc5\x6a\x14\xcb\x5e\x09\x6f\xc9\xab\x9e\xca\x9a\x79\x5b\x48\xe8\xe8\x84\xe3\x60\xbf\x0e\x58\xc3\x86\x8a\x2f\x03\x35\x02\xce\x71\xaf\xda\x60\x77\xd6\xd1\x86\xcc\xa9\x73\xcb\x46\x59\x5f\x05\x54\x9a\xbd\xa0\xf5\x14\x94\x6d\x70\x43\xc5\xbb\x8b\xab\xeb\x8b
\xcb\xcb\xeb\xb7\xd7\x3f\x5c\xe5\x66\x1b\x72\xd2\x21\x3f\x7b\x9c\x7d\xb9\x5b\xcf\x17\xbf\xcc\x3e\x2f\xef\xd7\xb7\x8b\x5f\x3f\xae\x6e\x9e\x72\x6c\xf0\x2b\x7b\xec\x63\xae\xb9\x49\x75\x9e\xb6\xd8\x45\x52\xd8\x98\x77\xd7\xef\xdf\xe6\x97\xc7\x72\x71\x67\x54\x1b\x78\x67\x0d\x85\x02\xfb\xf8\xb2\x8e\xdc\xa0\xf5\xc5\xe1\x38\xb4\x7a\x44\xbc\x55\xa5\xf5\xca\xd8\x50\x4c\xb9\x95\xa9\xf6\x36\x2d\x86\x13\x59\xb3\xaf\x06\x3d\xb5\x3b\xe9\x9e\x24\x37\x23\x71\xbc\x5f\xe8\x7c\x7a\x5c\x85\x61\xbd\xa5\x30\xb6\x83\xa4\xe7\xb0\x55\xad\xeb\x36\x29\x05\x6f\x47\xbf\x4d\xe0\xae\x55\x26\xd8\x1d\x85\x62\x38\x55\x63\xe2\x81\x36\x76\x9f\x79\xea\xe6\x69\x5d\xf7\xfb\x81\x7d\x65\x37\xc5\xcb\xe9\x1c\xcc\xf9\x03\x36\xe3\xdd\x2a\x42\xe9\x02\xa9\x0d\x0a\xc5\xe2\x96\x05\x85\x7e\x1b\xe6\xf8\x8e\xc2\x8e\xc2\x07\x0a\x62\xab\x34\x09\xcf\x3e\x82\x9e\xfd\x43\xc3\x5d\x54\x69\x06\x8a\x0a\x5d\xa4\x63\x45\x2d\x79\x51\x1a\x55\x65\x1d\x3d\xcb\x41\x63\xae\x83\x24\xee\x3c\xcd\xff\xb0\xc1\xbe\x6d\xbe\xb4\xc0\x60\x72\xf6\xb8\x7f\x17\x7f\xfe\xf4\xd7\xd3\x24\x3b\xcf\xc6\x3d\x87\xe1\x19\x97\xfd\x13\x00\x00\xff\xff\xe6\xbd\xd9\x7d\x9d\x05\x00\x00") func bootstrapUbuntuShBytes() ([]byte, error) { return bindataRead( @@ -126,7 +126,7 @@ func bootstrapUbuntuSh() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "bootstrap.ubuntu.sh", size: 1404, mode: os.FileMode(420), modTime: time.Unix(1, 0)} + info := bindataFileInfo{name: "bootstrap.ubuntu.sh", size: 1437, mode: os.FileMode(420), modTime: time.Unix(1, 0)} a := &asset{bytes: bytes, info: info} return a, nil } diff --git a/pkg/nodebootstrap/assets/10-eksclt.al2.conf b/pkg/nodebootstrap/assets/10-eksclt.al2.conf index 4f26428a697..08af22d0236 100644 --- a/pkg/nodebootstrap/assets/10-eksclt.al2.conf +++ b/pkg/nodebootstrap/assets/10-eksclt.al2.conf @@ -3,7 +3,7 @@ [Service] # Local metadata parameters: REGION, AWS_DEFAULT_REGION EnvironmentFile=/etc/eksctl/metadata.env -# Global and static parameters: MAX_PODS, CLUSTER_DNS +# Global and static parameters: 
MAX_PODS, CLUSTER_DNS, NODE_LABELS EnvironmentFile=/etc/eksctl/kubelet.env # Local non-static parameters: NODE_IP EnvironmentFile=/etc/eksctl/kubelet.local.env @@ -13,6 +13,7 @@ ExecStart=/usr/bin/kubelet \ --node-ip=${NODE_IP} \ --cluster-dns=${CLUSTER_DNS} \ --max-pods=${MAX_PODS} \ + --node-labels=${NODE_LABELS} \ --allow-privileged=true \ --pod-infra-container-image=602401143452.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com/eks/pause-amd64:3.1 \ --cloud-provider=aws \ diff --git a/pkg/nodebootstrap/assets/bootstrap.ubuntu.sh b/pkg/nodebootstrap/assets/bootstrap.ubuntu.sh index 1a9979ddc53..85b56b11951 100644 --- a/pkg/nodebootstrap/assets/bootstrap.ubuntu.sh +++ b/pkg/nodebootstrap/assets/bootstrap.ubuntu.sh @@ -22,6 +22,7 @@ systemctl reset-failed "node-ip=${NODE_IP}" "cluster-dns=${CLUSTER_DNS}" "max-pods=${MAX_PODS}" + "node-labels=${NODE_LABELS}" "authentication-token-webhook=true" "authorization-mode=Webhook" "allow-privileged=true" diff --git a/pkg/nodebootstrap/userdata.go b/pkg/nodebootstrap/userdata.go index 13fda4afbee..71e03cc5ed2 100644 --- a/pkg/nodebootstrap/userdata.go +++ b/pkg/nodebootstrap/userdata.go @@ -69,10 +69,10 @@ func addFilesAndScripts(config *cloudconfig.CloudConfig, files configFiles, scri return nil } -func makeClientConfigData(spec *api.ClusterConfig, nodeGroupID int) ([]byte, error) { +func makeClientConfigData(spec *api.ClusterConfig, ng *api.NodeGroup) ([]byte, error) { clientConfig, _, _ := kubeconfig.New(spec, "kubelet", configDir+"ca.crt") authenticator := kubeconfig.AWSIAMAuthenticator - if spec.NodeGroups[nodeGroupID].AMIFamily == ami.ImageFamilyUbuntu1804 { + if ng.AMIFamily == ami.ImageFamilyUbuntu1804 { authenticator = kubeconfig.HeptioAuthenticatorAWS } kubeconfig.AppendAuthenticator(clientConfig, spec, authenticator) @@ -92,15 +92,16 @@ func clusterDNS(spec *api.ClusterConfig) string { return "10.100.0.10" } -func makeKubeletParams(spec *api.ClusterConfig, nodeGroupID int) []string { - ng := spec.NodeGroups[nodeGroupID] 
+func makeKubeletParamsCommon(spec *api.ClusterConfig, ng *api.NodeGroup) []string { if ng.MaxPodsPerNode == 0 { ng.MaxPodsPerNode = maxPodsPerNodeType[ng.InstanceType] } + // TODO: use componentconfig or kubelet config file – https://github.com/weaveworks/eksctl/issues/156 return []string{ fmt.Sprintf("MAX_PODS=%d", ng.MaxPodsPerNode), fmt.Sprintf("CLUSTER_DNS=%s", clusterDNS(spec)), + fmt.Sprintf("NODE_LABELS=%s", ng.Labels), } } @@ -113,12 +114,12 @@ func makeMetadata(spec *api.ClusterConfig) []string { } // NewUserData creates new user data for a given node image family -func NewUserData(spec *api.ClusterConfig, nodeGroupID int) (string, error) { - switch spec.NodeGroups[nodeGroupID].AMIFamily { +func NewUserData(spec *api.ClusterConfig, ng *api.NodeGroup) (string, error) { + switch ng.AMIFamily { case ami.ImageFamilyAmazonLinux2: - return NewUserDataForAmazonLinux2(spec, nodeGroupID) + return NewUserDataForAmazonLinux2(spec, ng) case ami.ImageFamilyUbuntu1804: - return NewUserDataForUbuntu1804(spec, nodeGroupID) + return NewUserDataForUbuntu1804(spec, ng) default: return "", nil } diff --git a/pkg/nodebootstrap/userdata_al2.go b/pkg/nodebootstrap/userdata_al2.go index 0c7ee2bd096..a74168ab2a2 100644 --- a/pkg/nodebootstrap/userdata_al2.go +++ b/pkg/nodebootstrap/userdata_al2.go @@ -9,19 +9,23 @@ import ( "github.com/weaveworks/eksctl/pkg/eks/api" ) -func makeAmazonLinux2Config(spec *api.ClusterConfig, nodeGroupID int) (configFiles, error) { - clientConfigData, err := makeClientConfigData(spec, nodeGroupID) +func makeAmazonLinux2Config(spec *api.ClusterConfig, ng *api.NodeGroup) (configFiles, error) { + clientConfigData, err := makeClientConfigData(spec, ng) if err != nil { return nil, err } + if spec.CertificateAuthorityData == nil || len(spec.CertificateAuthorityData) == 0 { + return nil, errors.New("invalid cluster config: missing CertificateAuthorityData") + } + files := configFiles{ kubeletDropInUnitDir: { "10-eksclt.al2.conf": {isAsset: true}, }, 
configDir: { "metadata.env": {content: strings.Join(makeMetadata(spec), "\n")}, - "kubelet.env": {content: strings.Join(makeKubeletParams(spec, nodeGroupID), "\n")}, + "kubelet.env": {content: strings.Join(makeKubeletParamsCommon(spec, ng), "\n")}, // TODO: https://github.com/weaveworks/eksctl/issues/161 "kubelet-config.json": {isAsset: true}, "ca.crt": {content: string(spec.CertificateAuthorityData)}, @@ -33,14 +37,14 @@ func makeAmazonLinux2Config(spec *api.ClusterConfig, nodeGroupID int) (configFil } // NewUserDataForAmazonLinux2 creates new user data for Amazon Linux 2 nodes -func NewUserDataForAmazonLinux2(spec *api.ClusterConfig, nodeGroupID int) (string, error) { +func NewUserDataForAmazonLinux2(spec *api.ClusterConfig, ng *api.NodeGroup) (string, error) { config := cloudconfig.New() scripts := []string{ "bootstrap.al2.sh", } - files, err := makeAmazonLinux2Config(spec, nodeGroupID) + files, err := makeAmazonLinux2Config(spec, ng) if err != nil { return "", err } diff --git a/pkg/nodebootstrap/userdata_ubuntu.go b/pkg/nodebootstrap/userdata_ubuntu.go index bf53f3653f6..38376783752 100644 --- a/pkg/nodebootstrap/userdata_ubuntu.go +++ b/pkg/nodebootstrap/userdata_ubuntu.go @@ -9,8 +9,8 @@ import ( "github.com/weaveworks/eksctl/pkg/eks/api" ) -func makeUbuntu1804Config(spec *api.ClusterConfig, nodeGroupID int) (configFiles, error) { - clientConfigData, err := makeClientConfigData(spec, nodeGroupID) +func makeUbuntu1804Config(spec *api.ClusterConfig, ng *api.NodeGroup) (configFiles, error) { + clientConfigData, err := makeClientConfigData(spec, ng) if err != nil { return nil, err } @@ -18,7 +18,7 @@ func makeUbuntu1804Config(spec *api.ClusterConfig, nodeGroupID int) (configFiles files := configFiles{ configDir: { "metadata.env": {content: strings.Join(makeMetadata(spec), "\n")}, - "kubelet.env": {content: strings.Join(makeKubeletParams(spec, nodeGroupID), "\n")}, + "kubelet.env": {content: strings.Join(makeKubeletParamsCommon(spec, ng), "\n")}, // TODO: 
https://github.com/weaveworks/eksctl/issues/161 "ca.crt": {content: string(spec.CertificateAuthorityData)}, "kubeconfig.yaml": {content: string(clientConfigData)}, @@ -29,14 +29,14 @@ func makeUbuntu1804Config(spec *api.ClusterConfig, nodeGroupID int) (configFiles } // NewUserDataForUbuntu1804 creates new user data for Ubuntu 18.04 nodes -func NewUserDataForUbuntu1804(spec *api.ClusterConfig, nodeGroupID int) (string, error) { +func NewUserDataForUbuntu1804(spec *api.ClusterConfig, ng *api.NodeGroup) (string, error) { config := cloudconfig.New() scripts := []string{ "bootstrap.ubuntu.sh", } - files, err := makeUbuntu1804Config(spec, nodeGroupID) + files, err := makeUbuntu1804Config(spec, ng) if err != nil { return "", err } diff --git a/pkg/testutils/mock_provider.go b/pkg/testutils/mockprovider/mock_provider.go similarity index 99% rename from pkg/testutils/mock_provider.go rename to pkg/testutils/mockprovider/mock_provider.go index 2ca454464be..37b9126c303 100644 --- a/pkg/testutils/mock_provider.go +++ b/pkg/testutils/mockprovider/mock_provider.go @@ -1,4 +1,4 @@ -package testutils +package mockprovider import ( "time" diff --git a/pkg/utils/nodegroup_name.go b/pkg/utils/nodegroup_name.go new file mode 100644 index 00000000000..ac0c992a63d --- /dev/null +++ b/pkg/utils/nodegroup_name.go @@ -0,0 +1,23 @@ +package utils + +import ( + "math/rand" + "time" + "fmt" +) + +var r = rand.New(rand.NewSource(time.Now().UnixNano())) + +const ( + randNodegroupNameLength = 8 + randNodeGroupNameComponents = "abcdef0123456789" +) + +// NodegroupName generates a random hex string with the fixed length of 8 +func NodegroupName() string { + b := make([]byte, randNodegroupNameLength) + for i := 0; i < randNodegroupNameLength; i++ { + b[i] = randNodeGroupNameComponents[r.Intn(len(randNodeGroupNameComponents))] + } + return fmt.Sprintf("ng-%s", string(b)) +}