From 4ad2d885934b54fa4db750a3163fe12e5c91d2a9 Mon Sep 17 00:00:00 2001
From: Ilya Dmitrichenko 
Date: Thu, 27 Dec 2018 09:31:39 +0000
Subject: [PATCH] Refactoring and cleanup

- move name generator functions into one file, make usage consistent
- review stack manager code
- make examples in docs more consistent
- update output message formatting, improve consistency
- update some usage messages, move more flags into cmdutils
- refactor `ctl.GetCredentials`
- do not reset tags when scaling nodegroup
---
 README.md                         |  14 ++--
 pkg/cfn/manager/api.go            |  26 ++++----
 pkg/cfn/manager/cluster.go        |   2 +-
 pkg/cfn/manager/deprecated.go     |   8 +--
 pkg/cfn/manager/nodegroup.go      |  40 +++++++-----
 pkg/cfn/manager/tasks.go          | 105 ++++++++++++------------
 pkg/ctl/cmdutils/cmdutils.go      |  17 +++--
 pkg/ctl/completion/completion.go  |   3 +-
 pkg/ctl/create/cluster.go         |  11 ++--
 pkg/ctl/create/nodegroup.go       |  60 ++++++-----------
 pkg/ctl/delete/cluster.go         |  15 ++---
 pkg/ctl/delete/delete.go          |   2 +-
 pkg/ctl/delete/nodegroup.go       |  76 ++++++++++-----------
 pkg/ctl/get/cluster.go            |  10 ++-
 pkg/ctl/get/get.go                |   4 --
 pkg/ctl/get/nodegroup.go          |  38 +++++------
 pkg/ctl/scale/nodegroup.go        |  38 ++++++-----
 pkg/ctl/utils/describe_stacks.go  |   2 +-
 pkg/ctl/utils/write_kubeconfig.go |  11 +---
 pkg/eks/api/api.go                |   3 -
 pkg/eks/eks.go                    |  12 +++-
 pkg/eks/nodegroup.go              |   8 +--
 pkg/utils/namer.go                |  55 ++++++++++++++++
 pkg/utils/nodegroup_name.go       |  23 -------
 pkg/utils/utils.go                |  20 ------
 25 files changed, 291 insertions(+), 312 deletions(-)
 create mode 100644 pkg/utils/namer.go
 delete mode 100644 pkg/utils/nodegroup_name.go

diff --git a/README.md b/README.md
index a056e1bd972..959072fe163 100644
--- a/README.md
+++ b/README.md
@@ -172,28 +172,28 @@ eksctl delete cluster --name=<name> [--region=<region>]

 You can add one or more nodegroups in addition to the initial nodegroup created along with the cluster.

-To create an additional nodegroup, run:
+To create an additional nodegroup, use:

 ```
-eksctl create nodegroup --cluster=<clusterName>
+eksctl create nodegroup --cluster=<clusterName> [--name=<nodegroupName>]
 ```

 To list the details about a nodegroup or all of the nodegroups, use:

 ```
-eksctl get nodegroup --cluster=<clusterName> [<nodegroupName>]
+eksctl get nodegroup --cluster=<clusterName> [--name=<nodegroupName>]
 ```

 A nodegroup can be scaled by using the `eksctl scale nodegroup` command:

 ```
-eksctl delete nodegroup --cluster=<clusterName> --nodes=<desiredCount>
+eksctl scale nodegroup --cluster=<clusterName> --nodes=<desiredCount> --name=<nodegroupName>
 ```

-For example, to scale the nodegroup `ng-abcd1234` to 5 nodes:
+For example, to scale nodegroup `ng-a345f4e1` in `cluster-1` to 5 nodes, run:

 ```
-eksctl scale nodegroup --cluster=<clusterName> --nodes=5 ng-abcd1234
+eksctl scale nodegroup --cluster=cluster-1 --nodes=5 ng-a345f4e1
 ```

 If the desired number of nodes is greater than the current maximum set on the ASG then the maximum value will be increased to match the number of requested nodes. And likewise for the minimum. 
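The scaling rule described above (the ASG maximum or minimum is widened so that it can accommodate the requested number of nodes) can be summarised with a small standalone sketch; `clampBounds` and the example values are illustrative only and are not part of this patch or of the eksctl codebase:

```go
package main

import "fmt"

// clampBounds applies the rule from the README section above: if the
// requested desired capacity lies outside the current [min, max] range
// of the Auto Scaling group, the violated bound is moved to match the
// requested value, and the other bound is left untouched.
func clampBounds(desired, min, max int) (newMin, newMax int) {
	if desired > max {
		max = desired
	}
	if desired < min {
		min = desired
	}
	return min, max
}

func main() {
	// Scaling to 5 nodes when the ASG currently allows 1..3 nodes:
	// the maximum is raised to 5, the minimum stays at 1.
	newMin, newMax := clampBounds(5, 1, 3)
	fmt.Printf("min=%d max=%d\n", newMin, newMax) // min=1 max=5
}
```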
@@ -205,7 +205,7 @@ Scaling a nodegroup works by modifying the nodegroup CloudFormation stack via a
 To delete a nodegroup, run:

 ```
-eksctl delete nodegroup --cluster=<clusterName>
+eksctl delete nodegroup --cluster=<clusterName> --name=<nodegroupName>
 ```

 ### VPC Networking
diff --git a/pkg/cfn/manager/api.go b/pkg/cfn/manager/api.go
index c0c71982c98..73fed00221e 100644
--- a/pkg/cfn/manager/api.go
+++ b/pkg/cfn/manager/api.go
@@ -18,7 +18,8 @@ const (
 	ClusterNameTag = "eksctl.cluster.k8s.io/v1alpha1/cluster-name"

 	// NodeGroupNameTag defines the tag of the node group name
-	NodeGroupNameTag = "eksctl.cluster.k8s.io/v1alpha1/nodegroup-name"
+	NodeGroupNameTag  = "eksctl.cluster.k8s.io/v1alpha1/nodegroup-name"
+	oldNodeGroupIDTag = "eksctl.cluster.k8s.io/v1alpha1/nodegroup-id"
 )

 var (
@@ -95,7 +96,7 @@ func (c *StackCollection) doCreateStackRequest(i *Stack, templateBody []byte, pa
 // CreateStack with given name, stack builder instance and parameters;
 // any errors will be written to errs channel, when nil is written,
 // assume completion, do not expect more then one error value on the
-// channel, it's closed immediately after it is written two
+// channel, it's closed immediately after it is written to
 func (c *StackCollection) CreateStack(name string, stack builder.ResourceSet, parameters map[string]string, errs chan error) error {
 	i := &Stack{StackName: &name}
 	templateBody, err := stack.RenderJSON()
@@ -225,8 +226,11 @@ func (c *StackCollection) DeleteStack(name string) (*Stack, error) {
 		fmt.Sprintf("%s:%s", ClusterNameTag, c.spec.Metadata.Name))
 }

-// WaitDeleteStack kills a stack by name and waits for DELETED status
-func (c *StackCollection) WaitDeleteStack(name string) error {
+// WaitDeleteStack kills a stack by name and waits for DELETED status;
+// any errors will be written to errs channel, when nil is written,
+// assume completion, do not expect more than one error value on the
+// channel, it's closed immediately after it is written to
+func (c *StackCollection) WaitDeleteStack(name string, errs chan error) error {
 	i, err := c.DeleteStack(name)
 	if err != nil {
 		return err
@@ -234,12 +238,13 @@ func (c *StackCollection) WaitDeleteStack(name string) error {

 	logger.Info("waiting for stack %q to get deleted", *i.StackName)

-	return c.doWaitUntilStackIsDeleted(i)
+	go c.waitUntilStackIsDeleted(i, errs)
+
+	return nil
 }

-// WaitDeleteStackTask kills a stack by name and waits for DELETED status
-// When nil is returned, the `errs` channel must receive an `error` object or `nil`.
-func (c *StackCollection) WaitDeleteStackTask(name string, errs chan error) error { +// BlockingWaitDeleteStack kills a stack by name and waits for DELETED status +func (c *StackCollection) BlockingWaitDeleteStack(name string) error { i, err := c.DeleteStack(name) if err != nil { return err @@ -247,9 +252,7 @@ func (c *StackCollection) WaitDeleteStackTask(name string, errs chan error) erro logger.Info("waiting for stack %q to get deleted", *i.StackName) - go c.waitUntilStackIsDeleted(i, errs) - - return nil + return c.doWaitUntilStackIsDeleted(i) } // DescribeStacks describes the existing stacks @@ -287,7 +290,6 @@ func (c *StackCollection) doCreateChangeSetRequest(i *Stack, action string, desc input.SetChangeSetType(cloudformation.ChangeSetTypeUpdate) - input.SetTags(c.tags) input.SetTemplateBody(string(templateBody)) if withIAM { diff --git a/pkg/cfn/manager/cluster.go b/pkg/cfn/manager/cluster.go index 1f41d64ee19..d83572d341a 100644 --- a/pkg/cfn/manager/cluster.go +++ b/pkg/cfn/manager/cluster.go @@ -29,5 +29,5 @@ func (c *StackCollection) DeleteCluster() error { // WaitDeleteCluster waits till the cluster is deleted func (c *StackCollection) WaitDeleteCluster() error { - return c.WaitDeleteStack(c.makeClusterStackName()) + return c.BlockingWaitDeleteStack(c.makeClusterStackName()) } diff --git a/pkg/cfn/manager/deprecated.go b/pkg/cfn/manager/deprecated.go index 923f0dd231e..74a00bc8d2e 100644 --- a/pkg/cfn/manager/deprecated.go +++ b/pkg/cfn/manager/deprecated.go @@ -6,7 +6,7 @@ func (c *StackCollection) DeprecatedDeleteStackVPC(wait bool) error { stackName := "EKS-" + c.spec.Metadata.Name + "-VPC" if wait { - err = c.WaitDeleteStack(stackName) + err = c.BlockingWaitDeleteStack(stackName) } else { _, err = c.DeleteStack(stackName) } @@ -20,7 +20,7 @@ func (c *StackCollection) DeprecatedDeleteStackServiceRole(wait bool) error { stackName := "EKS-" + c.spec.Metadata.Name + "-ServiceRole" if wait { - err = c.WaitDeleteStack(stackName) + err = c.BlockingWaitDeleteStack(stackName) } else { _, err = c.DeleteStack(stackName) } @@ -34,7 +34,7 @@ func (c *StackCollection) DeprecatedDeleteStackDefaultNodeGroup(wait bool) error stackName := "EKS-" + c.spec.Metadata.Name + "-DefaultNodeGroup" if wait { - err = c.WaitDeleteStack(stackName) + err = c.BlockingWaitDeleteStack(stackName) } else { _, err = c.DeleteStack(stackName) } @@ -48,7 +48,7 @@ func (c *StackCollection) DeprecatedDeleteStackControlPlane(wait bool) error { stackName := "EKS-" + c.spec.Metadata.Name + "-ControlPlane" if wait { - err = c.WaitDeleteStack(stackName) + err = c.BlockingWaitDeleteStack(stackName) } else { _, err = c.DeleteStack(stackName) } diff --git a/pkg/cfn/manager/nodegroup.go b/pkg/cfn/manager/nodegroup.go index 89f719d1931..4337bd20c48 100644 --- a/pkg/cfn/manager/nodegroup.go +++ b/pkg/cfn/manager/nodegroup.go @@ -76,25 +76,22 @@ func (c *StackCollection) listAllNodeGroups() ([]string, error) { } // DeleteNodeGroup deletes a nodegroup stack -func (c *StackCollection) DeleteNodeGroup(errs chan error, data interface{}) error { - defer close(errs) - name := data.(string) - stack := c.MakeNodeGroupStackName(name) - _, err := c.DeleteStack(stack) - errs <- err - return nil +func (c *StackCollection) DeleteNodeGroup(name string) error { + name = c.MakeNodeGroupStackName(name) + _, err := c.DeleteStack(name) + return err } // WaitDeleteNodeGroup waits until the nodegroup is deleted func (c *StackCollection) WaitDeleteNodeGroup(errs chan error, data interface{}) error { - name := data.(string) - stack := 
c.MakeNodeGroupStackName(name)
-	return c.WaitDeleteStackTask(stack, errs)
+	name := c.MakeNodeGroupStackName(data.(string))
+	return c.WaitDeleteStack(name, errs)
 }

-// ScaleInitialNodeGroup will scale the first nodegroup (ID: 0)
-func (c *StackCollection) ScaleInitialNodeGroup() error {
-	return c.ScaleNodeGroup(c.spec.NodeGroups[0])
+// BlockingWaitDeleteNodeGroup waits until the nodegroup is deleted
+func (c *StackCollection) BlockingWaitDeleteNodeGroup(name string) error {
+	name = c.MakeNodeGroupStackName(name)
+	return c.BlockingWaitDeleteStack(name)
 }

 // ScaleNodeGroup will scale an existing nodegroup
@@ -154,7 +151,7 @@ func (c *StackCollection) ScaleNodeGroup(ng *api.NodeGroup) error {
 }

 // GetNodeGroupSummaries returns a list of summaries for the nodegroups of a cluster
-func (c *StackCollection) GetNodeGroupSummaries() ([]*NodeGroupSummary, error) {
+func (c *StackCollection) GetNodeGroupSummaries(name string) ([]*NodeGroupSummary, error) {
 	stacks, err := c.ListStacks(fmt.Sprintf("^(eksctl|EKS)-%s-nodegroup-.+$", c.spec.Metadata.Name))
 	if err != nil {
 		return nil, errors.Wrap(err, "getting nodegroup stacks")
@@ -162,15 +159,21 @@ func (c *StackCollection) GetNodeGroupSummaries() ([]*NodeGroupSummary, error) {

 	summaries := []*NodeGroupSummary{}
 	for _, stack := range stacks {
-		logger.Info("stack %s\n", *stack.StackName)
+		if *stack.StackStatus == cfn.StackStatusDeleteComplete {
+			continue
+		}
 		logger.Debug("stack = %#v", stack)

 		summary, err := c.mapStackToNodeGroupSummary(stack)
 		if err != nil {
-			return nil, errors.New("error mapping stack to node gorup summary")
+			return nil, errors.Wrap(err, "mapping stack to nodegroup summary")
 		}
-		summaries = append(summaries, summary)
+		if name == "" {
+			summaries = append(summaries, summary)
+		} else if summary.Name == name {
+			summaries = append(summaries, summary)
+		}
 	}

 	return summaries, nil
@@ -210,6 +213,9 @@ func getNodeGroupName(tags []*cfn.Tag) string {
 		if *tag.Key == NodeGroupNameTag {
 			return *tag.Value
 		}
+		if *tag.Key == oldNodeGroupIDTag {
+			return *tag.Value
+		}
 	}
 	return ""
 }
diff --git a/pkg/cfn/manager/tasks.go b/pkg/cfn/manager/tasks.go
index c01fbbc2179..2f16a3e1099 100644
--- a/pkg/cfn/manager/tasks.go
+++ b/pkg/cfn/manager/tasks.go
@@ -4,10 +4,13 @@ import (
 	"sync"

 	"github.com/kris-nova/logger"
+	"github.com/weaveworks/eksctl/pkg/eks/api"
 )

+type taskFunc func(chan error, interface{}) error
+
 type task struct {
-	call func(chan error, interface{}) error
+	call taskFunc
 	data interface{}
 }

@@ -39,13 +42,13 @@ func Run(passError func(error), tasks ...task) {
 	wg.Wait()
 }

-// RunTask runs a single task with a proper error handling
-func (s *StackCollection) RunTask(call func(chan error, interface{}) error, data interface{}) []error {
+// RunSingleTask runs a task with proper error handling
+func (s *StackCollection) RunSingleTask(t task) []error {
 	errs := []error{}
 	appendErr := func(err error) {
 		errs = append(errs, err)
 	}
-	if Run(appendErr, task{call: call, data: data}); len(errs) > 0 {
+	if Run(appendErr, t); len(errs) > 0 {
 		return errs
 	}
 	return nil
@@ -56,33 +59,17 @@
 // will be returned as a slice as soon as one of the tasks or group
 // of tasks is completed
 func (s *StackCollection) CreateClusterWithNodeGroups() []error {
-	errs := []error{}
-	appendErr := func(err error) {
-		errs = append(errs, err)
-	}
-	if Run(appendErr, task{s.CreateCluster, nil}); len(errs) > 0 {
-		return errs
-	}
-
-	createAllNodeGroups := []task{}
-	for i := range 
s.spec.NodeGroups { - t := task{ - call: s.CreateNodeGroup, - data: s.spec.NodeGroups[i], - } - createAllNodeGroups = append(createAllNodeGroups, t) - } - if Run(appendErr, createAllNodeGroups...); len(errs) > 0 { + if errs := s.RunSingleTask(task{s.CreateCluster, nil}); len(errs) > 0 { return errs } - return nil + return s.CreateAllNodeGroups() } -// CreateNodeGroups runs all tasks required to create the node groups; +// CreateAllNodeGroups runs all tasks required to create the node groups; // any errors will be returned as a slice as soon as one of the tasks // or group of tasks is completed -func (s *StackCollection) CreateNodeGroups() []error { +func (s *StackCollection) CreateAllNodeGroups() []error { errs := []error{} appendErr := func(err error) { errs = append(errs, err) @@ -103,61 +90,55 @@ func (s *StackCollection) CreateNodeGroups() []error { return nil } -// deleteAllNodeGroupsTasks returns a list of tasks for deleting all the -// nodegroup stacks -func (s *StackCollection) deleteAllNodeGroupsTasks(call func(chan error, interface{}) error) ([]task, error) { - stacks, err := s.listAllNodeGroups() - if err != nil { - return nil, err - } - deleteAllNodeGroups := []task{} - for i := range stacks { - t := task{ - call: call, - data: stacks[i], - } - deleteAllNodeGroups = append(deleteAllNodeGroups, t) - } - return deleteAllNodeGroups, nil +// CreateOneNodeGroup runs a task to create a single node groups; +// any errors will be returned as a slice as soon as the tasks is +// completed +func (s *StackCollection) CreateOneNodeGroup(ng *api.NodeGroup) []error { + return s.RunSingleTask(task{ + call: s.CreateNodeGroup, + data: ng, + }) } -// DeleteAllNodeGroups runs all tasks required to delete all the nodegroup -// stacks; any errors will be returned as a slice as soon as the group -// of tasks is completed -func (s *StackCollection) DeleteAllNodeGroups() []error { - errs := []error{} - appendErr := func(err error) { - errs = append(errs, err) - } - - deleteAllNodeGroups, err := s.deleteAllNodeGroupsTasks(s.DeleteNodeGroup) +// DeleteAllNodeGroups deletes all nodegroups without waiting +func (s *StackCollection) DeleteAllNodeGroups(call taskFunc) []error { + nodeGroupStackNames, err := s.listAllNodeGroups() if err != nil { - appendErr(err) - return errs + return []error{err} } - if Run(appendErr, deleteAllNodeGroups...); len(errs) > 0 { - return errs + errs := []error{} + for _, stackName := range nodeGroupStackNames { + if err := s.DeleteNodeGroup(stackName); err != nil { + errs = append(errs, err) + } } - return nil + return errs } // WaitDeleteAllNodeGroups runs all tasks required to delete all the nodegroup -// stacks, it waits for each nodegroup to get deleted; any errors will be -// returned as a slice as soon as the group of tasks is completed +// stacks and wait for all nodegroups to be deleted; any errors will be returned +// as a slice as soon as the group of tasks is completed func (s *StackCollection) WaitDeleteAllNodeGroups() []error { + nodeGroupStackNames, err := s.listAllNodeGroups() + if err != nil { + return []error{err} + } + errs := []error{} appendErr := func(err error) { errs = append(errs, err) } - deleteAllNodeGroups, err := s.deleteAllNodeGroupsTasks(s.WaitDeleteNodeGroup) - if err != nil { - appendErr(err) - return errs + deleteAllNodeGroups := []task{} + for i := range nodeGroupStackNames { + t := task{ + call: s.WaitDeleteNodeGroup, + data: nodeGroupStackNames[i], + } + deleteAllNodeGroups = append(deleteAllNodeGroups, t) } - if Run(appendErr, 
deleteAllNodeGroups...); len(errs) > 0 {
 		return errs
 	}
diff --git a/pkg/ctl/cmdutils/cmdutils.go b/pkg/ctl/cmdutils/cmdutils.go
index 5a5dcd7643d..8c10a514421 100644
--- a/pkg/ctl/cmdutils/cmdutils.go
+++ b/pkg/ctl/cmdutils/cmdutils.go
@@ -27,7 +27,7 @@ func GetNameArg(args []string) string {
 }

 // AddCommonFlagsForAWS adds common flags for api.ProviderConfig
-func AddCommonFlagsForAWS(group *NamedFlagSetGroup, p *api.ProviderConfig) {
+func AddCommonFlagsForAWS(group *NamedFlagSetGroup, p *api.ProviderConfig, cfnRole bool) {
 	group.InFlagSet("AWS client", func(fs *pflag.FlagSet) {
 		fs.StringVarP(&p.Profile, "profile", "p", "", "AWS credentials profile to use (overrides the AWS_PROFILE environment variable)")
@@ -37,6 +37,9 @@ func AddCommonFlagsForAWS(group *NamedFlagSetGroup, p *api.ProviderConfig) {
 			logger.Debug("ignoring error %q", err.Error())
 		}
 		fs.DurationVar(&p.WaitTimeout, "timeout", api.DefaultWaitTimeout, "max wait time in any polling operations")
+		if cfnRole {
+			fs.StringVar(&p.CloudFormationRoleARN, "cfn-role-arn", "", "IAM role used by CloudFormation to call AWS API on your behalf")
+		}
 	})
 }

@@ -45,9 +48,9 @@ func AddRegionFlag(fs *pflag.FlagSet, p *api.ProviderConfig) {
 	fs.StringVarP(&p.Region, "region", "r", "", "AWS region")
 }

-// AddCFNRoleARNFlag adds common --cfn-role-arn flag
-func AddCFNRoleARNFlag(fs *pflag.FlagSet, p *api.ProviderConfig) {
-	fs.StringVar(&p.CloudFormationRoleARN, "cfn-role-arn", "", "IAM role used by CloudFormation to call AWS API on your behalf")
+// AddWaitFlag adds common --wait flag
+func AddWaitFlag(wait *bool, fs *pflag.FlagSet) {
+	fs.BoolVarP(wait, "wait", "w", false, "Wait for deletion of all resources before exiting")
 }

 // AddCommonFlagsForKubeconfig adds common flags for controlling how output kubeconfig is written
@@ -57,6 +60,12 @@ func AddCommonFlagsForKubeconfig(fs *pflag.FlagSet, outputPath *string, setConte
 	fs.BoolVar(autoPath, "auto-kubeconfig", false, fmt.Sprintf("save kubeconfig file by cluster name, e.g. %q", kubeconfig.AutoPath(exampleName)))
 }

+// AddCommonFlagsForGetCmd adds common flags for get commands
+func AddCommonFlagsForGetCmd(fs *pflag.FlagSet, chunkSize *int, outputMode *string) {
+	fs.IntVar(chunkSize, "chunk-size", 100, "return large lists in chunks rather than all at once, pass 0 to disable")
+	fs.StringVarP(outputMode, "output", "o", "table", "specifies the output format (valid options: table, json, yaml)")
+}
+
 // ErrUnsupportedRegion is a common error message
 func ErrUnsupportedRegion(p *api.ProviderConfig) error {
 	return fmt.Errorf("--region=%s is not supported - use one of: %s", p.Region, strings.Join(api.SupportedRegions(), ", "))
diff --git a/pkg/ctl/completion/completion.go b/pkg/ctl/completion/completion.go
index 37f0c5468dc..6f91851fbe2 100644
--- a/pkg/ctl/completion/completion.go
+++ b/pkg/ctl/completion/completion.go
@@ -3,8 +3,7 @@ package completion
 import (
 	"os"

-	"github.com/kubicorn/kubicorn/pkg/logger"
-
+	"github.com/kris-nova/logger"
 	"github.com/spf13/cobra"
 )

diff --git a/pkg/ctl/create/cluster.go b/pkg/ctl/create/cluster.go
index 1b12877ad89..abe7e6b49f0 100644
--- a/pkg/ctl/create/cluster.go
+++ b/pkg/ctl/create/cluster.go
@@ -48,18 +48,18 @@ func createClusterCmd(g *cmdutils.Grouping) *cobra.Command {
 	group := g.New(cmd)

 	exampleClusterName := utils.ClusterName("", "")
+	exampleNodeGroupName := utils.NodeGroupName("", "")

 	group.InFlagSet("General", func(fs *pflag.FlagSet) {
 		fs.StringVarP(&cfg.Metadata.Name, "name", "n", "", fmt.Sprintf("EKS cluster name (generated if unspecified, e.g. 
%q)", exampleClusterName)) fs.StringToStringVarP(&cfg.Metadata.Tags, "tags", "", map[string]string{}, `A list of KV pairs used to tag the AWS resources (e.g. "Owner=John Doe,Team=Some Team")`) cmdutils.AddRegionFlag(fs, p) - cmdutils.AddCFNRoleARNFlag(fs, p) fs.StringSliceVar(&availabilityZones, "zones", nil, "(auto-select if unspecified)") fs.StringVar(&cfg.Metadata.Version, "version", api.LatestVersion, fmt.Sprintf("Kubernetes version (valid options: %s)", strings.Join(api.SupportedVersions(), ","))) }) group.InFlagSet("Initial nodegroup", func(fs *pflag.FlagSet) { - fs.StringVar(&ng.Name, "nodegroup", "", "Name of the nodegroup. Generated if unset, e.g. \"ng-a345f4\"") + fs.StringVar(&ng.Name, "nodegroup-name", "", fmt.Sprintf("name of the nodegroup (generated if unspecified, e.g. %q)", exampleNodeGroupName)) cmdutils.AddCommonCreateNodeGroupFlags(fs, p, cfg, ng) }) @@ -79,7 +79,7 @@ func createClusterCmd(g *cmdutils.Grouping) *cobra.Command { fs.StringVar(&kopsClusterNameForVPC, "vpc-from-kops-cluster", "", "re-use VPC from a given kops cluster") }) - cmdutils.AddCommonFlagsForAWS(group, p) + cmdutils.AddCommonFlagsForAWS(group, p, true) group.InFlagSet("Output kubeconfig", func(fs *pflag.FlagSet) { cmdutils.AddCommonFlagsForKubeconfig(fs, &kubeconfigPath, &setContext, &autoKubeconfigPath, exampleClusterName) @@ -109,9 +109,8 @@ func doCreateCluster(p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.Node } meta.Name = utils.ClusterName(meta.Name, nameArg) - if ng.Name == "" { - ng.Name = utils.NodeGroupName() - } + // Use given name or generate one, no argument mode here + ng.Name = utils.NodeGroupName(ng.Name, "") if autoKubeconfigPath { if kubeconfigPath != kubeconfig.DefaultPath { diff --git a/pkg/ctl/create/nodegroup.go b/pkg/ctl/create/nodegroup.go index 797ecd6f0b4..0649357cb99 100644 --- a/pkg/ctl/create/nodegroup.go +++ b/pkg/ctl/create/nodegroup.go @@ -3,14 +3,13 @@ package create import ( "fmt" "os" - "strings" - awseks "github.com/aws/aws-sdk-go/service/eks" - "github.com/kubicorn/kubicorn/pkg/logger" + "github.com/kris-nova/logger" "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/pflag" + "github.com/weaveworks/eksctl/pkg/ctl/cmdutils" "github.com/weaveworks/eksctl/pkg/eks" "github.com/weaveworks/eksctl/pkg/eks/api" @@ -26,11 +25,7 @@ func createNodeGroupCmd(g *cmdutils.Grouping) *cobra.Command { Use: "nodegroup", Short: "Create a nodegroup", Run: func(_ *cobra.Command, args []string) { - name := cmdutils.GetNameArg(args) - if name != "" { - ng.Name = name - } - if err := doAddNodeGroup(p, cfg, ng); err != nil { + if err := doCreateNodeGroup(p, cfg, ng, cmdutils.GetNameArg(args)); err != nil { logger.Critical("%s\n", err.Error()) os.Exit(1) } @@ -39,26 +34,27 @@ func createNodeGroupCmd(g *cmdutils.Grouping) *cobra.Command { group := g.New(cmd) + exampleNodeGroupName := utils.NodeGroupName("", "") + group.InFlagSet("General", func(fs *pflag.FlagSet) { - fs.StringVar(&cfg.Metadata.Name, "cluster", "", "Name of the EKS cluster to add the nodegroup to") + fs.StringVar(&cfg.Metadata.Name, "cluster", "", "name of the EKS cluster to add the nodegroup to") cmdutils.AddRegionFlag(fs, p) - cmdutils.AddCFNRoleARNFlag(fs, p) fs.StringVar(&cfg.Metadata.Version, "version", api.LatestVersion, fmt.Sprintf("Kubernetes version (valid options: %s)", strings.Join(api.SupportedVersions(), ","))) }) - group.InFlagSet("Nodegroup", func(fs *pflag.FlagSet) { - fs.StringVarP(&ng.Name, "name", "n", "", "Name of the nodegroup. Generated if unset, e.g. 
\"ng-a345f4\"") + group.InFlagSet("New nodegroup", func(fs *pflag.FlagSet) { + fs.StringVarP(&ng.Name, "name", "n", "", fmt.Sprintf("name of the new nodegroup (generated if unspecified, e.g. %q)", exampleNodeGroupName)) cmdutils.AddCommonCreateNodeGroupFlags(fs, p, cfg, ng) }) - cmdutils.AddCommonFlagsForAWS(group, p) + cmdutils.AddCommonFlagsForAWS(group, p, true) group.AddTo(cmd) return cmd } -func doAddNodeGroup(p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.NodeGroup) error { +func doCreateNodeGroup(p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.NodeGroup, nameArg string) error { ctl := eks.New(p, cfg) meta := cfg.Metadata @@ -72,15 +68,18 @@ func doAddNodeGroup(p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.NodeG } if cfg.Metadata.Name == "" { - return errors.New("--cluster must be specified. run `eksctl get cluster` to show existing clusters") + return errors.New("--cluster must be set") } + if utils.NodeGroupName(ng.Name, nameArg) == "" { + return cmdutils.ErrNameFlagAndArg(ng.Name, nameArg) + } + ng.Name = utils.NodeGroupName(ng.Name, nameArg) + if ng.SSHPublicKeyPath == "" { return fmt.Errorf("--ssh-public-key must be non-empty string") } - //TODO: do we need to do the AZ stuff from create???? - if err := ctl.EnsureAMI(meta.Version, ng); err != nil { return err } @@ -91,31 +90,14 @@ func doAddNodeGroup(p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.NodeG logger.Debug("cfg = %#v", cfg) - //TODO: is this check needed???? - // Check the cluster exists and is active - eksCluster, err := ctl.DescribeControlPlane(cfg.Metadata) - if err != nil { - return err - } - if *eksCluster.Status != awseks.ClusterStatusActive { - return fmt.Errorf("cluster %s status is %s, its needs to be active to add a nodegroup", *eksCluster.Name, *eksCluster.Status) - } - logger.Info("found cluster %s", *eksCluster.Name) - logger.Debug("cluster = %#v", eksCluster) - - // Populate cfg with the endopoint, CA data, and so on obtained from the described control-plane - // So that we won't end up rendering a incomplete useradata missing those things - if err = ctl.GetCredentials(*eksCluster, cfg); err != nil { - return err + if err := ctl.GetCredentials(cfg); err != nil { + return errors.Wrapf(err, "getting credentials for cluster %q", cfg.Metadata.Name) } { stackManager := ctl.NewStackManager(cfg) - if ng.Name == "" { - ng.Name = utils.NodeGroupName() - } - logger.Info("will create a Cloudformation stack for nodegroup %s for cluster %s", ng.Name, cfg.Metadata.Name) - errs := stackManager.RunTask(stackManager.CreateNodeGroup, ng) + logger.Info("will create a Cloudformation stack for nodegroup %s in cluster %s", ng.Name, cfg.Metadata.Name) + errs := stackManager.CreateOneNodeGroup(ng) if len(errs) > 0 { logger.Info("%d error(s) occurred and nodegroup hasn't been created properly, you may wish to check CloudFormation console", len(errs)) logger.Info("to cleanup resources, run 'eksctl delete nodegroup %s --region=%s --name=%s'", ng.Name, cfg.Metadata.Region, cfg.Metadata.Name) @@ -151,7 +133,7 @@ func doAddNodeGroup(p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.NodeG return err } } - logger.Success("EKS cluster %q in %q region has a new nodegroup with name %d", cfg.Metadata.Name, cfg.Metadata.Region, ng.Name) + logger.Success("created nodegroup %q in cluster %q", cfg.Metadata.Name, ng.Name) return nil diff --git a/pkg/ctl/delete/cluster.go b/pkg/ctl/delete/cluster.go index 8df56de4e44..1ad622a4ea3 100644 --- a/pkg/ctl/delete/cluster.go +++ b/pkg/ctl/delete/cluster.go @@ -34,11 
+34,10 @@ func deleteClusterCmd(g *cmdutils.Grouping) *cobra.Command { group.InFlagSet("General", func(fs *pflag.FlagSet) { fs.StringVarP(&cfg.Metadata.Name, "name", "n", "", "EKS cluster name (required)") cmdutils.AddRegionFlag(fs, p) - cmdutils.AddCFNRoleARNFlag(fs, p) - fs.BoolVarP(&waitDelete, "wait", "w", false, "Wait for deletion of all resources before exiting") + cmdutils.AddWaitFlag(&wait, fs) }) - cmdutils.AddCommonFlagsForAWS(group, p) + cmdutils.AddCommonFlagsForAWS(group, p, true) group.AddTo(cmd) return cmd @@ -94,7 +93,7 @@ func doDeleteCluster(p *api.ProviderConfig, cfg *api.ClusterConfig, nameArg stri } var clusterErr bool - if waitDelete { + if wait { clusterErr = handleIfError(stackManager.WaitDeleteCluster(), "cluster") } else { clusterErr = handleIfError(stackManager.DeleteCluster(), "cluster") @@ -102,13 +101,13 @@ func doDeleteCluster(p *api.ProviderConfig, cfg *api.ClusterConfig, nameArg stri if clusterErr { if handleIfError(ctl.DeprecatedDeleteControlPlane(cfg.Metadata), "control plane") { - handleIfError(stackManager.DeprecatedDeleteStackControlPlane(waitDelete), "stack control plane (deprecated)") + handleIfError(stackManager.DeprecatedDeleteStackControlPlane(wait), "stack control plane (deprecated)") } } - handleIfError(stackManager.DeprecatedDeleteStackServiceRole(waitDelete), "service group (deprecated)") - handleIfError(stackManager.DeprecatedDeleteStackVPC(waitDelete), "stack VPC (deprecated)") - handleIfError(stackManager.DeprecatedDeleteStackDefaultNodeGroup(waitDelete), "default nodegroup (deprecated)") + handleIfError(stackManager.DeprecatedDeleteStackServiceRole(wait), "service group (deprecated)") + handleIfError(stackManager.DeprecatedDeleteStackVPC(wait), "stack VPC (deprecated)") + handleIfError(stackManager.DeprecatedDeleteStackDefaultNodeGroup(wait), "default nodegroup (deprecated)") ctl.MaybeDeletePublicSSHKey(cfg.Metadata.Name) diff --git a/pkg/ctl/delete/delete.go b/pkg/ctl/delete/delete.go index 22fa8f14d3c..be6952b9bf4 100644 --- a/pkg/ctl/delete/delete.go +++ b/pkg/ctl/delete/delete.go @@ -7,7 +7,7 @@ import ( ) var ( - waitDelete bool + wait bool ) // Command will create the `delete` commands diff --git a/pkg/ctl/delete/nodegroup.go b/pkg/ctl/delete/nodegroup.go index 95f7bf895e5..4eeca96d94a 100644 --- a/pkg/ctl/delete/nodegroup.go +++ b/pkg/ctl/delete/nodegroup.go @@ -4,11 +4,11 @@ import ( "fmt" "os" - "errors" - - "github.com/kubicorn/kubicorn/pkg/logger" + "github.com/kris-nova/logger" + "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/pflag" + "github.com/weaveworks/eksctl/pkg/ctl/cmdutils" "github.com/weaveworks/eksctl/pkg/eks" "github.com/weaveworks/eksctl/pkg/eks/api" @@ -20,42 +20,33 @@ func deleteNodeGroupCmd(g *cmdutils.Grouping) *cobra.Command { ng := cfg.NewNodeGroup() cmd := &cobra.Command{ - Use: "nodegroup NAME", + Use: "nodegroup", Short: "Delete a nodegroup", - Args: cobra.MinimumNArgs(1), - RunE: func(_ *cobra.Command, args []string) error { - name := cmdutils.GetNameArg(args) - if name != "" { - ng.Name = name - } - if err := doDeleteNodeGroup(p, cfg, ng.Name); err != nil { + Run: func(_ *cobra.Command, args []string) { + if err := doDeleteNodeGroup(p, cfg, ng, cmdutils.GetNameArg(args)); err != nil { logger.Critical("%s\n", err.Error()) os.Exit(1) } - return nil }, } - group := &cmdutils.NamedFlagSetGroup{} + group := g.New(cmd) group.InFlagSet("General", func(fs *pflag.FlagSet) { fs.StringVar(&cfg.Metadata.Name, "cluster", "", "EKS cluster name (required)") cmdutils.AddRegionFlag(fs, p) - 
fs.BoolVarP(&waitDelete, "wait", "w", false, "Wait for deletion of all resources before exiting") + fs.StringVarP(&ng.Name, "name", "n", "", "Name of the nodegroup to delete (required)") + cmdutils.AddWaitFlag(&wait, fs) }) - group.InFlagSet("Nodegroup", func(fs *pflag.FlagSet) { - fs.StringVarP(&ng.Name, "name", "n", "", "Name of the nodegroup. Generated if unset, e.g. \"ng-a345f4\"") - }) - - cmdutils.AddCommonFlagsForAWS(group, p) + cmdutils.AddCommonFlagsForAWS(group, p, true) group.AddTo(cmd) return cmd } -func doDeleteNodeGroup(p *api.ProviderConfig, cfg *api.ClusterConfig, name string) error { +func doDeleteNodeGroup(p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.NodeGroup, nameArg string) error { ctl := eks.New(p, cfg) if err := ctl.CheckAuth(); err != nil { @@ -66,37 +57,38 @@ func doDeleteNodeGroup(p *api.ProviderConfig, cfg *api.ClusterConfig, name strin return errors.New("--cluster must be set") } - logger.Info("deleting EKS nodegroup %q-nodegroup-%s", cfg.Metadata.Name, name) + if ng.Name != "" && nameArg != "" { + return cmdutils.ErrNameFlagAndArg(ng.Name, nameArg) + } - var deletedResources []string + if nameArg != "" { + ng.Name = nameArg + } - handleIfError := func(err error, name string) bool { - if err != nil { - logger.Debug("continue despite error: %v", err) - return true - } - logger.Debug("deleted %q", name) - deletedResources = append(deletedResources, name) - return false + if ng.Name == "" { + return fmt.Errorf("--name must be set") } - // We can remove all 'DeprecatedDelete*' calls in 0.2.0 + logger.Info("deleting nodegroup %q in cluster %q", ng.Name, cfg.Metadata.Name) stackManager := ctl.NewStackManager(cfg) { - err := stackManager.WaitDeleteNodeGroup(nil, name) - errs := []error{err} - if len(errs) > 0 { - logger.Info("%d error(s) occurred while deleting nodegroup(s)", len(errs)) - for _, err := range errs { - if err != nil { - logger.Critical("%s\n", err.Error()) - } - } - handleIfError(fmt.Errorf("failed to delete nodegroup(s)"), "nodegroup(s)") + var ( + err error + verb string + ) + if wait { + err = stackManager.BlockingWaitDeleteNodeGroup(ng.Name) + verb = "was" + } else { + err = stackManager.DeleteNodeGroup(ng.Name) + verb = "will be" + } + if err != nil { + return errors.Wrapf(err, "failed to delete nodegroup %q", ng.Name) } - logger.Debug("all nodegroups were deleted") + logger.Success("nodegroup %q %s deleted", ng.Name, verb) } return nil diff --git a/pkg/ctl/get/cluster.go b/pkg/ctl/get/cluster.go index 78417e626a3..dd7647db4ac 100644 --- a/pkg/ctl/get/cluster.go +++ b/pkg/ctl/get/cluster.go @@ -35,14 +35,12 @@ func getClusterCmd(g *cmdutils.Grouping) *cobra.Command { group.InFlagSet("General", func(fs *pflag.FlagSet) { fs.StringVarP(&cfg.Metadata.Name, "name", "n", "", "EKS cluster name") fs.BoolVarP(&listAllRegions, "all-regions", "A", false, "List clusters across all supported regions") - fs.IntVar(&chunkSize, "chunk-size", defaultChunkSize, "Return large lists in chunks rather than all at once. Pass 0 to disable.") - - fs.StringVarP(&p.Region, "region", "r", "", "AWS region") - fs.StringVarP(&p.Profile, "profile", "p", "", "AWS credentials profile to use (overrides the AWS_PROFILE environment variable)") - - fs.StringVarP(&output, "output", "o", "table", "Specifies the output format. Choose from table,json,yaml. 
Defaults to table.") + cmdutils.AddRegionFlag(fs, p) + cmdutils.AddCommonFlagsForGetCmd(fs, &chunkSize, &output) }) + cmdutils.AddCommonFlagsForAWS(group, p, false) + group.AddTo(cmd) return cmd diff --git a/pkg/ctl/get/get.go b/pkg/ctl/get/get.go index d7e05b485b4..86b9410e9f7 100644 --- a/pkg/ctl/get/get.go +++ b/pkg/ctl/get/get.go @@ -6,10 +6,6 @@ import ( "github.com/weaveworks/eksctl/pkg/ctl/cmdutils" ) -const ( - defaultChunkSize = 100 -) - var ( chunkSize int output string diff --git a/pkg/ctl/get/nodegroup.go b/pkg/ctl/get/nodegroup.go index 95af6d3c01b..827d65c1d44 100644 --- a/pkg/ctl/get/nodegroup.go +++ b/pkg/ctl/get/nodegroup.go @@ -5,16 +5,16 @@ import ( "strconv" "time" - "github.com/weaveworks/eksctl/pkg/cfn/manager" - - "github.com/kubicorn/kubicorn/pkg/logger" + "github.com/kris-nova/logger" "github.com/pkg/errors" "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/weaveworks/eksctl/pkg/cfn/manager" "github.com/weaveworks/eksctl/pkg/ctl/cmdutils" "github.com/weaveworks/eksctl/pkg/eks" "github.com/weaveworks/eksctl/pkg/eks/api" "github.com/weaveworks/eksctl/pkg/printers" - "github.com/spf13/pflag" ) func getNodegroupCmd(g *cmdutils.Grouping) *cobra.Command { @@ -27,11 +27,7 @@ func getNodegroupCmd(g *cmdutils.Grouping) *cobra.Command { Short: "Get nodegroups(s)", Aliases: []string{"nodegroups"}, Run: func(_ *cobra.Command, args []string) { - name := cmdutils.GetNameArg(args) - if name != "" { - ng.Name = name - } - if err := doGetNodegroups(p, cfg, ng.Name); err != nil { + if err := doGetNodegroups(p, cfg, ng, cmdutils.GetNameArg(args)); err != nil { logger.Critical("%s\n", err.Error()) os.Exit(1) } @@ -42,23 +38,19 @@ func getNodegroupCmd(g *cmdutils.Grouping) *cobra.Command { group.InFlagSet("General", func(fs *pflag.FlagSet) { fs.StringVar(&cfg.Metadata.Name, "cluster", "", "EKS cluster name") - - fs.StringVarP(&p.Region, "region", "r", "", "AWS region") - fs.StringVarP(&p.Profile, "profile", "p", "", "AWS creditials profile to use (overrides the AWS_PROFILE environment variable)") - - fs.StringVarP(&output, "output", "o", "table", "Specifies the output format. Choose from table,json,yaml. Defaults to table.") + fs.StringVarP(&ng.Name, "name", "n", "", "Name of the nodegroup") + cmdutils.AddRegionFlag(fs, p) + cmdutils.AddCommonFlagsForGetCmd(fs, &chunkSize, &output) }) - group.InFlagSet("Nodegroup", func(fs *pflag.FlagSet) { - fs.StringVarP(&ng.Name, "name", "n", "", "Name of the nodegroup. Generated if unset, e.g. 
\"ng-a345f4\"") - }) + cmdutils.AddCommonFlagsForAWS(group, p, false) group.AddTo(cmd) return cmd } -func doGetNodegroups(p *api.ProviderConfig, cfg *api.ClusterConfig, name string) error { +func doGetNodegroups(p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.NodeGroup, nameArg string) error { ctl := eks.New(p, cfg) if err := ctl.CheckAuth(); err != nil { @@ -69,8 +61,16 @@ func doGetNodegroups(p *api.ProviderConfig, cfg *api.ClusterConfig, name string) return errors.New("--cluster must be set") } + if ng.Name != "" && nameArg != "" { + return cmdutils.ErrNameFlagAndArg(ng.Name, nameArg) + } + + if nameArg != "" { + ng.Name = nameArg + } + manager := ctl.NewStackManager(cfg) - summaries, err := manager.GetNodeGroupSummaries() + summaries, err := manager.GetNodeGroupSummaries(ng.Name) if err != nil { return errors.Wrap(err, "getting nodegroup stack summaries") } diff --git a/pkg/ctl/scale/nodegroup.go b/pkg/ctl/scale/nodegroup.go index 60f1733eb91..5a74866f1ba 100644 --- a/pkg/ctl/scale/nodegroup.go +++ b/pkg/ctl/scale/nodegroup.go @@ -1,6 +1,7 @@ package scale import ( + "errors" "fmt" "os" @@ -18,18 +19,13 @@ func scaleNodeGroupCmd(g *cmdutils.Grouping) *cobra.Command { ng := cfg.NewNodeGroup() cmd := &cobra.Command{ - Use: "nodegroup NAME", + Use: "nodegroup", Short: "Scale a nodegroup", - RunE: func(_ *cobra.Command, args []string) error { - name := cmdutils.GetNameArg(args) - if name != "" { - ng.Name = name - } - if err := doScaleNodeGroup(p, cfg, ng); err != nil { + Run: func(_ *cobra.Command, args []string) { + if err := doScaleNodeGroup(p, cfg, ng, cmdutils.GetNameArg(args)); err != nil { logger.Critical("%s\n", err.Error()) os.Exit(1) } - return nil }, } @@ -37,25 +33,21 @@ func scaleNodeGroupCmd(g *cmdutils.Grouping) *cobra.Command { group.InFlagSet("General", func(fs *pflag.FlagSet) { fs.StringVar(&cfg.Metadata.Name, "cluster", "", "EKS cluster name") + fs.StringVarP(&ng.Name, "name", "n", "", "Name of the nodegroup to scale") fs.IntVarP(&ng.DesiredCapacity, "nodes", "N", -1, "total number of nodes (scale to this number)") - fs.StringVarP(&p.Region, "region", "r", "", "AWS region") - fs.StringVarP(&p.Profile, "profile", "p", "", "AWS creditials profile to use (overrides the AWS_PROFILE environment variable)") - - fs.DurationVar(&p.WaitTimeout, "timeout", api.DefaultWaitTimeout, "max wait time in any polling operations") + cmdutils.AddRegionFlag(fs, p) }) - group.InFlagSet("Nodegroup", func(fs *pflag.FlagSet) { - fs.StringVarP(&ng.Name, "name", "n", "", "Name of the nodegroup. Generated if unset, e.g. \"ng-a345f4\"") - }) + cmdutils.AddCommonFlagsForAWS(group, p, false) group.AddTo(cmd) return cmd } -func doScaleNodeGroup(p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.NodeGroup) error { +func doScaleNodeGroup(p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.NodeGroup, nameArg string) error { ctl := eks.New(p, cfg) if err := ctl.CheckAuth(); err != nil { @@ -63,7 +55,19 @@ func doScaleNodeGroup(p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.Nod } if cfg.Metadata.Name == "" { - return fmt.Errorf("no cluster name supplied. 
Use the --name= flag") + return errors.New("--cluster must be set") + } + + if ng.Name != "" && nameArg != "" { + return cmdutils.ErrNameFlagAndArg(ng.Name, nameArg) + } + + if nameArg != "" { + ng.Name = nameArg + } + + if ng.Name == "" { + return fmt.Errorf("--name must be set") } if ng.DesiredCapacity < 0 { diff --git a/pkg/ctl/utils/describe_stacks.go b/pkg/ctl/utils/describe_stacks.go index 786e2242672..17da55144a0 100644 --- a/pkg/ctl/utils/describe_stacks.go +++ b/pkg/ctl/utils/describe_stacks.go @@ -43,7 +43,7 @@ func describeStacksCmd(g *cmdutils.Grouping) *cobra.Command { fs.BoolVar(&describeStacksEvents, "events", false, "include stack events") }) - cmdutils.AddCommonFlagsForAWS(group, p) + cmdutils.AddCommonFlagsForAWS(group, p, false) group.AddTo(cmd) return cmd diff --git a/pkg/ctl/utils/write_kubeconfig.go b/pkg/ctl/utils/write_kubeconfig.go index a8525ba01ef..2b9092fd509 100644 --- a/pkg/ctl/utils/write_kubeconfig.go +++ b/pkg/ctl/utils/write_kubeconfig.go @@ -47,7 +47,7 @@ func writeKubeconfigCmd(g *cmdutils.Grouping) *cobra.Command { cmdutils.AddCommonFlagsForKubeconfig(fs, &writeKubeconfigOutputPath, &writeKubeconfigSetContext, &writeKubeconfigAutoPath, "") }) - cmdutils.AddCommonFlagsForAWS(group, p) + cmdutils.AddCommonFlagsForAWS(group, p, false) group.AddTo(cmd) return cmd @@ -79,14 +79,7 @@ func doWriteKubeconfigCmd(p *api.ProviderConfig, cfg *api.ClusterConfig, nameArg writeKubeconfigOutputPath = kubeconfig.AutoPath(cfg.Metadata.Name) } - cluster, err := ctl.DescribeControlPlane(cfg.Metadata) - if err != nil { - return err - } - - logger.Debug("cluster = %#v", cluster) - - if err = ctl.GetCredentials(*cluster, cfg); err != nil { + if err := ctl.GetCredentials(cfg); err != nil { return err } diff --git a/pkg/eks/api/api.go b/pkg/eks/api/api.go index 6c0d274a17b..8788c32cb06 100644 --- a/pkg/eks/api/api.go +++ b/pkg/eks/api/api.go @@ -8,7 +8,6 @@ import ( "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/aws/aws-sdk-go/service/eks/eksiface" "github.com/aws/aws-sdk-go/service/sts/stsiface" - "github.com/weaveworks/eksctl/pkg/utils" ) const ( @@ -170,9 +169,7 @@ func (c *ClusterConfig) AppendAvailabilityZone(newAZ string) { // NewNodeGroup creates new nodegroup inside cluster config, // it returns pointer to the nodegroup for convenience func (c *ClusterConfig) NewNodeGroup() *NodeGroup { - name := utils.NodeGroupName() ng := &NodeGroup{ - Name: name, PrivateNetworking: false, } diff --git a/pkg/eks/eks.go b/pkg/eks/eks.go index 867dbd07e50..12ba4b08bbe 100644 --- a/pkg/eks/eks.go +++ b/pkg/eks/eks.go @@ -46,7 +46,17 @@ func (c *ClusterProvider) DeprecatedDeleteControlPlane(cl *api.ClusterMeta) erro } // GetCredentials retrieves the certificate authority data -func (c *ClusterProvider) GetCredentials(cluster awseks.Cluster, spec *api.ClusterConfig) error { +func (c *ClusterProvider) GetCredentials(spec *api.ClusterConfig) error { + // Check the cluster exists and is active + cluster, err := c.DescribeControlPlane(spec.Metadata) + if err != nil { + return err + } + if *cluster.Status != awseks.ClusterStatusActive { + return fmt.Errorf("status of cluster %q is %q, has to be %q", *cluster.Name, *cluster.Status, awseks.ClusterStatusActive) + } + logger.Debug("cluster = %#v", cluster) + spec.Endpoint = *cluster.Endpoint data, err := base64.StdEncoding.DecodeString(*cluster.CertificateAuthority.Data) diff --git a/pkg/eks/nodegroup.go b/pkg/eks/nodegroup.go index bfa6c19633b..4bdfbc96e01 100644 --- a/pkg/eks/nodegroup.go +++ b/pkg/eks/nodegroup.go @@ -99,12 +99,12 
@@ func isNodeReady(node *corev1.Node) bool { return false } -func getNodes(clientSet *clientset.Clientset) (int, error) { +func getNodes(clientSet *clientset.Clientset, ng *api.NodeGroup) (int, error) { nodes, err := clientSet.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { return 0, err } - logger.Info("the cluster has %d nodes", len(nodes.Items)) + logger.Info("nodegroup %q has %d nodes", ng.Name, len(nodes.Items)) for _, node := range nodes.Items { // logger.Debug("node[%d]=%#v", n, node) ready := "not ready" @@ -128,7 +128,7 @@ func (c *ClusterProvider) WaitForNodes(clientSet *clientset.Clientset, ng *api.N return errors.Wrap(err, "creating node watcher") } - counter, err := getNodes(clientSet) + counter, err := getNodes(clientSet, ng) if err != nil { return errors.Wrap(err, "listing nodes") } @@ -157,7 +157,7 @@ func (c *ClusterProvider) WaitForNodes(clientSet *clientset.Clientset, ng *api.N return fmt.Errorf("timed out (after %s) waitiing for at least %d nodes to join the cluster and become ready", c.Provider.WaitTimeout(), ng.MinSize) } - if _, err = getNodes(clientSet); err != nil { + if _, err = getNodes(clientSet, ng); err != nil { return errors.Wrap(err, "re-listing nodes") } diff --git a/pkg/utils/namer.go b/pkg/utils/namer.go new file mode 100644 index 00000000000..e5940d90868 --- /dev/null +++ b/pkg/utils/namer.go @@ -0,0 +1,55 @@ +package utils + +import ( + "fmt" + "math/rand" + "time" + + "github.com/kubicorn/kubicorn/pkg/namer" +) + +func useNameOrGenerate(a, b string, generate func() string) string { + if a != "" && b != "" { + return "" + } + if a != "" { + return a + } + if b != "" { + return b + } + return generate() +} + +// ClusterName generates a name string when a and b are empty strings. +// If either a or b are non-empty, it returns whichever is non-empty. +// If neither a nor b are empty, it returns empty name, to indicate +// ambiguous usage. +func ClusterName(a, b string) string { + return useNameOrGenerate(a, b, func() string { + return fmt.Sprintf("%s-%d", namer.RandomName(), time.Now().Unix()) + }) +} + +var r = rand.New(rand.NewSource(time.Now().UnixNano())) + +const ( + randNodeGroupNameLength = 8 + randNodeGroupNameComponents = "abcdef0123456789" +) + +// NodeGroupName generates a name string when a and b are empty strings. +// If either a or b are non-empty, it returns whichever is non-empty. +// If neither a nor b are empty, it returns empty name, to indicate +// ambiguous usage. +// It uses a different naming scheme from ClusterName, so that users can +// easily distinguish a cluster name from nodegroup name. 
+func NodeGroupName(a, b string) string { + return useNameOrGenerate(a, b, func() string { + name := make([]byte, randNodeGroupNameLength) + for i := 0; i < randNodeGroupNameLength; i++ { + name[i] = randNodeGroupNameComponents[r.Intn(len(randNodeGroupNameComponents))] + } + return fmt.Sprintf("ng-%s", string(name)) + }) +} diff --git a/pkg/utils/nodegroup_name.go b/pkg/utils/nodegroup_name.go deleted file mode 100644 index 7ae41281260..00000000000 --- a/pkg/utils/nodegroup_name.go +++ /dev/null @@ -1,23 +0,0 @@ -package utils - -import ( - "fmt" - "math/rand" - "time" -) - -var r = rand.New(rand.NewSource(time.Now().UnixNano())) - -const ( - randNodeGroupNameLength = 8 - randNodeGroupNameComponents = "abcdef0123456789" -) - -// NodeGroupName generates a random hex string with the fixed length of 8 -func NodeGroupName() string { - b := make([]byte, randNodeGroupNameLength) - for i := 0; i < randNodeGroupNameLength; i++ { - b[i] = randNodeGroupNameComponents[r.Intn(len(randNodeGroupNameComponents))] - } - return fmt.Sprintf("ng-%s", string(b)) -} diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 72e73e4aeec..0e7e69ccfb5 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -1,12 +1,9 @@ package utils import ( - "fmt" "os" "strings" - "time" - "github.com/kubicorn/kubicorn/pkg/namer" kopsutils "k8s.io/kops/upup/pkg/fi/utils" ) @@ -16,23 +13,6 @@ func IsGPUInstanceType(instanceType string) bool { return strings.HasPrefix(instanceType, "p2") || strings.HasPrefix(instanceType, "p3") } -// ClusterName generates a name string when a and b are empty strings. -// If either a or b are non-empty, it returns whichever is non-empty. -// If neither a nor b are empty, it returns empty name, to indicate -// ambiguous usage. -func ClusterName(a, b string) string { - if a != "" && b != "" { - return "" - } - if a != "" { - return a - } - if b != "" { - return b - } - return fmt.Sprintf("%s-%d", namer.RandomName(), time.Now().Unix()) -} - // FileExists checks to see if a file exists. func FileExists(path string) (bool, error) { _, err := os.Stat(path)
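The name-resolution rule that the new pkg/utils/namer.go helpers give every command (use the flag or the positional argument, generate a name when neither is given, and treat both together as ambiguous) can be exercised in isolation. The sketch below assumes nothing beyond the standard library; the lower-case `nodeGroupName` wrapper and the example names are illustrative and not part of the patch:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// useNameOrGenerate mirrors the helper added in pkg/utils/namer.go:
// return whichever of a or b is non-empty, generate a fresh name when
// both are empty, and return "" when both are set so that callers can
// report ambiguous usage (flag and positional argument given together).
func useNameOrGenerate(a, b string, generate func() string) string {
	if a != "" && b != "" {
		return ""
	}
	if a != "" {
		return a
	}
	if b != "" {
		return b
	}
	return generate()
}

var r = rand.New(rand.NewSource(time.Now().UnixNano()))

// nodeGroupName follows the same scheme as the patch: "ng-" plus eight
// random characters drawn from a small hex-like alphabet.
func nodeGroupName(a, b string) string {
	return useNameOrGenerate(a, b, func() string {
		const components = "abcdef0123456789"
		name := make([]byte, 8)
		for i := range name {
			name[i] = components[r.Intn(len(components))]
		}
		return fmt.Sprintf("ng-%s", string(name))
	})
}

func main() {
	fmt.Println(nodeGroupName("", ""))         // generated, e.g. "ng-a345f4e1"
	fmt.Println(nodeGroupName("ng-web", ""))   // "ng-web": --name flag wins
	fmt.Println(nodeGroupName("", "ng-web"))   // "ng-web": positional argument wins
	fmt.Println(nodeGroupName("ng-a", "ng-b")) // "": ambiguous, callers return an error
}
```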