diff --git a/cluster-autoscaler/cloudprovider/digitalocean/digitalocean_cloud_provider.go b/cluster-autoscaler/cloudprovider/digitalocean/digitalocean_cloud_provider.go
index 421e2b5af1de..a9d671b1da5b 100644
--- a/cluster-autoscaler/cloudprovider/digitalocean/digitalocean_cloud_provider.go
+++ b/cluster-autoscaler/cloudprovider/digitalocean/digitalocean_cloud_provider.go
@@ -17,8 +17,10 @@ limitations under the License.
 package digitalocean
 
 import (
+	"fmt"
 	"io"
 	"os"
+	"strings"
 
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
@@ -33,6 +35,8 @@ var _ cloudprovider.CloudProvider = (*digitaloceanCloudProvider)(nil)
 const (
 	// GPULabel is the label added to nodes with GPU resource.
 	GPULabel = "cloud.digitalocean.com/gpu-node"
+
+	doProviderIDPrefix = "digitalocean://"
 )
 
 // digitaloceanCloudProvider implements CloudProvider interface.
@@ -70,13 +74,8 @@ func (d *digitaloceanCloudProvider) NodeGroups() []cloudprovider.NodeGroup {
 // should not be processed by cluster autoscaler, or non-nil error if such
 // occurred. Must be implemented.
 func (d *digitaloceanCloudProvider) NodeGroupForNode(node *apiv1.Node) (cloudprovider.NodeGroup, error) {
-	nodeID, ok := node.Labels[nodeIDLabel]
-	if !ok {
-		// CA creates fake node objects to represent upcoming VMs that haven't
-		// registered as nodes yet. They have node.Spec.ProviderID set. Use
-		// that as nodeID.
-		nodeID = node.Spec.ProviderID
-	}
+	providerID := node.Spec.ProviderID
+	nodeID := toNodeID(providerID)
 
 	klog.V(5).Infof("checking nodegroup for node ID: %q", nodeID)
 
@@ -91,8 +90,10 @@ func (d *digitaloceanCloudProvider) NodeGroupForNode(node *apiv1.Node) (cloudpro
 		}
 
 		for _, node := range nodes {
-			klog.V(6).Infof("checking node has: %q want: %q", node.Id, nodeID)
-			if node.Id != nodeID {
+			klog.V(6).Infof("checking node has: %q want: %q", node.Id, providerID)
+			// CA uses node.Spec.ProviderID when looking for (un)registered nodes,
+			// so we need to use it here too.
+			if node.Id != providerID {
 				continue
 			}
 
@@ -191,3 +192,13 @@ func BuildDigitalOcean(
 
 	return provider
 }
+
+// toProviderID returns a provider ID from the given node ID.
+func toProviderID(nodeID string) string {
+	return fmt.Sprintf("%s%s", doProviderIDPrefix, nodeID)
+}
+
+// toNodeID returns a node or droplet ID from the given provider ID.
+func toNodeID(providerID string) string {
+	return strings.TrimPrefix(providerID, doProviderIDPrefix)
+}
diff --git a/cluster-autoscaler/cloudprovider/digitalocean/digitalocean_cloud_provider_test.go b/cluster-autoscaler/cloudprovider/digitalocean/digitalocean_cloud_provider_test.go
index acfd849f9637..5b6d08a3131a 100644
--- a/cluster-autoscaler/cloudprovider/digitalocean/digitalocean_cloud_provider_test.go
+++ b/cluster-autoscaler/cloudprovider/digitalocean/digitalocean_cloud_provider_test.go
@@ -22,9 +22,9 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 
 	apiv1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/digitalocean/godo"
 )
@@ -131,16 +131,16 @@ func TestDigitalOceanCloudProvider_NodeGroupForNode(t *testing.T) {
 				{
 					ID: "1",
 					Nodes: []*godo.KubernetesNode{
-						{ID: "2", Status: &godo.KubernetesNodeStatus{State: "deleting"}},
-						{ID: "3", Status: &godo.KubernetesNodeStatus{State: "running"}},
+						{ID: "2", Status: &godo.KubernetesNodeStatus{State: "deleting"}, DropletID: "droplet-2"},
+						{ID: "3", Status: &godo.KubernetesNodeStatus{State: "running"}, DropletID: "droplet-3"},
 					},
 					AutoScale: true,
 				},
 				{
 					ID: "2",
 					Nodes: []*godo.KubernetesNode{
-						{ID: "4", Status: &godo.KubernetesNodeStatus{State: "provisioning"}},
-						{ID: "5", Status: &godo.KubernetesNodeStatus{State: "draining"}},
+						{ID: "4", Status: &godo.KubernetesNodeStatus{State: "provisioning"}, DropletID: "droplet-4"},
+						{ID: "5", Status: &godo.KubernetesNodeStatus{State: "draining"}, DropletID: "droplet-5"},
 					},
 					AutoScale: true,
 				},
@@ -153,17 +153,15 @@ func TestDigitalOceanCloudProvider_NodeGroupForNode(t *testing.T) {
 
 		// let's get the nodeGroup for the node with ID 4
 		node := &apiv1.Node{
-			ObjectMeta: metav1.ObjectMeta{
-				Labels: map[string]string{
-					nodeIDLabel: "4",
-				},
+			Spec: apiv1.NodeSpec{
+				ProviderID: toProviderID("droplet-4"),
 			},
 		}
 
 		nodeGroup, err := provider.NodeGroupForNode(node)
-		assert.NoError(t, err)
-		assert.NotNil(t, nodeGroup)
-		assert.Equal(t, nodeGroup.Id(), "2", "node group ID does not match")
+		require.NoError(t, err)
+		require.NotNil(t, nodeGroup)
+		require.Equal(t, nodeGroup.Id(), "2", "node group ID does not match")
 	})
 
 	t.Run("node does not exist", func(t *testing.T) {
@@ -175,15 +173,15 @@ func TestDigitalOceanCloudProvider_NodeGroupForNode(t *testing.T) {
 				{
 					ID: "1",
 					Nodes: []*godo.KubernetesNode{
-						{ID: "2", Status: &godo.KubernetesNodeStatus{State: "deleting"}},
-						{ID: "3", Status: &godo.KubernetesNodeStatus{State: "running"}},
+						{ID: "2", Status: &godo.KubernetesNodeStatus{State: "deleting"}, DropletID: "droplet-2"},
+						{ID: "3", Status: &godo.KubernetesNodeStatus{State: "running"}, DropletID: "droplet-3"},
 					},
 				},
 				{
 					ID: "2",
 					Nodes: []*godo.KubernetesNode{
-						{ID: "4", Status: &godo.KubernetesNodeStatus{State: "provisioning"}},
-						{ID: "5", Status: &godo.KubernetesNodeStatus{State: "draining"}},
+						{ID: "4", Status: &godo.KubernetesNodeStatus{State: "provisioning"}, DropletID: "droplet-4"},
+						{ID: "5", Status: &godo.KubernetesNodeStatus{State: "draining"}, DropletID: "droplet-5"},
 					},
 				},
 			},
@@ -194,10 +192,8 @@ func TestDigitalOceanCloudProvider_NodeGroupForNode(t *testing.T) {
 		provider := testCloudProvider(t, client)
 
 		node := &apiv1.Node{
-			ObjectMeta: metav1.ObjectMeta{
-				Labels: map[string]string{
-					nodeIDLabel: "7",
-				},
+			Spec: apiv1.NodeSpec{
+				ProviderID: toProviderID("droplet-7"),
 			},
 		}
 
diff --git a/cluster-autoscaler/cloudprovider/digitalocean/digitalocean_node_group.go b/cluster-autoscaler/cloudprovider/digitalocean/digitalocean_node_group.go
index 3fc553a8a450..94ac7148c202 100644
--- a/cluster-autoscaler/cloudprovider/digitalocean/digitalocean_node_group.go
+++ b/cluster-autoscaler/cloudprovider/digitalocean/digitalocean_node_group.go
@@ -233,9 +233,9 @@ func (n *NodeGroup) Autoprovisioned() bool {
 // toInstances converts a slice of *godo.KubernetesNode to
 // cloudprovider.Instance
 func toInstances(nodes []*godo.KubernetesNode) []cloudprovider.Instance {
-	instances := make([]cloudprovider.Instance, len(nodes))
-	for i, nd := range nodes {
-		instances[i] = toInstance(nd)
+	instances := make([]cloudprovider.Instance, 0, len(nodes))
+	for _, nd := range nodes {
+		instances = append(instances, toInstance(nd))
 	}
 	return instances
 }
@@ -244,7 +244,7 @@ func toInstances(nodes []*godo.KubernetesNode) []cloudprovider.Instance {
 // cloudprovider.Instance
 func toInstance(node *godo.KubernetesNode) cloudprovider.Instance {
 	return cloudprovider.Instance{
-		Id:     node.ID,
+		Id:     toProviderID(node.DropletID),
 		Status: toInstanceStatus(node.Status),
 	}
 }
diff --git a/cluster-autoscaler/cloudprovider/digitalocean/digitalocean_node_group_test.go b/cluster-autoscaler/cloudprovider/digitalocean/digitalocean_node_group_test.go
index ea3f7a377210..854003a79b93 100644
--- a/cluster-autoscaler/cloudprovider/digitalocean/digitalocean_node_group_test.go
+++ b/cluster-autoscaler/cloudprovider/digitalocean/digitalocean_node_group_test.go
@@ -283,18 +283,21 @@ func TestNodeGroup_Nodes(t *testing.T) {
 				Status: &godo.KubernetesNodeStatus{
 					State: "provisioning",
 				},
+				DropletID: "droplet-1",
 			},
 			{
 				ID: "2",
 				Status: &godo.KubernetesNodeStatus{
 					State: "running",
 				},
+				DropletID: "droplet-2",
 			},
 			{
 				ID: "3",
 				Status: &godo.KubernetesNodeStatus{
 					State: "deleting",
 				},
+				DropletID: "droplet-3",
 			},
 			{
 				ID: "4",
@@ -302,10 +305,12 @@ func TestNodeGroup_Nodes(t *testing.T) {
 					State:   "unknown",
 					Message: "some-message",
 				},
+				DropletID: "droplet-4",
 			},
 			{
 				// no status
-				ID: "5",
+				ID:        "5",
+				DropletID: "droplet-5",
 			},
 		},
 		Count: 5,
@@ -313,25 +318,25 @@ func TestNodeGroup_Nodes(t *testing.T) {
 
 	exp := []cloudprovider.Instance{
 		{
-			Id: "1",
+			Id: "digitalocean://droplet-1",
 			Status: &cloudprovider.InstanceStatus{
 				State: cloudprovider.InstanceCreating,
 			},
 		},
 		{
-			Id: "2",
+			Id: "digitalocean://droplet-2",
 			Status: &cloudprovider.InstanceStatus{
 				State: cloudprovider.InstanceRunning,
 			},
 		},
 		{
-			Id: "3",
+			Id: "digitalocean://droplet-3",
 			Status: &cloudprovider.InstanceStatus{
 				State: cloudprovider.InstanceDeleting,
 			},
 		},
 		{
-			Id: "4",
+			Id: "digitalocean://droplet-4",
 			Status: &cloudprovider.InstanceStatus{
 				ErrorInfo: &cloudprovider.InstanceErrorInfo{
 					ErrorClass: cloudprovider.OtherErrorClass,
@@ -341,7 +346,7 @@ func TestNodeGroup_Nodes(t *testing.T) {
 			},
 		},
 		{
-			Id: "5",
+			Id: "digitalocean://droplet-5",
 		},
 	}
 
diff --git a/cluster-autoscaler/cloudprovider/digitalocean/godo/kubernetes.go b/cluster-autoscaler/cloudprovider/digitalocean/godo/kubernetes.go
index 97ff43a291ad..35fcd19af6dd 100644
--- a/cluster-autoscaler/cloudprovider/digitalocean/godo/kubernetes.go
+++ b/cluster-autoscaler/cloudprovider/digitalocean/godo/kubernetes.go
@@ -311,9 +311,10 @@ type KubernetesNodePool struct {
 
 // KubernetesNode represents a Node in a node pool in a Kubernetes cluster.
 type KubernetesNode struct {
-	ID     string                `json:"id,omitempty"`
-	Name   string                `json:"name,omitempty"`
-	Status *KubernetesNodeStatus `json:"status,omitempty"`
+	ID        string                `json:"id,omitempty"`
+	Name      string                `json:"name,omitempty"`
+	Status    *KubernetesNodeStatus `json:"status,omitempty"`
+	DropletID string                `json:"droplet_id,omitempty"`
 
 	CreatedAt time.Time `json:"created_at,omitempty"`
 	UpdatedAt time.Time `json:"updated_at,omitempty"`
diff --git a/cluster-autoscaler/cloudprovider/digitalocean/godo/kubernetes_test.go b/cluster-autoscaler/cloudprovider/digitalocean/godo/kubernetes_test.go
index 035145d3b5f5..1d9471c5e6be 100644
--- a/cluster-autoscaler/cloudprovider/digitalocean/godo/kubernetes_test.go
+++ b/cluster-autoscaler/cloudprovider/digitalocean/godo/kubernetes_test.go
@@ -18,7 +18,7 @@ func TestKubernetesClusters_ListClusters(t *testing.T) {
 	kubeSvc := client.Kubernetes
 
 	want := []*KubernetesCluster{
-		{
+		&KubernetesCluster{
 			ID:         "8d91899c-0739-4a1a-acc5-deadbeefbb8f",
 			Name:       "blablabla",
 			RegionSlug: "nyc1",
@@ -42,6 +42,7 @@ func TestKubernetesClusters_ListClusters(t *testing.T) {
 						ID:        "",
 						Name:      "",
 						Status:    &KubernetesNodeStatus{},
+						DropletID: "droplet-1",
 						CreatedAt: time.Date(2018, 6, 21, 8, 44, 38, 0, time.UTC),
 						UpdatedAt: time.Date(2018, 6, 21, 8, 44, 38, 0, time.UTC),
 					},
@@ -49,6 +50,7 @@ func TestKubernetesClusters_ListClusters(t *testing.T) {
 						ID:        "",
 						Name:      "",
 						Status:    &KubernetesNodeStatus{},
+						DropletID: "droplet-2",
 						CreatedAt: time.Date(2018, 6, 21, 8, 44, 38, 0, time.UTC),
 						UpdatedAt: time.Date(2018, 6, 21, 8, 44, 38, 0, time.UTC),
 					},
@@ -58,7 +60,7 @@ func TestKubernetesClusters_ListClusters(t *testing.T) {
 			CreatedAt: time.Date(2018, 6, 21, 8, 44, 38, 0, time.UTC),
 			UpdatedAt: time.Date(2018, 6, 21, 8, 44, 38, 0, time.UTC),
 		},
-		{
+		&KubernetesCluster{
 			ID:         "deadbeef-dead-4aa5-beef-deadbeef347d",
 			Name:       "antoine",
 			RegionSlug: "nyc1",
@@ -81,6 +83,7 @@ func TestKubernetesClusters_ListClusters(t *testing.T) {
 						ID:        "deadbeef-dead-beef-dead-deadbeefb4b1",
 						Name:      "worker-393",
 						Status:    &KubernetesNodeStatus{State: "running"},
+						DropletID: "droplet-3",
 						CreatedAt: time.Date(2018, 6, 15, 7, 10, 23, 0, time.UTC),
 						UpdatedAt: time.Date(2018, 6, 15, 7, 11, 26, 0, time.UTC),
 					},
@@ -88,6 +91,7 @@ func TestKubernetesClusters_ListClusters(t *testing.T) {
 						ID:        "deadbeef-dead-beef-dead-deadbeefb4b2",
 						Name:      "worker-394",
 						Status:    &KubernetesNodeStatus{State: "running"},
+						DropletID: "droplet-4",
 						CreatedAt: time.Date(2018, 6, 15, 7, 10, 23, 0, time.UTC),
 						UpdatedAt: time.Date(2018, 6, 15, 7, 11, 26, 0, time.UTC),
 					},
@@ -129,6 +133,7 @@ func TestKubernetesClusters_ListClusters(t *testing.T) {
 					"status": {
 						"state": ""
 					},
+					"droplet_id": "droplet-1",
 					"created_at": "2018-06-21T08:44:38Z",
 					"updated_at": "2018-06-21T08:44:38Z"
 				},
@@ -138,6 +143,7 @@ func TestKubernetesClusters_ListClusters(t *testing.T) {
 					"status": {
 						"state": ""
 					},
+					"droplet_id": "droplet-2",
 					"created_at": "2018-06-21T08:44:38Z",
 					"updated_at": "2018-06-21T08:44:38Z"
 				}
@@ -175,15 +181,17 @@ func TestKubernetesClusters_ListClusters(t *testing.T) {
 					"status": {
 						"state": "running"
 					},
+					"droplet_id": "droplet-3",
 					"created_at": "2018-06-15T07:10:23Z",
 					"updated_at": "2018-06-15T07:11:26Z"
 				},
 				{
 					"id": "deadbeef-dead-beef-dead-deadbeefb4b2",
 					"name": "worker-394",
-					"status": {
-						"state": "running"
-					},
+					"status": {
+						"state": "running"
+					},
+					"droplet_id": "droplet-4",
 					"created_at": "2018-06-15T07:10:23Z",
 					"updated_at": "2018-06-15T07:11:26Z"
 				}
@@ -234,6 +242,7 @@ func TestKubernetesClusters_Get(t *testing.T) {
 						ID:        "deadbeef-dead-beef-dead-deadbeefb4b1",
Name: "worker-393", Status: &KubernetesNodeStatus{State: "running"}, + DropletID: "droplet-1", CreatedAt: time.Date(2018, 6, 15, 7, 10, 23, 0, time.UTC), UpdatedAt: time.Date(2018, 6, 15, 7, 11, 26, 0, time.UTC), }, @@ -241,6 +250,7 @@ func TestKubernetesClusters_Get(t *testing.T) { ID: "deadbeef-dead-beef-dead-deadbeefb4b2", Name: "worker-394", Status: &KubernetesNodeStatus{State: "running"}, + DropletID: "droplet-2", CreatedAt: time.Date(2018, 6, 15, 7, 10, 23, 0, time.UTC), UpdatedAt: time.Date(2018, 6, 15, 7, 11, 26, 0, time.UTC), }, @@ -284,15 +294,17 @@ func TestKubernetesClusters_Get(t *testing.T) { "status": { "state": "running" }, + "droplet_id": "droplet-1", "created_at": "2018-06-15T07:10:23Z", "updated_at": "2018-06-15T07:11:26Z" }, { "id": "deadbeef-dead-beef-dead-deadbeefb4b2", "name": "worker-394", - "status": { - "state": "running" - }, + "status": { + "state": "running" + }, + "droplet_id": "droplet-2", "created_at": "2018-06-15T07:10:23Z", "updated_at": "2018-06-15T07:11:26Z" } @@ -443,7 +455,7 @@ func TestKubernetesClusters_Create(t *testing.T) { Tags: []string{"cluster-tag-1", "cluster-tag-2"}, VPCUUID: "880b7f98-f062-404d-b33c-458d545696f6", NodePools: []*KubernetesNodePool{ - { + &KubernetesNodePool{ ID: "8d91899c-0739-4a1a-acc5-deadbeefbb8a", Size: "s-1vcpu-1gb", Count: 2, @@ -463,7 +475,7 @@ func TestKubernetesClusters_Create(t *testing.T) { Tags: want.Tags, VPCUUID: want.VPCUUID, NodePools: []*KubernetesNodePoolCreateRequest{ - { + &KubernetesNodePoolCreateRequest{ Size: want.NodePools[0].Size, Count: want.NodePools[0].Count, Name: want.NodePools[0].Name, @@ -541,7 +553,7 @@ func TestKubernetesClusters_Create_AutoScalePool(t *testing.T) { Tags: []string{"cluster-tag-1", "cluster-tag-2"}, VPCUUID: "880b7f98-f062-404d-b33c-458d545696f6", NodePools: []*KubernetesNodePool{ - { + &KubernetesNodePool{ ID: "8d91899c-0739-4a1a-acc5-deadbeefbb8a", Size: "s-1vcpu-1gb", Count: 2, @@ -564,7 +576,7 @@ func TestKubernetesClusters_Create_AutoScalePool(t *testing.T) { Tags: want.Tags, VPCUUID: want.VPCUUID, NodePools: []*KubernetesNodePoolCreateRequest{ - { + &KubernetesNodePoolCreateRequest{ Size: want.NodePools[0].Size, Count: want.NodePools[0].Count, Name: want.NodePools[0].Name, @@ -645,7 +657,7 @@ func TestKubernetesClusters_Update(t *testing.T) { Tags: []string{"cluster-tag-1", "cluster-tag-2"}, VPCUUID: "880b7f98-f062-404d-b33c-458d545696f6", NodePools: []*KubernetesNodePool{ - { + &KubernetesNodePool{ ID: "8d91899c-0739-4a1a-acc5-deadbeefbb8a", Size: "s-1vcpu-1gb", Count: 2, @@ -734,7 +746,7 @@ func TestKubernetesClusters_Update_FalseAutoUpgrade(t *testing.T) { Tags: []string{"cluster-tag-1", "cluster-tag-2"}, VPCUUID: "880b7f98-f062-404d-b33c-458d545696f6", NodePools: []*KubernetesNodePool{ - { + &KubernetesNodePool{ ID: "8d91899c-0739-4a1a-acc5-deadbeefbb8a", Size: "s-1vcpu-1gb", Count: 2,