Skip to content

Commit

Permalink
Don't recreate GCE instances when updating resource_policies property (
Browse files Browse the repository at this point in the history
…#5206) (#10173)

* Don't recreate GCE instances when updating resource_policies property

* Remove ForceNew

* Add test case for update of resource_policies

Signed-off-by: Modular Magician <magic-modules@google.com>
  • Loading branch information
modular-magician authored Sep 28, 2021
1 parent 4c6f54c commit 3d8f3fc
Show file tree
Hide file tree
Showing 3 changed files with 292 additions and 2 deletions.
3 changes: 3 additions & 0 deletions .changelog/5206.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
```release-note:enhancement
compute: Fixed recreation of GCE instances when updating `resource_policies` property
```
34 changes: 32 additions & 2 deletions google/resource_compute_instance.go
Original file line number Diff line number Diff line change
Expand Up @@ -754,9 +754,8 @@ func resourceComputeInstance() *schema.Resource {
Elem: &schema.Schema{Type: schema.TypeString},
DiffSuppressFunc: compareSelfLinkRelativePaths,
Optional: true,
ForceNew: true,
MaxItems: 1,
Description: `A list of short names or self_links of resource policies to attach to the instance. Modifying this list will cause the instance to recreate. Currently a max of 1 resource policy is supported.`,
Description: `A list of short names or self_links of resource policies to attach to the instance. Currently a max of 1 resource policy is supported.`,
},

"reservation_affinity": {
Expand Down Expand Up @@ -1422,6 +1421,37 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err
}
}

if d.HasChange("resource_policies") {
if len(instance.ResourcePolicies) > 0 {
req := compute.InstancesRemoveResourcePoliciesRequest{ResourcePolicies: instance.ResourcePolicies}

op, err := config.NewComputeClient(userAgent).Instances.RemoveResourcePolicies(project, zone, instance.Name, &req).Do()
if err != nil {
return fmt.Errorf("Error removing existing resource policies: %s", err)
}

opErr := computeOperationWaitTime(config, op, project, "resource policies to remove", userAgent, d.Timeout(schema.TimeoutUpdate))
if opErr != nil {
return opErr
}
}

resourcePolicies := convertStringArr(d.Get("resource_policies").([]interface{}))
if len(resourcePolicies) > 0 {
req := compute.InstancesAddResourcePoliciesRequest{ResourcePolicies: resourcePolicies}

op, err := config.NewComputeClient(userAgent).Instances.AddResourcePolicies(project, zone, instance.Name, &req).Do()
if err != nil {
return fmt.Errorf("Error adding resource policies: %s", err)
}

opErr := computeOperationWaitTime(config, op, project, "resource policies to add", userAgent, d.Timeout(schema.TimeoutUpdate))
if opErr != nil {
return opErr
}
}
}

bootRequiredSchedulingChange := schedulingHasChangeRequiringReboot(d)
bootNotRequiredSchedulingChange := schedulingHasChangeWithoutReboot(d)
if bootNotRequiredSchedulingChange {
Expand Down
257 changes: 257 additions & 0 deletions google/resource_compute_instance_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -419,6 +419,58 @@ func TestAccComputeInstance_kmsDiskEncryption(t *testing.T) {
})
}

// TestAccComputeInstance_resourcePolicyUpdate walks resource_policies through
// the full lifecycle — none -> first policy -> second policy -> none — and
// checks the attached policies on the live instance after every step.
func TestAccComputeInstance_resourcePolicyUpdate(t *testing.T) {
	t.Parallel()

	var instance compute.Instance
	instanceName := fmt.Sprintf("tf-test-%s", randString(t, 10))
	scheduleName1 := fmt.Sprintf("tf-tests-%s", randString(t, 10))
	scheduleName2 := fmt.Sprintf("tf-tests-%s", randString(t, 10))

	// checkPolicies asserts the instance exists and carries exactly `count`
	// resource policies, the first of which (if any) matches `schedule`.
	checkPolicies := func(schedule string, count int) resource.TestCheckFunc {
		return resource.ComposeTestCheckFunc(
			testAccCheckComputeInstanceExists(
				t, "google_compute_instance.foobar", &instance),
			testAccCheckComputeResourcePolicy(&instance, schedule, count),
		)
	}

	vcrTest(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t),
		Steps: []resource.TestStep{
			// baseline: instance and schedule policy exist but are not linked
			{
				Config: testAccComputeInstance_instanceSchedule(instanceName, scheduleName1),
				Check:  checkPolicies("", 0),
			},
			// check adding
			{
				Config: testAccComputeInstance_addResourcePolicy(instanceName, scheduleName1),
				Check:  checkPolicies(scheduleName1, 1),
			},
			// check updating
			{
				Config: testAccComputeInstance_updateResourcePolicy(instanceName, scheduleName1, scheduleName2),
				Check:  checkPolicies(scheduleName2, 1),
			},
			// check removing
			{
				Config: testAccComputeInstance_removeResourcePolicy(instanceName, scheduleName1, scheduleName2),
				Check:  checkPolicies("", 0),
			},
		},
	})
}

func TestAccComputeInstance_attachedDisk(t *testing.T) {
t.Parallel()

Expand Down Expand Up @@ -2413,6 +2465,21 @@ func testAccCheckComputeInstanceAccessConfigHasPTR(instance *compute.Instance) r
}
}

// testAccCheckComputeResourcePolicy returns a check that verifies the fetched
// instance has exactly resourcePolicyCountWant resource policies attached and,
// when exactly one is attached, that its self link contains scheduleName.
// Pass scheduleName == "" together with a want of 0 to assert "no policies".
func testAccCheckComputeResourcePolicy(instance *compute.Instance, scheduleName string, resourcePolicyCountWant int) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		resourcePoliciesCountHave := len(instance.ResourcePolicies)
		if resourcePoliciesCountHave != resourcePolicyCountWant {
			// fixed typo: "polices" -> "policies"
			return fmt.Errorf("number of resource policies does not match: have: %d; want: %d", resourcePoliciesCountHave, resourcePolicyCountWant)
		}

		// Only a single attached policy is verified by name; the tests in this
		// file never attach more than one (schema MaxItems is 1).
		if resourcePoliciesCountHave == 1 && !strings.Contains(instance.ResourcePolicies[0], scheduleName) {
			return fmt.Errorf("got the wrong schedule: have: %s; want: %s", instance.ResourcePolicies[0], scheduleName)
		}

		return nil
	}
}

func testAccCheckComputeInstanceDisk(instance *compute.Instance, source string, delete bool, boot bool) resource.TestCheckFunc {
return func(s *terraform.State) error {
if instance.Disks == nil {
Expand Down Expand Up @@ -3603,6 +3670,196 @@ resource "google_compute_instance" "foobar" {
diskNameToEncryptionKey[diskNames[0]].KmsKeyName, diskNameToEncryptionKey[diskNames[1]].KmsKeyName)
}

// testAccComputeInstance_instanceSchedule renders a config with a bare GCE
// instance (no resource_policies set) plus an unattached instance-schedule
// resource policy, ready for later steps to attach.
func testAccComputeInstance_instanceSchedule(instanceName, scheduleName string) string {
	const tmpl = `
data "google_compute_image" "my_image" {
family = "debian-9"
project = "debian-cloud"
}
resource "google_compute_instance" "foobar" {
name = "%s"
machine_type = "e2-medium"
zone = "us-central1-a"
boot_disk {
initialize_params {
image = data.google_compute_image.my_image.self_link
}
}
network_interface {
network = "default"
}
}
resource "google_compute_resource_policy" "instance_schedule" {
name = "%s"
region = "us-central1"
instance_schedule_policy {
vm_start_schedule {
schedule = "1 1 1 1 1"
}
vm_stop_schedule {
schedule = "2 2 2 2 2"
}
time_zone = "UTC"
}
}
`
	return fmt.Sprintf(tmpl, instanceName, scheduleName)
}

// testAccComputeInstance_addResourcePolicy renders the same config as
// testAccComputeInstance_instanceSchedule but with the schedule policy
// attached to the instance via resource_policies.
func testAccComputeInstance_addResourcePolicy(instanceName, scheduleName string) string {
	const tmpl = `
data "google_compute_image" "my_image" {
family = "debian-9"
project = "debian-cloud"
}
resource "google_compute_instance" "foobar" {
name = "%s"
machine_type = "e2-medium"
zone = "us-central1-a"
boot_disk {
initialize_params {
image = data.google_compute_image.my_image.self_link
}
}
network_interface {
network = "default"
}
resource_policies = [google_compute_resource_policy.instance_schedule.self_link]
}
resource "google_compute_resource_policy" "instance_schedule" {
name = "%s"
region = "us-central1"
instance_schedule_policy {
vm_start_schedule {
schedule = "1 1 1 1 1"
}
vm_stop_schedule {
schedule = "2 2 2 2 2"
}
time_zone = "UTC"
}
}
`
	return fmt.Sprintf(tmpl, instanceName, scheduleName)
}

// testAccComputeInstance_updateResourcePolicy renders a config where the
// instance is attached to a second schedule policy; the first policy still
// exists so the step exercises a policy swap rather than create/delete.
func testAccComputeInstance_updateResourcePolicy(instanceName, scheduleName1, scheduleName2 string) string {
	const tmpl = `
data "google_compute_image" "my_image" {
family = "debian-9"
project = "debian-cloud"
}
resource "google_compute_instance" "foobar" {
name = "%s"
machine_type = "e2-medium"
zone = "us-central1-a"
boot_disk {
initialize_params {
image = data.google_compute_image.my_image.self_link
}
}
network_interface {
network = "default"
}
resource_policies = [google_compute_resource_policy.instance_schedule2.self_link]
}
resource "google_compute_resource_policy" "instance_schedule" {
name = "%s"
region = "us-central1"
instance_schedule_policy {
vm_start_schedule {
schedule = "1 1 1 1 1"
}
vm_stop_schedule {
schedule = "2 2 2 2 2"
}
time_zone = "UTC"
}
}
resource "google_compute_resource_policy" "instance_schedule2" {
name = "%s"
region = "us-central1"
instance_schedule_policy {
vm_start_schedule {
schedule = "2 2 2 2 2"
}
vm_stop_schedule {
schedule = "3 3 3 3 3"
}
time_zone = "UTC"
}
}
`
	return fmt.Sprintf(tmpl, instanceName, scheduleName1, scheduleName2)
}

// testAccComputeInstance_removeResourcePolicy renders a config that explicitly
// sets resource_policies = null while both schedule policies still exist,
// exercising detachment of all policies from the instance.
func testAccComputeInstance_removeResourcePolicy(instanceName, scheduleName1, scheduleName2 string) string {
	const tmpl = `
data "google_compute_image" "my_image" {
family = "debian-9"
project = "debian-cloud"
}
resource "google_compute_instance" "foobar" {
name = "%s"
machine_type = "e2-medium"
zone = "us-central1-a"
boot_disk {
initialize_params {
image = data.google_compute_image.my_image.self_link
}
}
network_interface {
network = "default"
}
resource_policies = null
}
resource "google_compute_resource_policy" "instance_schedule" {
name = "%s"
region = "us-central1"
instance_schedule_policy {
vm_start_schedule {
schedule = "1 1 1 1 1"
}
vm_stop_schedule {
schedule = "2 2 2 2 2"
}
time_zone = "UTC"
}
}
resource "google_compute_resource_policy" "instance_schedule2" {
name = "%s"
region = "us-central1"
instance_schedule_policy {
vm_start_schedule {
schedule = "2 2 2 2 2"
}
vm_stop_schedule {
schedule = "3 3 3 3 3"
}
time_zone = "UTC"
}
}
`
	return fmt.Sprintf(tmpl, instanceName, scheduleName1, scheduleName2)
}

func testAccComputeInstance_attachedDisk(disk, instance string) string {
return fmt.Sprintf(`
data "google_compute_image" "my_image" {
Expand Down

0 comments on commit 3d8f3fc

Please sign in to comment.