Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat(gke): allow synchronous cluster teardown #491

Merged
merged 8 commits into from
Jan 10, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,8 @@
- GKE cluster builder allows creating a subnet for the cluster instead of using
a default one.
[#490](https://github.com/Kong/kubernetes-testing-framework/pull/490)

- GKE cluster is able to wait for its cleanup synchronously.
[#491](https://github.com/Kong/kubernetes-testing-framework/pull/491)
- MetalLB addon will use an extended timeout when fetching manifests from GH which
should improve its stability.
[#492](https://github.com/Kong/kubernetes-testing-framework/pull/492)
Expand Down
27 changes: 19 additions & 8 deletions pkg/clusters/types/gke/builder.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ type Builder struct {
Name string
project, location string
jsonCreds []byte
waitForTeardown bool

createSubnet bool
addons clusters.Addons
Expand Down Expand Up @@ -68,6 +69,15 @@ func (b *Builder) WithClusterMinorVersion(major, minor uint64) *Builder {
return b
}

// WithWaitForTeardown configures whether the cluster's cleanup should block
// until the teardown operation has completed, rather than merely requesting
// deletion and returning immediately.
//
// Default: `false`.
func (b *Builder) WithWaitForTeardown(wait bool) *Builder {
	b.waitForTeardown = wait
	return b
}

// WithCreateSubnet sets a flag telling whether the builder should create a subnet
// for the cluster. If set to `true`, it will create a subnetwork in a default VPC
// with a uniquely generated name. The subnetwork will be removed once the cluster
Expand Down Expand Up @@ -176,14 +186,15 @@ func (b *Builder) Build(ctx context.Context) (clusters.Cluster, error) {
}

cluster := &Cluster{
name: b.Name,
project: b.project,
location: b.location,
jsonCreds: b.jsonCreds,
client: k8s,
cfg: restCFG,
addons: make(clusters.Addons),
l: &sync.RWMutex{},
name: b.Name,
project: b.project,
location: b.location,
jsonCreds: b.jsonCreds,
waitForTeardown: b.waitForTeardown,
client: k8s,
cfg: restCFG,
addons: make(clusters.Addons),
l: &sync.RWMutex{},
}

if err := utils.ClusterInitHooks(ctx, cluster); err != nil {
Expand Down
54 changes: 44 additions & 10 deletions pkg/clusters/types/gke/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,10 @@ import (
"os"
"strings"
"sync"
"time"

container "cloud.google.com/go/container/apiv1"
"cloud.google.com/go/container/apiv1/containerpb"
"github.com/blang/semver/v4"
"google.golang.org/api/option"
"k8s.io/client-go/kubernetes"
Expand All @@ -22,14 +24,15 @@ import (

// Cluster is a clusters.Cluster implementation backed by Google Kubernetes Engine (GKE).
//
// NOTE: the diff rendering duplicated the pre-change and post-change field
// lists; this is the post-change (valid) field set, which includes the new
// waitForTeardown flag.
type Cluster struct {
	name      string
	project   string
	location  string
	jsonCreds []byte
	// waitForTeardown makes Cleanup block until the GKE delete operation
	// reports completion instead of returning right after requesting it.
	waitForTeardown bool
	client          *kubernetes.Clientset
	cfg             *rest.Config
	addons          clusters.Addons
	l               *sync.RWMutex
}

// NewFromExistingWithEnv provides a new clusters.Cluster backed by an existing GKE cluster,
Expand Down Expand Up @@ -111,13 +114,44 @@ func (c *Cluster) Cleanup(ctx context.Context) error {
}
defer mgrc.Close()

_, err = deleteCluster(ctx, mgrc, c.name, c.project, c.location)
return err
teardownOp, err := deleteCluster(ctx, mgrc, c.name, c.project, c.location)
if err != nil {
return err
}

if c.waitForTeardown {
fullTeardownOpName := fmt.Sprintf("projects/%s/locations/%s/operations/%s", c.project, c.location, teardownOp.Name)
if err := waitForTeardown(ctx, mgrc, fullTeardownOpName); err != nil {
return err
}
}

return nil
}

return nil
}

// waitForTeardown polls the cluster-manager operation identified by
// teardownOpName once per second until it reports a DONE status. It returns
// the context's error if ctx is cancelled first, or any error encountered
// while fetching the operation's state.
func waitForTeardown(ctx context.Context, mgrc *container.ClusterManagerClient, teardownOpName string) error {
	const pollInterval = time.Second

	ticker := time.NewTicker(pollInterval)
	defer ticker.Stop()

	for {
		// Wait for either cancellation or the next poll tick.
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}

		req := &containerpb.GetOperationRequest{Name: teardownOpName}
		op, err := mgrc.GetOperation(ctx, req)
		if err != nil {
			return err
		}
		if op.Status == containerpb.Operation_DONE {
			return nil
		}
	}
}

// Client returns the cluster's Kubernetes clientset.
func (c *Cluster) Client() *kubernetes.Clientset {
	return c.client
}
Expand Down
2 changes: 2 additions & 0 deletions test/e2e/gke_cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,7 @@ func testGKECluster(t *testing.T, createSubnet bool) {
t.Logf("configuring the GKE cluster PROJECT=(%s) LOCATION=(%s)", gkeProject, gkeLocation)
builder := gke.NewBuilder([]byte(gkeCreds), gkeProject, gkeLocation)
builder.WithClusterMinorVersion(1, 23)
builder.WithWaitForTeardown(true)
builder.WithCreateSubnet(createSubnet)

t.Logf("building cluster %s (this can take some time)", builder.Name)
Expand All @@ -72,6 +73,7 @@ func testGKECluster(t *testing.T, createSubnet bool) {

t.Logf("setting up cleanup for cluster %s", cluster.Name())
defer func() {
t.Logf("running cluster cleanup for %s", cluster.Name())
assert.NoError(t, cluster.Cleanup(ctx))
}()

Expand Down