diff --git a/pkg/backends/syncer_test.go b/pkg/backends/syncer_test.go index 77c9ccacad..f549980917 100644 --- a/pkg/backends/syncer_test.go +++ b/pkg/backends/syncer_test.go @@ -105,7 +105,12 @@ func (p *portset) check(fakeGCE *gce.Cloud) error { return fmt.Errorf("backend for port %+v should exist, but got: %v", sp.NodePort, err) } } else { - if bs, err := composite.GetBackendService(fakeGCE, key, features.VersionFromServicePort(&sp)); !utils.IsHTTPErrorCode(err, http.StatusNotFound) { + bs, err := composite.GetBackendService(fakeGCE, key, features.VersionFromServicePort(&sp)) + if err == nil || !utils.IsHTTPErrorCode(err, http.StatusNotFound) { + if sp.PrimaryIPNEGEnabled { + // It is expected that these Backends should not get cleaned up in the GC loop. + continue + } return fmt.Errorf("backend for port %+v should not exist, but got %v", sp, bs) } } @@ -333,7 +338,7 @@ func TestGC(t *testing.T) { } } -// Test GC with both ELB and ILBs +// Test GC with both ELB and ILBs. Add in an L4 ILB NEG which should not be deleted as part of GC. func TestGCMixed(t *testing.T) { fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) syncer := newTestSyncer(fakeGCE) @@ -345,6 +350,7 @@ func TestGCMixed(t *testing.T) { {NodePort: 84, Protocol: annotations.ProtocolHTTP, NEGEnabled: true, L7ILBEnabled: true, BackendNamer: defaultNamer}, {NodePort: 85, Protocol: annotations.ProtocolHTTPS, NEGEnabled: true, L7ILBEnabled: true, BackendNamer: defaultNamer}, {NodePort: 86, Protocol: annotations.ProtocolHTTP, NEGEnabled: true, L7ILBEnabled: true, BackendNamer: defaultNamer}, + {ID: utils.ServicePortID{Service: types.NamespacedName{Name: "testsvc"}}, PrimaryIPNEGEnabled: true, BackendNamer: defaultNamer}, } ps := newPortset(svcNodePorts) if err := ps.add(svcNodePorts); err != nil { diff --git a/pkg/healthchecks/healthchecks_l4_test.go b/pkg/healthchecks/healthchecks_l4_test.go new file mode 100644 index 0000000000..1c863cbb8c --- /dev/null +++ b/pkg/healthchecks/healthchecks_l4_test.go @@ -0,0 +1,105 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package healthchecks
+
+import (
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/ingress-gce/pkg/composite"
+	"testing"
+)
+
+func TestMergeHealthChecks(t *testing.T) {
+	t.Parallel()
+	for _, tc := range []struct {
+		desc                   string
+		checkIntervalSec       int64
+		timeoutSec             int64
+		healthyThreshold       int64
+		unhealthyThreshold     int64
+		wantCheckIntervalSec   int64
+		wantTimeoutSec         int64
+		wantHealthyThreshold   int64
+		wantUnhealthyThreshold int64
+	}{
+		{"unchanged", gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold, gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold},
+		{"interval - too small - should reconcile", gceHcCheckIntervalSeconds - 1, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold, gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold},
+		{"timeout - too small - should reconcile", gceHcCheckIntervalSeconds, gceHcTimeoutSeconds - 1, gceHcHealthyThreshold, gceHcUnhealthyThreshold, gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold},
+		{"healthy threshold - too small - should reconcile", gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold - 1, gceHcUnhealthyThreshold, gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold},
+		{"unhealthy threshold - too small - should reconcile", gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold - 1, gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold},
+		{"interval - user configured - should keep", gceHcCheckIntervalSeconds + 1, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold, gceHcCheckIntervalSeconds + 1, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold},
+		{"timeout - user configured - should keep", gceHcCheckIntervalSeconds, gceHcTimeoutSeconds + 1, gceHcHealthyThreshold, gceHcUnhealthyThreshold, gceHcCheckIntervalSeconds, gceHcTimeoutSeconds + 1, gceHcHealthyThreshold, gceHcUnhealthyThreshold},
+		{"healthy threshold - user configured - should keep", gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold + 1, gceHcUnhealthyThreshold, gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold + 1, gceHcUnhealthyThreshold},
+		{"unhealthy threshold - user configured - should keep", gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold + 1, gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold + 1},
+	} {
+		t.Run(tc.desc, func(t *testing.T) {
+			wantHC := NewL4HealthCheck("hc", types.NamespacedName{Name: "svc", Namespace: "default"}, false, "/", 12345)
+			hc := &composite.HealthCheck{
+				CheckIntervalSec:   tc.checkIntervalSec,
+				TimeoutSec:         tc.timeoutSec,
+				HealthyThreshold:   tc.healthyThreshold,
+				UnhealthyThreshold: tc.unhealthyThreshold,
+			}
+			mergeHealthChecks(hc, wantHC)
+			if wantHC.CheckIntervalSec != tc.wantCheckIntervalSec {
+				t.Errorf("wantHC.CheckIntervalSec = %d; want %d", wantHC.CheckIntervalSec, tc.wantCheckIntervalSec)
+			}
+			if wantHC.TimeoutSec != tc.wantTimeoutSec {
+				t.Errorf("wantHC.TimeoutSec = %d; want %d", wantHC.TimeoutSec, tc.wantTimeoutSec)
+			}
+			if wantHC.HealthyThreshold != tc.wantHealthyThreshold {
+				t.Errorf("wantHC.HealthyThreshold = %d; want %d", wantHC.HealthyThreshold, tc.wantHealthyThreshold)
+			}
+			if wantHC.UnhealthyThreshold != tc.wantUnhealthyThreshold {
+				t.Errorf("wantHC.UnhealthyThreshold = %d; want %d", wantHC.UnhealthyThreshold, tc.wantUnhealthyThreshold)
+			}
+		})
+	}
+}
+
+func TestCompareHealthChecks(t *testing.T) {
+	t.Parallel()
+	for _, tc := range []struct {
+		desc        string
+		modifier    func(*composite.HealthCheck)
+		wantChanged bool
+	}{
+		{"unchanged", nil, false},
+		{"nil HttpHealthCheck", func(hc *composite.HealthCheck) { hc.HttpHealthCheck = nil }, true},
+		{"desc does not match", func(hc *composite.HealthCheck) { hc.Description = "bad-desc" }, true},
+		{"port does not match", func(hc *composite.HealthCheck) { hc.HttpHealthCheck.Port = 54321 }, true},
+		{"requestPath does not match", func(hc *composite.HealthCheck) { hc.HttpHealthCheck.RequestPath = "/anotherone" }, true},
+		{"interval needs update", func(hc *composite.HealthCheck) { hc.CheckIntervalSec = gceHcCheckIntervalSeconds - 1 }, true},
+		{"timeout needs update", func(hc *composite.HealthCheck) { hc.TimeoutSec = gceHcTimeoutSeconds - 1 }, true},
+		{"healthy threshold needs update", func(hc *composite.HealthCheck) { hc.HealthyThreshold = gceHcHealthyThreshold - 1 }, true},
+		{"unhealthy threshold needs update", func(hc *composite.HealthCheck) { hc.UnhealthyThreshold = gceHcUnhealthyThreshold - 1 }, true},
+		{"interval does not need update", func(hc *composite.HealthCheck) { hc.CheckIntervalSec = gceHcCheckIntervalSeconds + 1 }, false},
+		{"timeout does not need update", func(hc *composite.HealthCheck) { hc.TimeoutSec = gceHcTimeoutSeconds + 1 }, false},
+		{"healthy threshold does not need update", func(hc *composite.HealthCheck) { hc.HealthyThreshold = gceHcHealthyThreshold + 1 }, false},
+		{"unhealthy threshold does not need update", func(hc *composite.HealthCheck) { hc.UnhealthyThreshold = gceHcUnhealthyThreshold + 1 }, false},
+	} {
+		t.Run(tc.desc, func(t *testing.T) {
+			hc := NewL4HealthCheck("hc", types.NamespacedName{Name: "svc", Namespace: "default"}, false, "/", 12345)
+			wantHC := NewL4HealthCheck("hc", types.NamespacedName{Name: "svc", Namespace: "default"}, false, "/", 12345)
+			if tc.modifier != nil {
+				tc.modifier(hc)
+			}
+			if gotChanged := needToUpdateHealthChecks(hc, wantHC); gotChanged != tc.wantChanged {
+				t.Errorf("needToUpdateHealthChecks(%#v, %#v) = %t; want changed = %t", hc, wantHC, gotChanged, tc.wantChanged)
+			}
+		})
+	}
+}
diff --git a/pkg/l4controller/l4controller_test.go b/pkg/l4controller/l4controller_test.go
new file mode 100644
index 0000000000..dfa17ecdf4
--- /dev/null
+++ b/pkg/l4controller/l4controller_test.go
@@ -0,0 +1,229 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package l4controller + +import ( + "k8s.io/client-go/kubernetes" + testing2 "k8s.io/client-go/testing" + "k8s.io/ingress-gce/pkg/loadbalancers" + "k8s.io/ingress-gce/pkg/neg/types" + "reflect" + "testing" + "time" + + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + api_v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/ingress-gce/pkg/composite" + "k8s.io/ingress-gce/pkg/context" + "k8s.io/ingress-gce/pkg/test" + "k8s.io/ingress-gce/pkg/utils/common" + "k8s.io/ingress-gce/pkg/utils/namer" + "k8s.io/legacy-cloud-providers/gce" +) + +const ( + clusterUID = "aaaaa" + resetLBStatus = "{\"status\":{\"loadBalancer\":{\"ingress\":null}}}" +) + +func newServiceController() *L4Controller { + kubeClient := fake.NewSimpleClientset() + fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) + (fakeGCE.Compute().(*cloud.MockGCE)).MockForwardingRules.InsertHook = loadbalancers.InsertForwardingRuleHook + + namer := namer.NewNamer(clusterUID, "") + + stopCh := make(chan struct{}) + ctxConfig := context.ControllerContextConfig{ + Namespace: api_v1.NamespaceAll, + ResyncPeriod: 1 * time.Minute, + } + ctx := context.NewControllerContext(nil, kubeClient, nil, nil, fakeGCE, namer, "" /*kubeSystemUID*/, ctxConfig) + return NewL4Controller(ctx, stopCh) +} + +func addILBService(l4c *L4Controller, svc *api_v1.Service) { + l4c.ctx.KubeClient.CoreV1().Services(svc.Namespace).Create(svc) + l4c.ctx.ServiceInformer.GetIndexer().Add(svc) +} + +func updateILBService(l4c *L4Controller, svc *api_v1.Service) { + l4c.ctx.KubeClient.CoreV1().Services(svc.Namespace).Update(svc) + l4c.ctx.ServiceInformer.GetIndexer().Update(svc) +} + +func deleteILBService(l4c *L4Controller, svc *api_v1.Service) { + l4c.ctx.KubeClient.CoreV1().Services(svc.Namespace).Delete(svc.Name, &v1.DeleteOptions{}) + l4c.ctx.ServiceInformer.GetIndexer().Delete(svc) +} + +func addNEG(l4c *L4Controller, svc *api_v1.Service) { + // Also create a fake NEG for this service since the sync code will try to link the backend service to NEG + negName := l4c.ctx.ClusterNamer.PrimaryIPNEG(svc.Namespace, svc.Name) + neg := &composite.NetworkEndpointGroup{Name: negName} + key := meta.ZonalKey(negName, types.TestZone1) + composite.CreateNetworkEndpointGroup(l4c.ctx.Cloud, key, neg) +} + +func getKeyForSvc(svc *api_v1.Service, t *testing.T) string { + key, err := common.KeyFunc(svc) + if err != nil { + t.Fatalf("Failed to get key for service %v, err : %v", svc, err) + } + return key +} + +// validatePatchRequest validates that the given client patched the resource with the given change. +// This is needed because there is a bug in go-client test implementation where a patch operation cannot be used +// to delete fields - https://github.com/kubernetes/client-go/issues/607 +// TODO remove this once https://github.com/kubernetes/client-go/issues/607 has been fixed. 
+func validatePatchRequest(client kubernetes.Interface, patchVal string, t *testing.T) {
+	fakeClient := client.(*fake.Clientset)
+	actionLen := len(fakeClient.Actions())
+	if actionLen == 0 {
+		t.Fatalf("Expected at least one action in fake client")
+	}
+	// The latest action should be the one setting status to the given value.
+	patchAction := fakeClient.Actions()[actionLen-1].(testing2.PatchAction)
+	if !reflect.DeepEqual(patchAction.GetPatch(), []byte(patchVal)) {
+		t.Errorf("Expected patch '%s', got '%s'", patchVal, string(patchAction.GetPatch()))
+	}
+}
+
+func validateSvcStatus(svc *api_v1.Service, expectStatus bool, t *testing.T) {
+	if common.HasGivenFinalizer(svc.ObjectMeta, common.ILBFinalizerV2) != expectStatus {
+		t.Fatalf("Expected L4 finalizer present to be %v, but it was %v", expectStatus, !expectStatus)
+	}
+	if len(svc.Status.LoadBalancer.Ingress) == 0 || svc.Status.LoadBalancer.Ingress[0].IP == "" {
+		if expectStatus {
+			t.Fatalf("Invalid LoadBalancer status field in service - %+v", svc.Status.LoadBalancer)
+		}
+	}
+	if len(svc.Status.LoadBalancer.Ingress) > 0 && !expectStatus {
+		// TODO uncomment below once https://github.com/kubernetes/client-go/issues/607 has been fixed.
+		// t.Fatalf("Expected LoadBalancer status to be empty, Got %v", svc.Status.LoadBalancer)
+	}
+}
+
+// TestProcessCreateOrUpdate verifies the processing loop in L4Controller.
+// This test adds a new service, then performs a valid update and then modifies the service type to External and ensures
+// that the status field is as expected in each case.
+func TestProcessCreateOrUpdate(t *testing.T) {
+	l4c := newServiceController()
+	newSvc := test.NewL4ILBService(false, 8080)
+	addILBService(l4c, newSvc)
+	addNEG(l4c, newSvc)
+	err := l4c.sync(getKeyForSvc(newSvc, t))
+	if err != nil {
+		t.Errorf("Failed to sync newly added service %s, err %v", newSvc.Name, err)
+	}
+	// List the service and ensure that it contains the finalizer as well as Status field.
+	newSvc, err = l4c.client.CoreV1().Services(newSvc.Namespace).Get(newSvc.Name, v1.GetOptions{})
+	if err != nil {
+		t.Errorf("Failed to lookup service %s, err: %v", newSvc.Name, err)
+	}
+	validateSvcStatus(newSvc, true, t)
+
+	// Set the ExternalTrafficPolicy of the service to Local.
+	newSvc.Spec.ExternalTrafficPolicy = api_v1.ServiceExternalTrafficPolicyTypeLocal
+	updateILBService(l4c, newSvc)
+	err = l4c.sync(getKeyForSvc(newSvc, t))
+	if err != nil {
+		t.Errorf("Failed to sync updated service %s, err %v", newSvc.Name, err)
+	}
+	// List the service and ensure that it contains the finalizer as well as Status field.
+	newSvc, err = l4c.client.CoreV1().Services(newSvc.Namespace).Get(newSvc.Name, v1.GetOptions{})
+	if err != nil {
+		t.Errorf("Failed to lookup service %s, err: %v", newSvc.Name, err)
+	}
+	validateSvcStatus(newSvc, true, t)
+
+	// Remove the Internal LoadBalancer annotation; this should trigger a cleanup.
+	delete(newSvc.Annotations, gce.ServiceAnnotationLoadBalancerType)
+	updateILBService(l4c, newSvc)
+	err = l4c.sync(getKeyForSvc(newSvc, t))
+	if err != nil {
+		t.Errorf("Failed to sync updated service %s, err %v", newSvc.Name, err)
+	}
+	// TODO remove this once https://github.com/kubernetes/client-go/issues/607 has been fixed.
+	validatePatchRequest(l4c.client, resetLBStatus, t)
+	// List the service and ensure that the finalizer and status have been removed.
+	newSvc, err = l4c.client.CoreV1().Services(newSvc.Namespace).Get(newSvc.Name, v1.GetOptions{})
+	if err != nil {
+		t.Errorf("Failed to lookup service %s, err: %v", newSvc.Name, err)
+	}
+	validateSvcStatus(newSvc, false, t)
+}
+
+func TestProcessDeletion(t *testing.T) {
+	l4c := newServiceController()
+	newSvc := test.NewL4ILBService(false, 8080)
+	addILBService(l4c, newSvc)
+	addNEG(l4c, newSvc)
+	err := l4c.sync(getKeyForSvc(newSvc, t))
+	if err != nil {
+		t.Errorf("Failed to sync newly added service %s, err %v", newSvc.Name, err)
+	}
+	// List the service and ensure that it contains the finalizer as well as Status field.
+	newSvc, err = l4c.client.CoreV1().Services(newSvc.Namespace).Get(newSvc.Name, v1.GetOptions{})
+	if err != nil {
+		t.Errorf("Failed to lookup service %s, err: %v", newSvc.Name, err)
+	}
+	validateSvcStatus(newSvc, true, t)
+
+	// Mark the service for deletion by setting the deletion timestamp; the copy fetched above carries the finalizer.
+	newSvc.DeletionTimestamp = &v1.Time{}
+	updateILBService(l4c, newSvc)
+	err = l4c.sync(getKeyForSvc(newSvc, t))
+	if err != nil {
+		t.Errorf("Failed to sync updated service %s, err %v", newSvc.Name, err)
+	}
+	// TODO remove this once https://github.com/kubernetes/client-go/issues/607 has been fixed.
+	validatePatchRequest(l4c.client, resetLBStatus, t)
+	// List the service and ensure that the finalizer and status have been removed.
+	newSvc, err = l4c.client.CoreV1().Services(newSvc.Namespace).Get(newSvc.Name, v1.GetOptions{})
+	if err != nil {
+		t.Errorf("Failed to lookup service %s, err: %v", newSvc.Name, err)
+	}
+	validateSvcStatus(newSvc, false, t)
+	deleteILBService(l4c, newSvc)
+	newSvc, err = l4c.client.CoreV1().Services(newSvc.Namespace).Get(newSvc.Name, v1.GetOptions{})
+	if newSvc != nil {
+		t.Errorf("Expected service to be deleted, but was found - %v", newSvc)
+	}
+}
+
+func TestProcessCreateLegacyService(t *testing.T) {
+	l4c := newServiceController()
+	newSvc := test.NewL4ILBService(false, 8080)
+	// Set the legacy finalizer
+	newSvc.Finalizers = append(newSvc.Finalizers, common.LegacyILBFinalizer)
+	addILBService(l4c, newSvc)
+	err := l4c.sync(getKeyForSvc(newSvc, t))
+	if err != nil {
+		t.Errorf("Failed to sync newly added service %s, err %v", newSvc.Name, err)
+	}
+	// List the service and ensure that the status field is not updated.
+	svc, err := l4c.client.CoreV1().Services(newSvc.Namespace).Get(newSvc.Name, v1.GetOptions{})
+	if err != nil {
+		t.Errorf("Failed to lookup service %s, err: %v", newSvc.Name, err)
+	}
+	validateSvcStatus(svc, false, t)
+}
diff --git a/pkg/loadbalancers/fakes.go b/pkg/loadbalancers/fakes.go
index dcab2ee54b..4ca9a41528 100644
--- a/pkg/loadbalancers/fakes.go
+++ b/pkg/loadbalancers/fakes.go
@@ -43,3 +43,12 @@ func InsertGlobalForwardingRuleHook(ctx context.Context, key *meta.Key, obj *com
 	}
 	return false, nil
 }
+
+// InsertForwardingRuleHook mimics the GCE API by assigning a default IP address
+// to forwarding rules that are created without one.
+func InsertForwardingRuleHook(ctx context.Context, key *meta.Key, obj *compute.ForwardingRule, m *cloud.MockForwardingRules) (b bool, e error) {
+	if obj.IPAddress == "" {
+		obj.IPAddress = "10.0.0.1"
+	}
+	return false, nil
+}
diff --git a/pkg/loadbalancers/l4_test.go b/pkg/loadbalancers/l4_test.go
new file mode 100644
index 0000000000..8a95cd19bb
--- /dev/null
+++ b/pkg/loadbalancers/l4_test.go
@@ -0,0 +1,1095 @@
+package loadbalancers
+
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +import ( + "google.golang.org/api/compute/v1" + "k8s.io/ingress-gce/pkg/backends" + "k8s.io/ingress-gce/pkg/firewalls" + "k8s.io/ingress-gce/pkg/utils" + "reflect" + "strings" + "sync" + "testing" + + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/mock" + "k8s.io/api/core/v1" + "k8s.io/client-go/tools/record" + servicehelper "k8s.io/cloud-provider/service/helpers" + "k8s.io/ingress-gce/pkg/composite" + "k8s.io/ingress-gce/pkg/healthchecks" + "k8s.io/ingress-gce/pkg/test" + namer_util "k8s.io/ingress-gce/pkg/utils/namer" + "k8s.io/legacy-cloud-providers/gce" +) + +const ( + clusterUID = "aaaaa" + // TODO Uncomment after https://github.com/kubernetes/kubernetes/pull/87667 is available in vendor. + // eventMsgFirewallChange = "XPN Firewall change required by network admin" +) + +func getFakeGCECloud(vals gce.TestClusterValues) *gce.Cloud { + fakeGCE := gce.NewFakeGCECloud(vals) + // InsertHook required to assign an IP Address for forwarding rule + (fakeGCE.Compute().(*cloud.MockGCE)).MockAddresses.InsertHook = mock.InsertAddressHook + (fakeGCE.Compute().(*cloud.MockGCE)).MockAlphaAddresses.X = mock.AddressAttributes{} + (fakeGCE.Compute().(*cloud.MockGCE)).MockAddresses.X = mock.AddressAttributes{} + (fakeGCE.Compute().(*cloud.MockGCE)).MockForwardingRules.InsertHook = mock.InsertFwdRuleHook + + (fakeGCE.Compute().(*cloud.MockGCE)).MockRegionBackendServices.UpdateHook = mock.UpdateRegionBackendServiceHook + (fakeGCE.Compute().(*cloud.MockGCE)).MockHealthChecks.UpdateHook = mock.UpdateHealthCheckHook + (fakeGCE.Compute().(*cloud.MockGCE)).MockFirewalls.UpdateHook = mock.UpdateFirewallHook + return fakeGCE +} + +func TestEnsureInternalBackendServiceUpdates(t *testing.T) { + t.Parallel() + fakeGCE := getFakeGCECloud(gce.DefaultTestClusterValues()) + svc := test.NewL4ILBService(false, 8080) + namer := namer_util.NewNamer(clusterUID, "") + l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{}) + bsName := l.namer.PrimaryIPNEG(l.Service.Namespace, l.Service.Name) + _, err := l.backendPool.EnsureL4BackendService(bsName, "", "TCP", string(svc.Spec.SessionAffinity), string(cloud.SchemeInternal), l.NamespacedName, meta.VersionGA) + if err != nil { + t.Errorf("Failed to ensure backend service %s - err %v", bsName, err) + } + + // Update the Internal Backend Service with a new ServiceAffinity + _, err = l.backendPool.EnsureL4BackendService(bsName, "", "TCP", string(v1.ServiceAffinityNone), string(cloud.SchemeInternal), l.NamespacedName, meta.VersionGA) + if err != nil { + t.Errorf("Failed to ensure backend service %s - err %v", bsName, err) + } + key := meta.RegionalKey(bsName, l.cloud.Region()) + bs, err := composite.GetBackendService(l.cloud, key, meta.VersionGA) + if err != nil { + t.Errorf("Failed to get backend service %s - err %v", bsName, err) + } + if bs.SessionAffinity != strings.ToUpper(string(v1.ServiceAffinityNone)) { + t.Errorf("Expected session affinity '%s' in %+v, Got '%s'", 
strings.ToUpper(string(v1.ServiceAffinityNone)), bs, bs.SessionAffinity) + } + // Change the Connection Draining timeout to a different value manually. Also update session Affinity to trigger + // an update in the Ensure method. The timeout value should not be reconciled. + newTimeout := int64(backends.DefaultConnectionDrainingTimeoutSeconds * 2) + bs.ConnectionDraining.DrainingTimeoutSec = newTimeout + bs.SessionAffinity = strings.ToUpper(string(v1.ServiceAffinityClientIP)) + err = composite.UpdateBackendService(l.cloud, key, bs) + if err != nil { + t.Errorf("Failed to update backend service with new connection draining timeout - err %v", err) + } + bs, err = l.backendPool.EnsureL4BackendService(bsName, "", "TCP", string(v1.ServiceAffinityNone), string(cloud.SchemeInternal), l.NamespacedName, meta.VersionGA) + if err != nil { + t.Errorf("Failed to ensure backend service %s - err %v", bsName, err) + } + if bs.SessionAffinity != strings.ToUpper(string(v1.ServiceAffinityNone)) { + t.Errorf("Backend service did not get updated.") + } + if bs.ConnectionDraining.DrainingTimeoutSec != newTimeout { + t.Errorf("Connection Draining timeout got reconciled to %d, expected %d", bs.ConnectionDraining.DrainingTimeoutSec, newTimeout) + } +} + +func TestEnsureInternalLoadBalancer(t *testing.T) { + t.Parallel() + nodeNames := []string{"test-node-1"} + vals := gce.DefaultTestClusterValues() + fakeGCE := getFakeGCECloud(vals) + + svc := test.NewL4ILBService(false, 8080) + namer := namer_util.NewNamer(clusterUID, "") + l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{}) + _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) + if err != nil { + t.Errorf("Unexpected error when adding nodes %v", err) + } + + status, err := l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty loadBalancer status using handler %v", l) + } + assertInternalLbResources(t, svc, l, nodeNames) +} + +func TestEnsureInternalLoadBalancerTypeChange(t *testing.T) { + t.Parallel() + nodeNames := []string{"test-node-1"} + vals := gce.DefaultTestClusterValues() + fakeGCE := getFakeGCECloud(vals) + + svc := test.NewL4ILBService(false, 8080) + namer := namer_util.NewNamer(clusterUID, "") + l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{}) + _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) + if err != nil { + t.Errorf("Unexpected error when adding nodes %v", err) + } + status, err := l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty loadBalancer status using handler %v", l) + } + assertInternalLbResources(t, svc, l, nodeNames) + + // Now add the latest annotation and change scheme to external + svc.Annotations[gce.ServiceAnnotationLoadBalancerType] = "" + // This will be invoked by service_controller + err = l.EnsureInternalLoadBalancerDeleted(svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + assertInternalLbResourcesDeleted(t, svc, true, l) +} + +func TestEnsureInternalLoadBalancerWithExistingResources(t *testing.T) { + t.Parallel() + + vals := gce.DefaultTestClusterValues() + nodeNames := []string{"test-node-1"} + + fakeGCE := getFakeGCECloud(vals) + svc := test.NewL4ILBService(false, 8080) + namer := namer_util.NewNamer(clusterUID, "") + l := 
NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{}) + _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) + if err != nil { + t.Errorf("Unexpected error when adding nodes %v", err) + } + + lbName := l.namer.PrimaryIPNEG(svc.Namespace, svc.Name) + + // Create the expected resources necessary for an Internal Load Balancer + sharedHC := !servicehelper.RequestsOnlyLocalTraffic(svc) + hcName, _ := healthchecks.HealthCheckName(sharedHC, l.namer.UID(), lbName) + hcPath, hcPort := gce.GetNodesHealthCheckPath(), gce.GetNodesHealthCheckPort() + _, hcLink, err := healthchecks.EnsureL4HealthCheck(l.cloud, hcName, l.NamespacedName, sharedHC, hcPath, hcPort) + if err != nil { + t.Errorf("Failed to create healthcheck, err %v", err) + } + _, err = l.backendPool.EnsureL4BackendService(lbName, hcLink, "TCP", string(l.Service.Spec.SessionAffinity), + string(cloud.SchemeInternal), l.NamespacedName, meta.VersionGA) + if err != nil { + t.Errorf("Failed to create backendservice, err %v", err) + } + status, err := l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty loadBalancer status using handler %v", l) + } + assertInternalLbResources(t, svc, l, nodeNames) +} + +// TestEnsureInternalLoadBalancerClearPreviousResources creates ILB resources with incomplete configuration and verifies +// that they are updated when the controller processes the load balancer service. +func TestEnsureInternalLoadBalancerClearPreviousResources(t *testing.T) { + t.Parallel() + + vals := gce.DefaultTestClusterValues() + nodeNames := []string{"test-node-1"} + + fakeGCE := getFakeGCECloud(vals) + svc := test.NewL4ILBService(true, 8080) + namer := namer_util.NewNamer(clusterUID, "") + l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{}) + _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) + if err != nil { + t.Errorf("Unexpected error when adding nodes %v", err) + } + + lbName := l.namer.PrimaryIPNEG(svc.Namespace, svc.Name) + frName := l.GetFRName() + key, err := composite.CreateKey(l.cloud, frName, meta.Regional) + if err != nil { + t.Errorf("Unexpected error when creating key - %v", err) + } + + // Create a ForwardingRule that's missing an IP address + existingFwdRule := &composite.ForwardingRule{ + Name: frName, + IPAddress: "", + Ports: []string{"123"}, + IPProtocol: "TCP", + LoadBalancingScheme: string(cloud.SchemeInternal), + } + if err = composite.CreateForwardingRule(l.cloud, key, existingFwdRule); err != nil { + t.Errorf("Failed to create fake forwarding rule %s, err %v", lbName, err) + } + key.Name = lbName + // Create a Firewall that's missing a Description + existingFirewall := &compute.Firewall{ + Name: lbName, + Network: fakeGCE.NetworkURL(), + Allowed: []*compute.FirewallAllowed{ + { + IPProtocol: "tcp", + Ports: []string{"123"}, + }, + }, + } + fakeGCE.CreateFirewall(existingFirewall) + + sharedHealthCheck := !servicehelper.RequestsOnlyLocalTraffic(svc) + hcName, _ := healthchecks.HealthCheckName(sharedHealthCheck, l.namer.UID(), lbName) + + // Create a healthcheck with an incomplete fields + existingHC := &composite.HealthCheck{Name: hcName} + // hcName will be same as lbName since this service uses trafficPolicy Local. So the same key can be used. 
+ if err = composite.CreateHealthCheck(fakeGCE, key, existingHC); err != nil { + t.Errorf("Failed to create fake healthcheck %s, err %v", hcName, err) + } + + // Create a backend Service that's missing Description and Backends + existingBS := &composite.BackendService{ + Name: lbName, + Protocol: "TCP", + LoadBalancingScheme: string(cloud.SchemeInternal), + } + + if err = composite.CreateBackendService(fakeGCE, key, existingBS); err != nil { + t.Errorf("Failed to create fake backend service %s, err %v", lbName, err) + } + existingFwdRule.BackendService = existingBS.Name + + if _, err = l.EnsureInternalLoadBalancer(nodeNames, svc); err != nil { + t.Errorf("Failed to ensure loadBalancer %s, err %v", lbName, err) + } + key.Name = frName + // Expect new resources with the correct attributes to be created + newFwdRule, err := composite.GetForwardingRule(fakeGCE, key, meta.VersionGA) + if err != nil { + t.Errorf("Failed to lookup forwarding rule %s, err %v", lbName, err) + } + if newFwdRule == existingFwdRule { + t.Errorf("Expected incomplete forwarding rule to be updated") + } + + newFirewall, err := fakeGCE.GetFirewall(lbName) + if err != nil { + t.Errorf("Failed to lookup firewall rule %s, err %v", lbName, err) + } + if newFirewall == existingFirewall { + t.Errorf("Expected incomplete firewall rule to be updated") + } + + key.Name = lbName + newHC, err := composite.GetHealthCheck(fakeGCE, key, meta.VersionGA) + if err != nil { + t.Errorf("Failed to lookup healthcheck %s, err %v", lbName, err) + } + if newHC == existingHC || newHC.SelfLink == "" { + t.Errorf("Expected incomplete healthcheck to be updated") + } + + newBS, err := composite.GetBackendService(fakeGCE, key, meta.VersionGA) + if err != nil { + t.Errorf("Failed to lookup backend service %s, err %v", lbName, err) + } + if newBS == existingBS { + t.Errorf("Expected incomplete backend service to be updated") + } +} + +// TestUpdateResourceLinks verifies that an existing backend service created with different healthchecks is reconciled +// upon load balancer sync. The other healthchecks are not deleted. 
+func TestUpdateResourceLinks(t *testing.T) { + t.Parallel() + + vals := gce.DefaultTestClusterValues() + nodeNames := []string{"test-node-1"} + + fakeGCE := getFakeGCECloud(vals) + svc := test.NewL4ILBService(true, 8080) + namer := namer_util.NewNamer(clusterUID, "") + l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{}) + _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) + if err != nil { + t.Errorf("Unexpected error when adding nodes %v", err) + } + + lbName := l.namer.PrimaryIPNEG(svc.Namespace, svc.Name) + key, err := composite.CreateKey(l.cloud, lbName, meta.Regional) + if err != nil { + t.Errorf("Unexpected error when creating key - %v", err) + } + key.Name = "hc1" + hc1 := &composite.HealthCheck{Name: "hc1"} + if err = composite.CreateHealthCheck(fakeGCE, key, hc1); err != nil { + t.Errorf("Failed to create fake healthcheck hc1 , err %v", err) + } + + key.Name = "hc2" + hc2 := &composite.HealthCheck{Name: "hc2"} + if err = composite.CreateHealthCheck(fakeGCE, key, hc2); err != nil { + t.Errorf("Failed to create fake healthcheck hc2, err %v", err) + } + + key.Name = lbName + // Create a backend Service that's missing Description and Backends + existingBS := &composite.BackendService{ + Name: lbName, + Protocol: "TCP", + LoadBalancingScheme: string(cloud.SchemeInternal), + HealthChecks: []string{"hc1", "hc2"}, + } + + if err = composite.CreateBackendService(fakeGCE, key, existingBS); err != nil { + t.Errorf("Failed to create fake backend service %s, err %v", lbName, err) + } + bs, err := composite.GetBackendService(fakeGCE, key, meta.VersionGA) + if err != nil { + t.Errorf("Failed to lookup backend service") + } + if !reflect.DeepEqual(bs.HealthChecks, []string{"hc1", "hc2"}) { + t.Errorf("Unexpected healthchecks in backend service - %v", bs.HealthChecks) + } + if _, err = l.EnsureInternalLoadBalancer(nodeNames, svc); err != nil { + t.Errorf("Failed to ensure loadBalancer %s, err %v", lbName, err) + } + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + // verifies that the right healthcheck is present + assertInternalLbResources(t, svc, l, nodeNames) + + // ensure that the other healthchecks still exist. 
+	key.Name = "hc1"
+	if hc1, err = composite.GetHealthCheck(fakeGCE, key, meta.VersionGA); err != nil {
+		t.Errorf("Failed to lookup healthcheck - hc1")
+	}
+	if hc1 == nil {
+		t.Errorf("Got nil healthcheck")
+	}
+	key.Name = "hc2"
+	if hc2, err = composite.GetHealthCheck(fakeGCE, key, meta.VersionGA); err != nil {
+		t.Errorf("Failed to lookup healthcheck - hc2")
+	}
+	if hc2 == nil {
+		t.Errorf("Got nil healthcheck")
+	}
+}
+
+func TestEnsureInternalLoadBalancerHealthCheckConfigurable(t *testing.T) {
+	t.Parallel()
+
+	vals := gce.DefaultTestClusterValues()
+	nodeNames := []string{"test-node-1"}
+
+	fakeGCE := getFakeGCECloud(vals)
+	svc := test.NewL4ILBService(true, 8080)
+	namer := namer_util.NewNamer(clusterUID, "")
+	l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{})
+	_, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName)
+	if err != nil {
+		t.Errorf("Unexpected error when adding nodes %v", err)
+	}
+	lbName := l.namer.PrimaryIPNEG(svc.Namespace, svc.Name)
+	key, err := composite.CreateKey(l.cloud, lbName, meta.Regional)
+	if err != nil {
+		t.Errorf("Unexpected error when creating key - %v", err)
+	}
+	sharedHealthCheck := !servicehelper.RequestsOnlyLocalTraffic(svc)
+	hcName, _ := healthchecks.HealthCheckName(sharedHealthCheck, l.namer.UID(), lbName)
+
+	// Create a healthcheck with an incorrect check interval; the default is 8s.
+	existingHC := &composite.HealthCheck{Name: hcName, CheckIntervalSec: 6000}
+	if err = composite.CreateHealthCheck(fakeGCE, key, existingHC); err != nil {
+		t.Errorf("Failed to create fake healthcheck %s, err %v", hcName, err)
+	}
+
+	if _, err = l.EnsureInternalLoadBalancer(nodeNames, svc); err != nil {
+		t.Errorf("Failed to ensure loadBalancer %s, err %v", lbName, err)
+	}
+
+	newHC, err := composite.GetHealthCheck(fakeGCE, key, meta.VersionGA)
+	if err != nil {
+		t.Errorf("Failed to lookup healthcheck %s, err %v", lbName, err)
+	}
+	if newHC.CheckIntervalSec != existingHC.CheckIntervalSec {
+		t.Errorf("Check interval got incorrectly reconciled")
+	}
+}
+
+func TestEnsureInternalLoadBalancerDeleted(t *testing.T) {
+	t.Parallel()
+
+	vals := gce.DefaultTestClusterValues()
+	fakeGCE := getFakeGCECloud(vals)
+	nodeNames := []string{"test-node-1"}
+	svc := test.NewL4ILBService(false, 8080)
+	namer := namer_util.NewNamer(clusterUID, "")
+	l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{})
+	_, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName)
+	if err != nil {
+		t.Errorf("Unexpected error when adding nodes %v", err)
+	}
+	status, err := l.EnsureInternalLoadBalancer(nodeNames, svc)
+	if err != nil {
+		t.Errorf("Failed to ensure loadBalancer, err %v", err)
+	}
+	if len(status.Ingress) == 0 {
+		t.Errorf("Got empty loadBalancer status using handler %v", l)
+	}
+	assertInternalLbResources(t, svc, l, nodeNames)
+
+	// Delete the loadbalancer
+	err = l.EnsureInternalLoadBalancerDeleted(svc)
+	if err != nil {
+		t.Errorf("Unexpected error %v", err)
+	}
+	assertInternalLbResourcesDeleted(t, svc, true, l)
+}
+
+func TestEnsureInternalLoadBalancerDeletedTwiceDoesNotError(t *testing.T) {
+	t.Parallel()
+
+	vals := gce.DefaultTestClusterValues()
+	fakeGCE := getFakeGCECloud(vals)
+	nodeNames := []string{"test-node-1"}
+	svc := test.NewL4ILBService(false, 8080)
+	namer := namer_util.NewNamer(clusterUID, "")
+	l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{})
+	_, err := test.CreateAndInsertNodes(l.cloud,
nodeNames, vals.ZoneName) + if err != nil { + t.Errorf("Unexpected error when adding nodes %v", err) + } + status, err := l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty loadBalancer status using handler %v", l) + } + assertInternalLbResources(t, svc, l, nodeNames) + + // Delete the loadbalancer + err = l.EnsureInternalLoadBalancerDeleted(svc) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + assertInternalLbResourcesDeleted(t, svc, true, l) + + // Deleting the loadbalancer and resources again should not cause an error. + err = l.EnsureInternalLoadBalancerDeleted(svc) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + assertInternalLbResourcesDeleted(t, svc, true, l) +} + +func TestEnsureInternalLoadBalancerWithSpecialHealthCheck(t *testing.T) { + vals := gce.DefaultTestClusterValues() + fakeGCE := getFakeGCECloud(vals) + nodeNames := []string{"test-node-1"} + svc := test.NewL4ILBService(false, 8080) + namer := namer_util.NewNamer(clusterUID, "") + l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{}) + _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) + if err != nil { + t.Errorf("Unexpected error when adding nodes %v", err) + } + + healthCheckNodePort := int32(10101) + svc.Spec.HealthCheckNodePort = healthCheckNodePort + svc.Spec.Type = v1.ServiceTypeLoadBalancer + svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal + + status, err := l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty loadBalancer status using handler %v", l) + } + assertInternalLbResources(t, svc, l, nodeNames) + + lbName := l.namer.PrimaryIPNEG(svc.Namespace, svc.Name) + key, err := composite.CreateKey(l.cloud, lbName, meta.Global) + if err != nil { + t.Errorf("Unexpected error when creating key - %v", err) + } + hc, err := composite.GetHealthCheck(l.cloud, key, meta.VersionGA) + if err != nil || hc == nil { + t.Errorf("Failed to get healthcheck, err %v", err) + } + if hc.HttpHealthCheck.Port != int64(healthCheckNodePort) { + t.Errorf("Unexpected port in healthcheck, expected %d, Got %d", healthCheckNodePort, hc.HttpHealthCheck.Port) + } +} + +type EnsureILBParams struct { + clusterName string + clusterID string + service *v1.Service + existingFwdRule *composite.ForwardingRule +} + +// newEnsureILBParams is the constructor of EnsureILBParams. +func newEnsureILBParams() *EnsureILBParams { + vals := gce.DefaultTestClusterValues() + return &EnsureILBParams{ + vals.ClusterName, + vals.ClusterID, + test.NewL4ILBService(false, 8080), + nil, + } +} + +// TestEnsureInternalLoadBalancerErrors tests the function +// EnsureInternalLoadBalancer, making sure the system won't panic when +// exceptions raised by gce. 
+func TestEnsureInternalLoadBalancerErrors(t *testing.T) { + vals := gce.DefaultTestClusterValues() + var params *EnsureILBParams + + for desc, tc := range map[string]struct { + adjustParams func(*EnsureILBParams) + injectMock func(*cloud.MockGCE) + }{ + "EnsureInternalBackendService failed": { + injectMock: func(c *cloud.MockGCE) { + c.MockRegionBackendServices.GetHook = mock.GetRegionBackendServicesErrHook + }, + }, + "Create internal health check failed": { + injectMock: func(c *cloud.MockGCE) { + c.MockHealthChecks.GetHook = mock.GetHealthChecksInternalErrHook + }, + }, + "Create firewall failed": { + injectMock: func(c *cloud.MockGCE) { + c.MockFirewalls.InsertHook = mock.InsertFirewallsUnauthorizedErrHook + }, + }, + "Create region forwarding rule failed": { + injectMock: func(c *cloud.MockGCE) { + c.MockForwardingRules.InsertHook = mock.InsertForwardingRulesInternalErrHook + }, + }, + "Get region forwarding rule failed": { + injectMock: func(c *cloud.MockGCE) { + c.MockForwardingRules.GetHook = mock.GetForwardingRulesInternalErrHook + }, + }, + "Delete region forwarding rule failed": { + adjustParams: func(params *EnsureILBParams) { + params.existingFwdRule = &composite.ForwardingRule{BackendService: "badBackendService"} + }, + injectMock: func(c *cloud.MockGCE) { + c.MockForwardingRules.DeleteHook = mock.DeleteForwardingRuleErrHook + }, + }, + } { + t.Run(desc, func(t *testing.T) { + fakeGCE := getFakeGCECloud(gce.DefaultTestClusterValues()) + nodeNames := []string{"test-node-1"} + params = newEnsureILBParams() + if tc.adjustParams != nil { + tc.adjustParams(params) + } + namer := namer_util.NewNamer(clusterUID, "") + l := NewL4Handler(params.service, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{}) + //lbName := l.namer.PrimaryIPNEG(params.service.Namespace, params.service.Name) + frName := l.GetFRName() + key, err := composite.CreateKey(l.cloud, frName, meta.Regional) + if err != nil { + t.Errorf("Unexpected error when creating key - %v", err) + } + _, err = test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) + if err != nil { + t.Errorf("Unexpected error when adding nodes %v", err) + } + // Create a dummy forwarding rule in order to trigger a delete in the EnsureInternalLoadBalancer function. 
+ if err = composite.CreateForwardingRule(l.cloud, key, &composite.ForwardingRule{Name: frName}); err != nil { + t.Errorf("Failed to create fake forwarding rule %s, err %v", frName, err) + } + // Inject error hooks after creating the forwarding rule + if tc.injectMock != nil { + tc.injectMock(fakeGCE.Compute().(*cloud.MockGCE)) + } + status, err := l.EnsureInternalLoadBalancer(nodeNames, params.service) + if err == nil { + t.Errorf("Expected error when %s", desc) + } + if status != nil { + t.Errorf("Expected empty status when %s, Got %v", desc, status) + } + }) + } +} + +/* TODO uncomment after https://github.com/kubernetes/kubernetes/pull/87667 is available in vendor +func TestEnsureLoadBalancerDeletedSucceedsOnXPN(t *testing.T) { + t.Parallel() + + vals := gce.DefaultTestClusterValues() + vals.OnXPN = true + fakeGCE := gce.NewFakeGCECloud(vals) + c := fakeGCE.Compute().(*cloud.MockGCE) + svc := test.NewL4ILBService(false, 8080) + nodeNames := []string{"test-node-1"} + namer := namer_util.NewNamer(clusterUID, "") + recorder := record.NewFakeRecorder(100) + l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, recorder, &sync.Mutex{})) + _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) + if err != nil { + t.Errorf("Unexpected error when adding nodes %v", err) + } + fwName := l.namer.PrimaryIPNEG(svc.Namespace, svc.Name) + status, err := l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty loadBalancer status using handler %v", l) + } + assertInternalLbResources(t, svc, l, nodeNames) + + c.MockFirewalls.DeleteHook = mock.DeleteFirewallsUnauthorizedErrHook + + err = l.EnsureInternalLoadBalancerDeleted(svc) + if err != nil { + t.Errorf("Failed to delete loadBalancer, err %v", err) + } + gcloudcmd := gce.FirewallToGCloudDeleteCmd(fwName, fakeGCE.ProjectID()) + XPNErrMsg := fmt.Sprintf("%s %s: `%v`", v1.EventTypeNormal, eventMsgFirewallChange, gcloudcmd) + err = test.CheckEvent(recorder, XPNErrMsg, true) + if err != nil { + t.Errorf("Failed to check event, err %v", err) + } +} +*/ + +func TestEnsureInternalLoadBalancerEnableGlobalAccess(t *testing.T) { + t.Parallel() + + vals := gce.DefaultTestClusterValues() + fakeGCE := getFakeGCECloud(vals) + + nodeNames := []string{"test-node-1"} + svc := test.NewL4ILBService(false, 8080) + namer := namer_util.NewNamer(clusterUID, "") + l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{}) + _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) + if err != nil { + t.Errorf("Unexpected error when adding nodes %v", err) + } + frName := l.GetFRName() + status, err := l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty loadBalancer status using handler %v", l) + } + assertInternalLbResources(t, svc, l, nodeNames) + + // Change service to include the global access annotation + svc.Annotations[gce.ServiceAnnotationILBAllowGlobalAccess] = "true" + status, err = l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty loadBalancer status using handler %v", l) + } + assertInternalLbResources(t, svc, l, nodeNames) + betaRuleDescString, err := utils.MakeL4ILBServiceDescription(utils.ServiceKeyFunc(svc.Namespace, 
svc.Name), "1.2.3.0", meta.VersionBeta) + if err != nil { + t.Errorf("Unexpected error when creating description - %v", err) + } + key, err := composite.CreateKey(l.cloud, frName, meta.Regional) + if err != nil { + t.Errorf("Unexpected error when creating key - %v", err) + } + fwdRule, err := composite.GetForwardingRule(l.cloud, key, meta.VersionBeta) + if err != nil { + t.Errorf("Unexpected error when looking up forwarding rule - %v", err) + } + if !fwdRule.AllowGlobalAccess { + t.Errorf("Unexpected false value for AllowGlobalAccess") + } + if fwdRule.Description != betaRuleDescString { + t.Errorf("Expected description %s, Got %s", betaRuleDescString, fwdRule.Description) + } + // remove the annotation and disable global access. + delete(svc.Annotations, gce.ServiceAnnotationILBAllowGlobalAccess) + status, err = l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty loadBalancer status using handler %v", l) + } + gaRuleDescString, err := utils.MakeL4ILBServiceDescription(utils.ServiceKeyFunc(svc.Namespace, svc.Name), "1.2.3.0", meta.VersionGA) + if err != nil { + t.Errorf("Unexpected error when creating description - %v", err) + } + // Fetch the beta version of the rule to make sure GlobalAccess field is off. Calling the GA API will always show + // this field as false. + fwdRule, err = composite.GetForwardingRule(l.cloud, key, meta.VersionBeta) + if err != nil { + t.Errorf("Unexpected error when looking up forwarding rule - %v", err) + } + + if fwdRule.AllowGlobalAccess { + t.Errorf("Unexpected true value for AllowGlobalAccess") + } + if fwdRule.Description != gaRuleDescString { + t.Errorf("Expected description %s, Got %s", gaRuleDescString, fwdRule.Description) + } + // Delete the service + err = l.EnsureInternalLoadBalancerDeleted(svc) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + assertInternalLbResourcesDeleted(t, svc, true, l) +} + +func TestEnsureInternalLoadBalancerDisableGlobalAccess(t *testing.T) { + t.Parallel() + + vals := gce.DefaultTestClusterValues() + fakeGCE := getFakeGCECloud(vals) + + nodeNames := []string{"test-node-1"} + svc := test.NewL4ILBService(false, 8080) + namer := namer_util.NewNamer(clusterUID, "") + l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{}) + _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) + if err != nil { + t.Errorf("Unexpected error when adding nodes %v", err) + } + svc.Annotations[gce.ServiceAnnotationILBAllowGlobalAccess] = "true" + frName := l.GetFRName() + status, err := l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty loadBalancer status using handler %v", l) + } + assertInternalLbResources(t, svc, l, nodeNames) + key, err := composite.CreateKey(l.cloud, frName, meta.Regional) + if err != nil { + t.Errorf("Unexpected error when creating key - %v", err) + } + + fwdRule, err := composite.GetForwardingRule(l.cloud, key, meta.VersionBeta) + if err != nil { + t.Errorf("Unexpected error when looking up forwarding rule - %v", err) + } + if !fwdRule.AllowGlobalAccess { + t.Errorf("Unexpected false value for AllowGlobalAccess") + } + + // disable global access - setting the annotation to false or removing annotation will disable it + svc.Annotations[gce.ServiceAnnotationILBAllowGlobalAccess] = "false" + status, err = 
l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty loadBalancer status using handler %v", l) + } + fwdRule, err = composite.GetForwardingRule(l.cloud, key, meta.VersionBeta) + if err != nil { + t.Errorf("Unexpected error when looking up forwarding rule - %v", err) + } + if fwdRule.AllowGlobalAccess { + t.Errorf("Unexpected 'true' value for AllowGlobalAccess") + } + + // Delete the service + err = l.EnsureInternalLoadBalancerDeleted(svc) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + assertInternalLbResourcesDeleted(t, svc, true, l) +} + +func TestEnsureInternalLoadBalancerCustomSubnet(t *testing.T) { + t.Parallel() + nodeNames := []string{"test-node-1"} + vals := gce.DefaultTestClusterValues() + fakeGCE := getFakeGCECloud(vals) + fakeGCE.AlphaFeatureGate = gce.NewAlphaFeatureGate([]string{gce.AlphaFeatureILBCustomSubnet}) + + svc := test.NewL4ILBService(false, 8080) + namer := namer_util.NewNamer(clusterUID, "") + l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{}) + _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) + if err != nil { + t.Errorf("Unexpected error when adding nodes %v", err) + } + status, err := l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty loadBalancer status using handler %v", l) + } + + frName := l.GetFRName() + fwdRule, err := composite.GetForwardingRule(l.cloud, meta.RegionalKey(frName, l.cloud.Region()), meta.VersionGA) + if err != nil || fwdRule == nil { + t.Errorf("Unexpected error looking up forwarding rule - err %v", err) + } + if fwdRule.Subnetwork != "" { + t.Errorf("Unexpected subnet value %s in ILB ForwardingRule", fwdRule.Subnetwork) + } + + // Change service to include the global access annotation and request static ip + requestedIP := "4.5.6.7" + svc.Annotations[gce.ServiceAnnotationILBSubnet] = "test-subnet" + svc.Spec.LoadBalancerIP = requestedIP + status, err = l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty loadBalancer status using handler %v", l) + } + if status.Ingress[0].IP != requestedIP { + t.Fatalf("Reserved IP %s not propagated, Got '%s'", requestedIP, status.Ingress[0].IP) + } + fwdRule, err = composite.GetForwardingRule(l.cloud, meta.RegionalKey(frName, l.cloud.Region()), meta.VersionGA) + if err != nil || fwdRule == nil { + t.Errorf("Unexpected error looking up forwarding rule - err %v", err) + } + if !strings.HasSuffix(fwdRule.Subnetwork, "test-subnet") { + t.Errorf("Unexpected subnet value '%s' in ILB ForwardingRule, expected 'test-subnet'", fwdRule.Subnetwork) + } + + // Change to a different subnet + svc.Annotations[gce.ServiceAnnotationILBSubnet] = "another-subnet" + status, err = l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty loadBalancer status using handler %v", l) + } + if status.Ingress[0].IP != requestedIP { + t.Errorf("Reserved IP %s not propagated, Got %s", requestedIP, status.Ingress[0].IP) + } + fwdRule, err = composite.GetForwardingRule(l.cloud, meta.RegionalKey(frName, l.cloud.Region()), meta.VersionGA) + if err != nil || fwdRule == 
nil {
+		t.Errorf("Unexpected error looking up forwarding rule - err %v", err)
+	}
+	if !strings.HasSuffix(fwdRule.Subnetwork, "another-subnet") {
+		t.Errorf("Unexpected subnet value '%s' in ILB ForwardingRule, expected 'another-subnet'", fwdRule.Subnetwork)
+	}
+	// Remove the annotation - ILB should revert to the default subnet.
+	delete(svc.Annotations, gce.ServiceAnnotationILBSubnet)
+	status, err = l.EnsureInternalLoadBalancer(nodeNames, svc)
+	if err != nil {
+		t.Errorf("Failed to ensure loadBalancer, err %v", err)
+	}
+	if len(status.Ingress) == 0 {
+		t.Errorf("Got empty loadBalancer status using handler %v", l)
+	}
+	fwdRule, err = composite.GetForwardingRule(l.cloud, meta.RegionalKey(frName, l.cloud.Region()), meta.VersionGA)
+	if err != nil || fwdRule == nil {
+		t.Errorf("Unexpected error %v", err)
+	}
+	if fwdRule.Subnetwork != "" {
+		t.Errorf("Unexpected subnet value '%s' in ILB ForwardingRule.", fwdRule.Subnetwork)
+	}
+	// Delete the loadbalancer
+	err = l.EnsureInternalLoadBalancerDeleted(svc)
+	if err != nil {
+		t.Errorf("Unexpected error deleting loadbalancer - err %v", err)
+	}
+	assertInternalLbResourcesDeleted(t, svc, true, l)
+}
+
+func TestEnsureInternalFirewallPortRanges(t *testing.T) {
+	vals := gce.DefaultTestClusterValues()
+	fakeGCE := getFakeGCECloud(vals)
+	svc := test.NewL4ILBService(false, 8080)
+	namer := namer_util.NewNamer(clusterUID, "")
+	l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{})
+	fwName := l.namer.PrimaryIPNEG(l.Service.Namespace, l.Service.Name)
+	tc := struct {
+		Input  []int
+		Result []string
+	}{
+		Input: []int{15, 37, 900, 2002, 2003, 2003, 2004, 2004}, Result: []string{"15", "37", "900", "2002-2004"},
+	}
+	c := fakeGCE.Compute().(*cloud.MockGCE)
+	c.MockFirewalls.InsertHook = nil
+	c.MockFirewalls.UpdateHook = nil
+
+	nodeNames := []string{"test-node-1"}
+	_, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName)
+	if err != nil {
+		t.Errorf("Unexpected error when adding nodes %v", err)
+	}
+
+	sourceRange := []string{"10.0.0.0/20"}
+	err = firewalls.EnsureL4InternalFirewallRule(
+		l.cloud,
+		fwName,
+		"1.2.3.4",
+		utils.ServiceKeyFunc(svc.Namespace, svc.Name),
+		sourceRange,
+		utils.GetPortRanges(tc.Input),
+		nodeNames,
+		string(v1.ProtocolTCP))
+	if err != nil {
+		t.Errorf("Unexpected error %v when ensuring firewall rule %s for svc %+v", err, fwName, svc)
+	}
+	existingFirewall, err := l.cloud.GetFirewall(fwName)
+	if err != nil || existingFirewall == nil || len(existingFirewall.Allowed) == 0 {
+		t.Errorf("Unexpected error %v when looking up firewall %s, Got firewall %+v", err, fwName, existingFirewall)
+	}
+	existingPorts := existingFirewall.Allowed[0].Ports
+	if !reflect.DeepEqual(existingPorts, tc.Result) {
+		t.Errorf("Expected firewall rule with ports %v, got %v", tc.Result, existingPorts)
+	}
+}
+
+func assertInternalLbResources(t *testing.T, apiService *v1.Service, l *L4, nodeNames []string) {
+	// Check that Firewalls are created for the LoadBalancer and the HealthCheck
+	sharedHC := !servicehelper.RequestsOnlyLocalTraffic(apiService)
+	resourceName := l.namer.PrimaryIPNEG(l.Service.Namespace, l.Service.Name)
+	resourceDesc, err := utils.MakeL4ILBServiceDescription(utils.ServiceKeyFunc(apiService.Namespace, apiService.Name), "", meta.VersionGA)
+	if err != nil {
+		t.Errorf("Failed to create description for resources, err %v", err)
+	}
+	hcName, hcFwName := healthchecks.HealthCheckName(sharedHC, l.namer.UID(), resourceName)
+ fwNames := []string{ + resourceName, + hcFwName, + } + + for _, fwName := range fwNames { + firewall, err := l.cloud.GetFirewall(fwName) + if err != nil { + t.Fatalf("Failed to fetch firewall rule %s - err %v", fwName, err) + } + if !utils.EqualStringSets(nodeNames, firewall.TargetTags) { + t.Fatalf("Expected firewall rule target tags '%v', Got '%v'", nodeNames, firewall.TargetTags) + } + if len(firewall.SourceRanges) == 0 { + t.Fatalf("Unexpected empty source range for firewall rule %v", firewall) + } + if firewall.Description != resourceDesc { + t.Errorf("Unexpected description in firewall - Expected %s, Got %s", resourceDesc, firewall.Description) + } + } + + // Check that HealthCheck is created + healthcheck, err := composite.GetHealthCheck(l.cloud, meta.GlobalKey(hcName), meta.VersionGA) + if err != nil { + t.Errorf("Failed to fetch healthcheck %s - err %v", hcName, err) + } + if healthcheck.Name != hcName { + t.Errorf("Unexpected name for healthcheck '%s' - expected '%s'", healthcheck.Name, hcName) + } + // Only non-shared Healthchecks get a description. + if !sharedHC && healthcheck.Description != resourceDesc { + t.Errorf("Unexpected description in healthcheck - Expected %s, Got %s", resourceDesc, healthcheck.Description) + } + + // Check that BackendService exists + backendServiceName := resourceName + key := meta.RegionalKey(backendServiceName, l.cloud.Region()) + backendServiceLink := cloud.SelfLink(meta.VersionGA, l.cloud.ProjectID(), "backendServices", key) + bs, err := composite.GetBackendService(l.cloud, key, meta.VersionGA) + if err != nil { + t.Errorf("Failed to fetch backend service %s - err %v", backendServiceName, err) + } + if bs.Protocol != "TCP" { + t.Errorf("Unexpected protocol '%s' for backend service %v", bs.Protocol, bs) + } + if bs.SelfLink != backendServiceLink { + t.Errorf("Unexpected self link in backend service - Expected %s, Got %s", backendServiceLink, bs.SelfLink) + } + if bs.Description != resourceDesc { + t.Errorf("Unexpected description in backend service - Expected %s, Got %s", resourceDesc, bs.Description) + } + if !utils.EqualStringSets(bs.HealthChecks, []string{healthcheck.SelfLink}) { + t.Errorf("Unexpected healthcheck reference '%v' in backend service, expected '%s'", bs.HealthChecks, healthcheck.SelfLink) + } + // Check that ForwardingRule is created + frName := l.GetFRName() + fwdRule, err := composite.GetForwardingRule(l.cloud, meta.RegionalKey(frName, l.cloud.Region()), meta.VersionGA) + if err != nil { + t.Errorf("Failed to fetch forwarding rule %s - err %v", frName, err) + } + if fwdRule.Name != frName { + t.Errorf("Unexpected name for forwarding rule '%s' - expected '%s'", fwdRule.Name, frName) + } + if fwdRule.IPProtocol != "TCP" { + t.Errorf("Unexpected protocol '%s' for forwarding rule %v", fwdRule.IPProtocol, fwdRule) + } + if fwdRule.BackendService != backendServiceLink { + t.Errorf("Unexpected backend service link '%s' for forwarding rule, expected '%s'", fwdRule.BackendService, backendServiceLink) + } + if fwdRule.Subnetwork != l.cloud.NetworkURL() { + t.Errorf("Unexpected subnetwork '%s' in forwarding rule, expected '%s'", fwdRule.Subnetwork, l.cloud.NetworkURL()) + } +} + +func assertInternalLbResourcesDeleted(t *testing.T, apiService *v1.Service, firewallsDeleted bool, l *L4) { + frName := l.GetFRName() + sharedHC := !servicehelper.RequestsOnlyLocalTraffic(apiService) + resourceName := l.namer.PrimaryIPNEG(l.Service.Namespace, l.Service.Name) + hcName, hcFwName := healthchecks.HealthCheckName(sharedHC, l.namer.UID(), resourceName) +
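+ // Firewalls are only checked when the caller expects them to be removed; the forwarding rule, health check and backend service below must always be gone.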
+ if firewallsDeleted { + // Check that Firewalls are deleted for the LoadBalancer and the HealthCheck + fwNames := []string{ + resourceName, + hcFwName, + } + + for _, fwName := range fwNames { + firewall, err := l.cloud.GetFirewall(fwName) + if err == nil || firewall != nil { + t.Errorf("Expected error when looking up firewall rule after deletion") + } + } + } + + // Check that the ForwardingRule is deleted + fwdRule, err := l.cloud.GetRegionForwardingRule(frName, l.cloud.Region()) + if err == nil || fwdRule != nil { + t.Errorf("Expected error when looking up forwarding rule after deletion") + } + + // Check that HealthCheck is deleted + healthcheck, err := l.cloud.GetHealthCheck(hcName) + if err == nil || healthcheck != nil { + t.Errorf("Expected error when looking up healthcheck after deletion") + } + + // Check that BackendService is deleted + bs, err := l.cloud.GetRegionBackendService(resourceName, l.cloud.Region()) + if err == nil || bs != nil { + t.Errorf("Expected error when looking up backend service after deletion") + } +} diff --git a/pkg/test/utils.go b/pkg/test/utils.go index 3636a8ac66..c517559747 100644 --- a/pkg/test/utils.go +++ b/pkg/test/utils.go @@ -1,21 +1,31 @@ package test import ( + "fmt" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + "google.golang.org/api/compute/v1" api_v1 "k8s.io/api/core/v1" "k8s.io/api/networking/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/record" "k8s.io/ingress-gce/pkg/annotations" backendconfig "k8s.io/ingress-gce/pkg/apis/backendconfig/v1beta1" "k8s.io/ingress-gce/pkg/utils" + "k8s.io/legacy-cloud-providers/gce" + "strings" + "time" ) const ( FinalizerAddFlag = flag("enable-finalizer-add") FinalizerRemoveFlag = flag("enable-finalizer-remove") EnableV2FrontendNamerFlag = flag("enable-v2-frontend-namer") + testServiceName = "ilbtest" + testServiceNamespace = "default" ) var ( @@ -57,6 +67,28 @@ func NewService(name types.NamespacedName, spec api_v1.ServiceSpec) *api_v1.Serv } } +// NewL4ILBService creates a Service of type LoadBalancer with the Internal annotation. +func NewL4ILBService(onlyLocal bool, port int) *api_v1.Service { + svc := &api_v1.Service{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: testServiceName, + Namespace: testServiceNamespace, + Annotations: map[string]string{gce.ServiceAnnotationLoadBalancerType: string(gce.LBTypeInternal)}, + }, + Spec: api_v1.ServiceSpec{ + Type: api_v1.ServiceTypeLoadBalancer, + SessionAffinity: api_v1.ServiceAffinityClientIP, + Ports: []api_v1.ServicePort{ + {Name: "testport", Port: int32(port), Protocol: "TCP"}, + }, + }, + } + if onlyLocal { + svc.Spec.ExternalTrafficPolicy = api_v1.ServiceExternalTrafficPolicyTypeLocal + } + return svc +} + // NewBackendConfig returns a BackendConfig with the given spec. func NewBackendConfig(name types.NamespacedName, spec backendconfig.BackendConfigSpec) *backendconfig.BackendConfig { return &backendconfig.BackendConfig{ @@ -110,3 +142,97 @@ func (s *FlagSaver) Reset(key flag, flagPointer *bool) { *flagPointer = val } } + +// CreateAndInsertNodes adds the given nodeNames in the given zone as GCE instances, so they can be looked up in tests.
+func CreateAndInsertNodes(gce *gce.Cloud, nodeNames []string, zoneName string) ([]*api_v1.Node, error) { + nodes := []*api_v1.Node{} + + for _, name := range nodeNames { + // Inserting the same node name twice causes an error - here we check if + // the instance exists already before insertion. + exists, err := GCEInstanceExists(name, gce) + if err != nil { + return nil, err + } + if !exists { + err := gce.InsertInstance( + gce.ProjectID(), + zoneName, + &compute.Instance{ + Name: name, + Tags: &compute.Tags{ + Items: []string{name}, + }, + }, + ) + if err != nil { + return nodes, err + } + } + + nodes = append( + nodes, + &api_v1.Node{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: name, + Labels: map[string]string{ + api_v1.LabelHostname: name, + api_v1.LabelZoneFailureDomain: zoneName, + }, + }, + Status: api_v1.NodeStatus{ + NodeInfo: api_v1.NodeSystemInfo{ + KubeProxyVersion: "v1.7.2", + }, + }, + }, + ) + } + return nodes, nil +} + +// GCEInstanceExists returns whether an instance with the given name exists in any current zone. +func GCEInstanceExists(name string, g *gce.Cloud) (bool, error) { + zones, err := g.GetAllCurrentZones() + if err != nil { + return false, err + } + for _, zone := range zones.List() { + ctx, cancel := cloud.ContextWithCallTimeout() + defer cancel() + if _, err := g.Compute().Instances().Get(ctx, meta.ZonalKey(name, zone)); err != nil { + if utils.IsNotFoundError(err) { + return false, nil + } else { + return false, err + } + } else { + // instance has been found + return true, nil + } + } + return false, nil +} + +// CheckEvent watches for events in the given FakeRecorder and checks whether one matches the given string. +// It will be used in the l4 firewall XPN tests once TestEnsureLoadBalancerDeletedSucceedsOnXPN and others are +// uncommented. +func CheckEvent(recorder *record.FakeRecorder, expected string, shouldMatch bool) error { + select { + case received := <-recorder.Events: + if strings.HasPrefix(received, expected) != shouldMatch { + if shouldMatch { + return fmt.Errorf("Should receive message \"%v\" but got \"%v\".", expected, received) + } else { + return fmt.Errorf("Unexpected event \"%v\".", received) + } + } + return nil + case <-time.After(2 * time.Second): + if shouldMatch { + return fmt.Errorf("Should receive message \"%v\" but timed out.", expected) + } + return nil + } +} diff --git a/pkg/utils/utils_test.go b/pkg/utils/utils_test.go index 583e4af086..9b29480040 100644 --- a/pkg/utils/utils_test.go +++ b/pkg/utils/utils_test.go @@ -21,6 +21,8 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/ingress-gce/pkg/annotations" "k8s.io/ingress-gce/pkg/flags" @@ -780,5 +782,29 @@ func TestIsLegacyL4ILBService(t *testing.T) { if IsLegacyL4ILBService(svc) { t.Errorf("Expected False for Legacy service %s, got True", svc.Name) } +} +func TestGetPortRanges(t *testing.T) { + t.Parallel() + for _, tc := range []struct { + Desc string + Input []int + Result []string + }{ + {Desc: "All Unique", Input: []int{8, 66, 23, 13, 89}, Result: []string{"8", "13", "23", "66", "89"}}, + {Desc: "All Unique Sorted", Input: []int{1, 7, 9, 16, 26}, Result: []string{"1", "7", "9", "16", "26"}}, + {Desc: "Ranges", Input: []int{56, 78, 67, 79, 21, 80, 12}, Result: []string{"12", "21", "56", "67", "78-80"}}, + {Desc: "Ranges Sorted", Input: []int{5, 7, 90, 1002, 1003, 1004, 1005, 2501}, Result: []string{"5", "7", "90", "1002-1005", "2501"}}, + {Desc: "Ranges Duplicates", Input: []int{15, 37, 900, 2002, 2003, 2003, 2004, 2004}, Result: []string{"15", "37", "900", "2002-2004"}},
"37", "900", "2002-2004"}}, + {Desc: "Duplicates", Input: []int{10, 10, 10, 10, 10}, Result: []string{"10"}}, + {Desc: "Only ranges", Input: []int{18, 19, 20, 21, 22, 55, 56, 77, 78, 79, 3504, 3505, 3506}, Result: []string{"18-22", "55-56", "77-79", "3504-3506"}}, + {Desc: "Single Range", Input: []int{6000, 6001, 6002, 6003, 6004, 6005}, Result: []string{"6000-6005"}}, + {Desc: "One value", Input: []int{12}, Result: []string{"12"}}, + {Desc: "Empty", Input: []int{}, Result: nil}, + } { + result := GetPortRanges(tc.Input) + if diff := cmp.Diff(result, tc.Result); diff != "" { + t.Errorf("GetPortRanges(%s) mismatch, (-want +got): \n%s", tc.Desc, diff) + } + } }