From 9d454d8b035b49ee5eb0fea279eb1750481fd8e6 Mon Sep 17 00:00:00 2001 From: Damian Sawicki Date: Wed, 5 Apr 2023 18:42:28 +0000 Subject: [PATCH] JSONify healthcheck Description for BackendConfig --- cmd/glbc/main.go | 7 +- pkg/healthchecks/healthchecks.go | 34 ++++++- pkg/healthchecks/healthchecks_test.go | 90 +++++++++++++++---- pkg/psc/controller.go | 6 +- pkg/psc/controller_test.go | 7 +- pkg/translator/healthchecks.go | 17 ++++ pkg/utils/descutils/utils.go | 72 ++++++++++++++- pkg/utils/descutils/utils_test.go | 61 +++++++++++++ .../cloud-provider-gcp/providers/gce/gce.go | 12 ++- .../providers/gce/gce_fake.go | 12 +++ 10 files changed, 281 insertions(+), 37 deletions(-) diff --git a/cmd/glbc/main.go b/cmd/glbc/main.go index 9be8f3604f..d9cba78f16 100644 --- a/cmd/glbc/main.go +++ b/cmd/glbc/main.go @@ -307,12 +307,7 @@ func runControllers(ctx *ingctx.ControllerContext) { // In NonGCP mode, use the zone specified in gce.conf directly. // This overrides the zone/fault-domain label on nodes for NEG controller. if flags.F.EnableNonGCPMode { - zone, err := ctx.Cloud.GetZone(context.Background()) - if err != nil { - klog.Errorf("Failed to retrieve zone information from Cloud provider: %v; Please check if local-zone is specified in gce.conf.", err) - } else { - zoneGetter = negtypes.NewSimpleZoneGetter(zone.FailureDomain) - } + zoneGetter = negtypes.NewSimpleZoneGetter(ctx.Cloud.Zone()) } enableAsm := false diff --git a/pkg/healthchecks/healthchecks.go b/pkg/healthchecks/healthchecks.go index cb7ae56451..9f4047f7a5 100644 --- a/pkg/healthchecks/healthchecks.go +++ b/pkg/healthchecks/healthchecks.go @@ -32,6 +32,7 @@ import ( "k8s.io/ingress-gce/pkg/loadbalancers/features" "k8s.io/ingress-gce/pkg/translator" "k8s.io/ingress-gce/pkg/utils" + "k8s.io/ingress-gce/pkg/utils/descutils" "k8s.io/klog/v2" ) @@ -43,13 +44,30 @@ type HealthChecks struct { // This is a workaround which allows us to not have to maintain // a separate health checker for the default backend. 
defaultBackendSvc types.NamespacedName + clusterInfo descutils.ClusterInfo } // NewHealthChecker creates a new health checker. // cloud: the cloud object implementing SingleHealthCheck. // defaultHealthCheckPath: is the HTTP path to use for health checks. func NewHealthChecker(cloud HealthCheckProvider, healthCheckPath string, defaultBackendSvc types.NamespacedName) *HealthChecks { - return &HealthChecks{cloud, healthCheckPath, defaultBackendSvc} + ci := generateClusterInfo(cloud.(*gce.Cloud)) + return &HealthChecks{cloud, healthCheckPath, defaultBackendSvc, ci} +} + +func generateClusterInfo(gceCloud *gce.Cloud) descutils.ClusterInfo { + var location string + regionalCluster := gceCloud.Regional() + if regionalCluster { + location = gceCloud.Region() + } else { + location = gceCloud.Zone() + } + name, err := gceCloud.ClusterID.GetID() + if err != nil { + klog.Errorf("Failed to obtain cluster name, %+v", err) + } + return descutils.ClusterInfo{Name: name, Location: location, Regional: regionalCluster} } // new returns a *HealthCheck with default settings and specified port/protocol @@ -67,9 +85,23 @@ func (h *HealthChecks) new(sp utils.ServicePort) *translator.HealthCheck { hc.Name = sp.BackendName() hc.Port = sp.NodePort hc.RequestPath = h.pathFromSvcPort(sp) + hc.SetHealthcheckInfo(h.generateHealthcheckInfo(hc)) return hc } +func (h *HealthChecks) generateHealthcheckInfo(hc *translator.HealthCheck) descutils.HealthcheckInfo { + ingressType := descutils.ExternalLB + if hc.ForILB { + ingressType = descutils.InternalLB + } + return descutils.HealthcheckInfo{ + ClusterInfo: h.clusterInfo, + ServiceInfo: descutils.ServiceInfo(h.defaultBackendSvc), + HealthcheckConfig: descutils.DefaultHC, + IngressType: ingressType, + } +} + // SyncServicePort implements HealthChecker. 
func (h *HealthChecks) SyncServicePort(sp *utils.ServicePort, probe *v1.Probe) (string, error) { hc := h.new(*sp) diff --git a/pkg/healthchecks/healthchecks_test.go b/pkg/healthchecks/healthchecks_test.go index f23e7637b1..ca39c0b643 100644 --- a/pkg/healthchecks/healthchecks_test.go +++ b/pkg/healthchecks/healthchecks_test.go @@ -453,7 +453,9 @@ func TestRolloutUpdateCustomHCDescription(t *testing.T) { flags.F.EnableUpdateCustomHealthCheckDescription = oldUpdateDescription }() - fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) + testClusterValues := gce.DefaultTestClusterValues() + testClusterValues.Regional = true + fakeGCE := gce.NewFakeGCECloud(testClusterValues) var ( defaultSP *utils.ServicePort = testSPs["HTTP-80-reg-nil"] @@ -503,9 +505,17 @@ func TestRolloutUpdateCustomHCDescription(t *testing.T) { outputBCHCWithFlag := getSingletonHealthcheck(t, fakeGCE) - if outputBCHCWithFlag.Description != translator.DescriptionForHealthChecksFromBackendConfig { + wantDescription := "" + + fmt.Sprintf("{\n") + + fmt.Sprintf(" \"K8sCluster\": \"/locations/%s/clusters/%s\",\n", gce.DefaultTestClusterValues().Region, gce.DefaultTestClusterValues().ClusterID) + + fmt.Sprintf(" \"K8sResource\": \"/namespaces/%s/services/%s\",\n", defaultBackendSvc.Namespace, defaultBackendSvc.Name) + + fmt.Sprintf(" \"K8sResourceDependency\": \"Ingress External Load Balancer\",\n") + + fmt.Sprintf(" \"Config\": \"BackendConfig\"\n") + + fmt.Sprintf("}") + + if outputBCHCWithFlag.Description != wantDescription { t.Fatalf("incorrect Description, is: \"%v\", want: \"%v\"", - outputBCHCWithFlag.Description, translator.DescriptionForHealthChecksFromBackendConfig) + outputBCHCWithFlag.Description, wantDescription) } // Verify that only the Description is modified after rollout. 
@@ -980,11 +990,12 @@ func TestSyncServicePort(t *testing.T) { }() type tc struct { - desc string - setup func(*cloud.MockGCE) - sp *utils.ServicePort - probe *v1.Probe - regional bool + desc string + setup func(*cloud.MockGCE) + sp *utils.ServicePort + probe *v1.Probe + regional bool + regionalCluster bool wantSelfLink string wantErr bool @@ -1066,7 +1077,10 @@ func TestSyncServicePort(t *testing.T) { chc = fixture.hc() chc.HttpHealthCheck.RequestPath = "/foo" chc.Description = translator.DescriptionForHealthChecksFromBackendConfig - cases = append(cases, &tc{desc: "create backendconfig", sp: testSPs["HTTP-80-reg-bc"], wantComputeHC: chc}) + cases = append(cases, &tc{ + desc: "create backendconfig", + sp: testSPs["HTTP-80-reg-bc"], + wantComputeHC: chc}) // BackendConfig all chc = fixture.hc() @@ -1079,7 +1093,10 @@ func TestSyncServicePort(t *testing.T) { // PortSpecification is set by the controller chc.HttpHealthCheck.PortSpecification = "USE_FIXED_PORT" chc.Description = translator.DescriptionForHealthChecksFromBackendConfig - cases = append(cases, &tc{desc: "create backendconfig all", sp: testSPs["HTTP-80-reg-bcall"], wantComputeHC: chc}) + cases = append(cases, &tc{ + desc: "create backendconfig all", + sp: testSPs["HTTP-80-reg-bcall"], + wantComputeHC: chc}) i64 := func(i int64) *int64 { return &i } @@ -1118,6 +1135,18 @@ func TestSyncServicePort(t *testing.T) { wantComputeHC: chc, }) + // BackendConfig ilb + chc = fixture.ilb() + chc.HttpHealthCheck.RequestPath = "/foo" + chc.Description = translator.DescriptionForHealthChecksFromBackendConfig + cases = append(cases, &tc{ + desc: "create backendconfig ilb regional cluster", + sp: testSPs["HTTP-80-ilb-bc"], + regional: true, + regionalCluster: true, + wantComputeHC: chc, + }) + // Probe and BackendConfig override chc = fixture.hc() chc.HttpHealthCheck.RequestPath = "/foo" @@ -1325,15 +1354,40 @@ func TestSyncServicePort(t *testing.T) { // settings. 
for _, tc := range cases { + tc := *tc tc.updateHCDescription = true tc.desc = tc.desc + " with updateHCDescription" - cases = append(cases, tc) + copyOfWant := *tc.wantComputeHC + if tc.sp.BackendConfig != nil { + var wantLocation, wantIngressType string + if tc.regionalCluster { + wantLocation = gce.DefaultTestClusterValues().Region + } else { + wantLocation = gce.DefaultTestClusterValues().ZoneName + } + if tc.sp.L7ILBEnabled { + wantIngressType = "Ingress Internal Load Balancer" + } else { + wantIngressType = "Ingress External Load Balancer" + } + copyOfWant.Description = "" + + fmt.Sprintf("{\n") + + fmt.Sprintf(" \"K8sCluster\": \"/locations/%s/clusters/%s\",\n", wantLocation, gce.DefaultTestClusterValues().ClusterID) + + fmt.Sprintf(" \"K8sResource\": \"/namespaces/%s/services/%s\",\n", defaultBackendSvc.Namespace, defaultBackendSvc.Name) + + fmt.Sprintf(" \"K8sResourceDependency\": \"%s\",\n", wantIngressType) + + fmt.Sprintf(" \"Config\": \"BackendConfig\"\n") + + fmt.Sprintf("}") + } + tc.wantComputeHC = &copyOfWant + cases = append(cases, &tc) } for _, tc := range cases { t.Run(tc.desc, func(t *testing.T) { flags.F.EnableUpdateCustomHealthCheckDescription = tc.updateHCDescription - fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) + testClusterValues := gce.DefaultTestClusterValues() + testClusterValues.Regional = tc.regionalCluster + fakeGCE := gce.NewFakeGCECloud(testClusterValues) mock := fakeGCE.Compute().(*cloud.MockGCE) setupMockUpdate(mock) @@ -1364,20 +1418,20 @@ func TestSyncServicePort(t *testing.T) { t.Fatalf("Got %d healthchecks, want 1\n%s", len(computeHCs), pretty.Sprint(computeHCs)) } - gotHC := computeHCs[0] // Filter out SelfLink because it is hard to deal with in the mock and // test cases. 
- filter := func(hc *compute.HealthCheck) { + filter := func(hc compute.HealthCheck) compute.HealthCheck { hc.SelfLink = "" if !tc.updateHCDescription { hc.Description = "" } + return hc } - filter(gotHC) - filter(tc.wantComputeHC) + gotHC := filter(*computeHCs[0]) + wantHC := filter(*tc.wantComputeHC) - if !reflect.DeepEqual(gotHC, tc.wantComputeHC) { - t.Fatalf("Compute healthcheck is:\n%s, want:\n%s", pretty.Sprint(gotHC), pretty.Sprint(tc.wantComputeHC)) + if !reflect.DeepEqual(gotHC, wantHC) { + t.Fatalf("Compute healthcheck is:\n%s, want:\n%s", pretty.Sprint(gotHC), pretty.Sprint(wantHC)) } } diff --git a/pkg/psc/controller.go b/pkg/psc/controller.go index 44e196c80a..ae0b5b0cec 100644 --- a/pkg/psc/controller.go +++ b/pkg/psc/controller.go @@ -128,11 +128,7 @@ func NewController(ctx *context.ControllerContext) *Controller { if controller.regionalCluster { controller.clusterLoc = controller.cloud.Region() } else { - zone, err := controller.cloud.GetZone(context2.Background()) - if err != nil { - klog.Errorf("Failed to retrieve zone information from cloud provider: %q", err) - } - controller.clusterLoc = zone.FailureDomain + controller.clusterLoc = controller.cloud.Zone() } ctx.SAInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ diff --git a/pkg/psc/controller_test.go b/pkg/psc/controller_test.go index 9717ac6037..2e01b0b78d 100644 --- a/pkg/psc/controller_test.go +++ b/pkg/psc/controller_test.go @@ -301,11 +301,8 @@ func TestServiceAttachmentCreation(t *testing.T) { t.Errorf("%s", err) } - zone, err := fakeCloud.GetZone(context2.TODO()) - if err != nil { - t.Errorf("failed to get zone %q", err) - } - desc := sautils.NewServiceAttachmentDesc(testNamespace, saName, ClusterName, zone.FailureDomain, false) + zone := fakeCloud.Zone() + desc := sautils.NewServiceAttachmentDesc(testNamespace, saName, ClusterName, zone, false) expectedSA := &ga.ServiceAttachment{ ConnectionPreference: tc.connectionPreference, diff --git a/pkg/translator/healthchecks.go 
b/pkg/translator/healthchecks.go index caa541559c..339314bee9 100644 --- a/pkg/translator/healthchecks.go +++ b/pkg/translator/healthchecks.go @@ -32,6 +32,7 @@ import ( "k8s.io/ingress-gce/pkg/flags" "k8s.io/ingress-gce/pkg/loadbalancers/features" "k8s.io/ingress-gce/pkg/utils" + "k8s.io/ingress-gce/pkg/utils/descutils" "k8s.io/klog/v2" ) @@ -90,6 +91,18 @@ type HealthCheck struct { // compute struct back. computealpha.HTTPHealthCheck computealpha.HealthCheck + healthcheckInfo descutils.HealthcheckInfo +} + +func (hc *HealthCheck) SetHealthcheckInfo(i descutils.HealthcheckInfo) { + hc.healthcheckInfo = i + hc.reconcileHCDescription() +} + +func (hc *HealthCheck) reconcileHCDescription() { + if flags.F.EnableUpdateCustomHealthCheckDescription && hc.healthcheckInfo.HealthcheckConfig == descutils.BackendConfigHC { + hc.Description = hc.healthcheckInfo.GenerateHealthcheckDescription() + } } // NewHealthCheck creates a HealthCheck which abstracts nested structs away @@ -227,6 +240,8 @@ func (hc *HealthCheck) UpdateFromBackendConfig(c *backendconfigv1.HealthCheckCon if flags.F.EnableUpdateCustomHealthCheckDescription { hc.Description = DescriptionForHealthChecksFromBackendConfig } + hc.healthcheckInfo.HealthcheckConfig = descutils.BackendConfigHC + hc.reconcileHCDescription() } // DefaultHealthCheck simply returns the default health check. @@ -337,4 +352,6 @@ func ApplyProbeSettingsToHC(p *v1.Probe, hc *HealthCheck) { } hc.Description = DescriptionForHealthChecksFromReadinessProbe + hc.healthcheckInfo.HealthcheckConfig = descutils.ReadinessProbeHC + hc.reconcileHCDescription() } diff --git a/pkg/utils/descutils/utils.go b/pkg/utils/descutils/utils.go index fdb371e726..32358d1fbc 100644 --- a/pkg/utils/descutils/utils.go +++ b/pkg/utils/descutils/utils.go @@ -16,7 +16,64 @@ limitations under the License. 
package descutils -import "fmt" +import ( + "encoding/json" + "fmt" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" +) + +type ClusterInfo struct { + Name string + Location string + Regional bool +} + +type ServiceInfo types.NamespacedName + +type HealthcheckConfig string + +const ( + DefaultHC HealthcheckConfig = "Default" + ReadinessProbeHC HealthcheckConfig = "ReadinessProbe" + BackendConfigHC HealthcheckConfig = "BackendConfig" +) + +type IngressType string + +const ( + InternalLB IngressType = "Ingress Internal Load Balancer" + ExternalLB IngressType = "Ingress External Load Balancer" +) + +type HealthcheckInfo struct { + ClusterInfo + ServiceInfo + HealthcheckConfig + IngressType +} + +type HealthcheckDesc struct { + K8sCluster string + K8sResource string + K8sResourceDependency IngressType + Config HealthcheckConfig +} + +func (i *ClusterInfo) generateClusterDescription() string { + // locType here differs from locType in GenerateClusterLink(). + locType := "locations" + return fmt.Sprintf("/%s/%s/clusters/%s", locType, i.Location, i.Name) +} + +func NewServiceInfo(namespace, resourceName string) ServiceInfo { + return ServiceInfo{namespace, resourceName} +} + +func (i *ServiceInfo) generateK8sResourceDescription() string { + return GenerateK8sResourceLink(i.Namespace, "services", i.Name) +} func GenerateClusterLink(name, location string, regional bool) string { if name == "" || location == "" { @@ -33,3 +90,16 @@ func GenerateClusterLink(name, location string, regional bool) string { func GenerateK8sResourceLink(namespace, resourceType, resourceName string) string { return fmt.Sprintf("/namespaces/%s/%s/%s", namespace, resourceType, resourceName) } + +func (i *HealthcheckInfo) GenerateHealthcheckDescription() string { + desc := HealthcheckDesc{} + desc.K8sCluster = i.ClusterInfo.generateClusterDescription() + desc.K8sResource = i.ServiceInfo.generateK8sResourceDescription() + desc.K8sResourceDependency = i.IngressType + desc.Config = 
i.HealthcheckConfig + json, err := json.MarshalIndent(desc, "", " ") + if err != nil { + klog.Errorf("Failed to marshal HealthcheckDesc %s: %v", desc, err) + } + return string(json) +} diff --git a/pkg/utils/descutils/utils_test.go b/pkg/utils/descutils/utils_test.go index 8d29a7912d..509bd13072 100644 --- a/pkg/utils/descutils/utils_test.go +++ b/pkg/utils/descutils/utils_test.go @@ -78,3 +78,64 @@ func TestK8sResourceLink(t *testing.T) { t.Errorf("unexpected cluster link: wanted %s, but got %s", expectedLink, generatedLink) } } + +func TestGenerateClusterDescription(t *testing.T) { + testName := "cluster-name" + testRegion := "pl-central7" + clusterInfo := ClusterInfo{testName, testRegion, true} + expectedDescription := "/locations/pl-central7/clusters/cluster-name" + + generatedDescription := clusterInfo.generateClusterDescription() + if generatedDescription != expectedDescription { + t.Errorf("unexpected cluster description: wanted %s, but got %s", expectedDescription, generatedDescription) + } + + testZone := "pl-central7-g" + clusterInfo = ClusterInfo{testName, testZone, false} + expectedDescription = "/locations/pl-central7-g/clusters/cluster-name" + + generatedDescription = clusterInfo.generateClusterDescription() + if generatedDescription != expectedDescription { + t.Errorf("unexpected cluster description: wanted %s, but got %s", expectedDescription, generatedDescription) + } +} + +func TestGenerateK8sResourceDescription(t *testing.T) { + testName := "service-name" + testNamespace := "testNamespace" + serviceInfo := NewServiceInfo(testNamespace, testName) + expectedDescription := "/namespaces/testNamespace/services/service-name" + + generatedDescription := serviceInfo.generateK8sResourceDescription() + if generatedDescription != expectedDescription { + t.Errorf("unexpected service description: wanted %v, but got %v", expectedDescription, generatedDescription) + } +} + +func TestGenerateHealthcheckDescription(t *testing.T) { + testCluster := "cluster-name" 
+ testRegion := "pl-central7" + clusterInfo := ClusterInfo{testCluster, testRegion, true} + testService := "service-name" + testNamespace := "testNamespace" + serviceInfo := NewServiceInfo(testNamespace, testService) + hcConfig := DefaultHC + i := HealthcheckInfo{ + ClusterInfo: clusterInfo, + ServiceInfo: serviceInfo, + HealthcheckConfig: hcConfig, + IngressType: InternalLB, + } + expectedDescription := "" + + "{\n" + + " \"K8sCluster\": \"/locations/pl-central7/clusters/cluster-name\",\n" + + " \"K8sResource\": \"/namespaces/testNamespace/services/service-name\",\n" + + " \"K8sResourceDependency\": \"Ingress Internal Load Balancer\",\n" + + " \"Config\": \"Default\"\n" + + "}" + + generatedDescription := i.GenerateHealthcheckDescription() + if generatedDescription != expectedDescription { + t.Errorf("unexpected healthcheck description: wanted %v, but got %v", expectedDescription, generatedDescription) + } +} diff --git a/vendor/k8s.io/cloud-provider-gcp/providers/gce/gce.go b/vendor/k8s.io/cloud-provider-gcp/providers/gce/gce.go index 9ebadc19e2..fd9b84fe7f 100644 --- a/vendor/k8s.io/cloud-provider-gcp/providers/gce/gce.go +++ b/vendor/k8s.io/cloud-provider-gcp/providers/gce/gce.go @@ -43,7 +43,7 @@ import ( "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/informers" @@ -726,6 +726,16 @@ func (g *Cloud) Region() string { return g.region } +// Regional returns true if the cluster is regional. +func (g *Cloud) Regional() bool { + return g.regional +} + +// Zone returns the zone. 
+func (g *Cloud) Zone() string { + return g.localZone +} + // OnXPN returns true if the cluster is running on a cross project network (XPN) func (g *Cloud) OnXPN() bool { return g.onXPN diff --git a/vendor/k8s.io/cloud-provider-gcp/providers/gce/gce_fake.go b/vendor/k8s.io/cloud-provider-gcp/providers/gce/gce_fake.go index d9aca0742f..8f4456bbf8 100644 --- a/vendor/k8s.io/cloud-provider-gcp/providers/gce/gce_fake.go +++ b/vendor/k8s.io/cloud-provider-gcp/providers/gce/gce_fake.go @@ -26,6 +26,7 @@ import ( compute "google.golang.org/api/compute/v1" option "google.golang.org/api/option" "k8s.io/client-go/tools/cache" + "k8s.io/ingress-gce/pkg/utils/descutils" ) // TestClusterValues holds the config values for the fake/test gce cloud object. @@ -37,6 +38,7 @@ type TestClusterValues struct { ClusterID string ClusterName string OnXPN bool + Regional bool } // DefaultTestClusterValues Creates a reasonable set of default cluster values @@ -49,9 +51,17 @@ func DefaultTestClusterValues() TestClusterValues { SecondaryZoneName: "us-central1-c", ClusterID: "test-cluster-id", ClusterName: "Test Cluster Name", + Regional: false, } } +// DefaultClusterInfo creates descutils.ClusterInfo based on the output of +// DefaultTestClusterValues(). 
+func DefaultClusterInfo() descutils.ClusterInfo { + values := DefaultTestClusterValues() + return descutils.ClusterInfo{Name: values.ClusterID, Location: values.Region, Regional: true} +} + // Stubs ClusterID so that ClusterID.getOrInitialize() does not require calling // gce.Initialize() func fakeClusterID(clusterID string) ClusterID { @@ -73,12 +83,14 @@ func NewFakeGCECloud(vals TestClusterValues) *Cloud { region: vals.Region, service: service, managedZones: []string{vals.ZoneName}, + localZone: vals.ZoneName, projectID: vals.ProjectID, networkProjectID: vals.ProjectID, ClusterID: fakeClusterID(vals.ClusterID), onXPN: vals.OnXPN, metricsCollector: newLoadBalancerMetrics(), projectsBasePath: getProjectsBasePath(service.BasePath), + regional: vals.Regional, } c := cloud.NewMockGCE(&gceProjectRouter{gce}) gce.c = c