From b8a4d8a8d2ef1962acfb9871f655a6a8b1aa037c Mon Sep 17 00:00:00 2001 From: Pavithra Ramesh Date: Mon, 30 Dec 2019 14:29:39 -0800 Subject: [PATCH 1/3] Moved constants to remove cyclic dependency. translator imports loadbalancer which imports firewalls, which again depends on translator. Removed the translator -> loadbalancer dependency. --- pkg/controller/translator/translator.go | 23 ++++++++++++++++++----- pkg/loadbalancers/l7.go | 8 -------- 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/pkg/controller/translator/translator.go b/pkg/controller/translator/translator.go index 42561ad09a..2a1e1199ff 100644 --- a/pkg/controller/translator/translator.go +++ b/pkg/controller/translator/translator.go @@ -36,11 +36,20 @@ import ( "k8s.io/ingress-gce/pkg/backendconfig" "k8s.io/ingress-gce/pkg/context" "k8s.io/ingress-gce/pkg/controller/errors" - "k8s.io/ingress-gce/pkg/loadbalancers" "k8s.io/ingress-gce/pkg/utils" namer_util "k8s.io/ingress-gce/pkg/utils/namer" ) +const ( + // DefaultHost is the host used if none is specified. It is a valid value + // for the "Host" field recognized by GCE. + DefaultHost = "*" + + // DefaultPath is the path used if none is specified. It is a valid path + // recognized by GCE. + DefaultPath = "/*" +) + // getServicePortParams allows for passing parameters to getServicePort() type getServicePortParams struct { isL7ILB bool @@ -206,14 +215,14 @@ func (t *Translator) TranslateIngress(ing *v1beta1.Ingress, systemDefaultBackend // sent to one of the last backend in the rules list. path := p.Path if path == "" { - path = loadbalancers.DefaultPath + path = DefaultPath } pathRules = append(pathRules, utils.PathRule{Path: path, Backend: *svcPort}) } } host := rule.Host if host == "" { - host = loadbalancers.DefaultHost + host = DefaultHost } urlMap.PutPathRulesForHost(host, pathRules) } @@ -266,9 +275,13 @@ func (t *Translator) GetZoneForNode(name string) (string, error) { // ListZones returns a list of zones this Kubernetes cluster spans. func (t *Translator) ListZones() ([]string, error) { - zones := sets.String{} nodeLister := t.ctx.NodeInformer.GetIndexer() - readyNodes, err := listers.NewNodeLister(nodeLister).ListWithPredicate(utils.GetNodeConditionPredicate()) + return t.ListZonesWithLister(listers.NewNodeLister(nodeLister)) +} + +func (t *Translator) ListZonesWithLister(lister listers.NodeLister) ([]string, error) { + zones := sets.String{} + readyNodes, err := lister.ListWithPredicate(utils.GetNodeConditionPredicate()) if err != nil { return zones.List(), err } diff --git a/pkg/loadbalancers/l7.go b/pkg/loadbalancers/l7.go index af5e1ee045..74ebdf90b2 100644 --- a/pkg/loadbalancers/l7.go +++ b/pkg/loadbalancers/l7.go @@ -40,14 +40,6 @@ import ( ) const ( - // DefaultHost is the host used if none is specified. It is a valid value - // for the "Host" field recognized by GCE. - DefaultHost = "*" - - // DefaultPath is the path used if none is specified. It is a valid path - // recognized by GCE. - DefaultPath = "/*" - invalidConfigErrorMessage = "invalid ingress frontend configuration, please check your usage of the 'kubernetes.io/ingress.allow-http' annotation." ) From eb6e2291bcf567560c399280a23abf7a04766f90 Mon Sep 17 00:00:00 2001 From: Pavithra Ramesh Date: Mon, 30 Dec 2019 14:29:11 -0800 Subject: [PATCH 2/3] Added new controller for L4 ILB services Fixes including adding CustomSubnet support and self links for backend service, healthcheck. Removed the operation from queue key Added connectiondraining to backend service. 
Added a new flag to run L4 controller. Used namer.PrimaryIPNEG() instead of namer.NEG() Use a common description for all ILB resources. Added locking for shared healthchecks. Only process relevant updates in l4 controller Renamed files, used patch instead of update. unit tests for L4 ILB Most of the tests in l4_test.go are from gce_loadbalancer_internal_test.go --- cmd/glbc/main.go | 7 + pkg/annotations/service.go | 23 + pkg/backends/backends.go | 97 ++ pkg/backends/features/features.go | 7 +- pkg/backends/syncer.go | 8 +- pkg/backends/syncer_test.go | 10 +- pkg/controller/translator/translator.go | 4 +- pkg/firewalls/firewalls_l4.go | 100 ++ pkg/healthchecks/healthchecks_l4.go | 162 ++++ pkg/healthchecks/healthchecks_l4_test.go | 105 +++ pkg/l4/l4controller.go | 366 ++++++++ pkg/l4/l4controller_test.go | 270 ++++++ pkg/loadbalancers/fakes.go | 7 + pkg/loadbalancers/forwarding_rules.go | 168 ++++ pkg/loadbalancers/l4.go | 230 +++++ pkg/loadbalancers/l4_test.go | 1098 ++++++++++++++++++++++ pkg/test/utils.go | 126 +++ pkg/utils/common/finalizer.go | 42 +- pkg/utils/namer/interfaces.go | 2 + pkg/utils/namer/namer.go | 10 +- pkg/utils/patch.go | 38 + pkg/utils/serviceport.go | 21 +- pkg/utils/utils.go | 130 ++- pkg/utils/utils_test.go | 30 +- 24 files changed, 3031 insertions(+), 30 deletions(-) create mode 100644 pkg/firewalls/firewalls_l4.go create mode 100644 pkg/healthchecks/healthchecks_l4.go create mode 100644 pkg/healthchecks/healthchecks_l4_test.go create mode 100644 pkg/l4/l4controller.go create mode 100644 pkg/l4/l4controller_test.go create mode 100644 pkg/loadbalancers/l4.go create mode 100644 pkg/loadbalancers/l4_test.go diff --git a/cmd/glbc/main.go b/cmd/glbc/main.go index d1581dced9..0ac9c18c2b 100644 --- a/cmd/glbc/main.go +++ b/cmd/glbc/main.go @@ -49,6 +49,7 @@ import ( "k8s.io/ingress-gce/pkg/firewalls" "k8s.io/ingress-gce/pkg/flags" _ "k8s.io/ingress-gce/pkg/klog" + "k8s.io/ingress-gce/pkg/l4" "k8s.io/ingress-gce/pkg/version" ) @@ -217,6 +218,12 @@ func runControllers(ctx *ingctx.ControllerContext) { fwc := firewalls.NewFirewallController(ctx, flags.F.NodePortRanges.Values()) + if flags.F.RunL4Controller { + l4Controller := l4.NewController(ctx, stopCh) + go l4Controller.Run() + klog.V(0).Infof("L4 controller started") + } + // TODO: Refactor NEG to use cloud mocks so ctx.Cloud can be referenced within NewController. negController := neg.NewController(negtypes.NewAdapter(ctx.Cloud), ctx, lbc.Translator, ctx.ClusterNamer, flags.F.ResyncPeriod, flags.F.NegGCPeriod, flags.F.EnableReadinessReflector, flags.F.RunIngressController, flags.F.RunL4Controller) diff --git a/pkg/annotations/service.go b/pkg/annotations/service.go index a7b79ed199..d6f7578eea 100644 --- a/pkg/annotations/service.go +++ b/pkg/annotations/service.go @@ -182,6 +182,29 @@ func WantsL4ILB(service *v1.Service) (bool, string) { return false, fmt.Sprintf("Type : %s, LBType : %s", service.Spec.Type, ltype) } +// OnlyNEGStatusChanged returns true if the only annotation change between the 2 services is the NEG status annotation. +// This will be true if neg annotation was added or removed in the new service. +// Note : This assumes that the annotations in old and new service are different. If they are identical, this will +// return true. 
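+// For example (an illustrative sketch, not part of the patch): if the only difference is that newService
+// gained the NEG status annotation, both directional checks below pass and this returns true; if any other
+// annotation key was also added or removed, it returns false. Note that only annotation keys are compared,
+// so a changed value under an existing key is not detected here.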
+func OnlyNEGStatusChanged(oldService, newService *v1.Service) bool { + return onlyNEGStatusChanged(oldService, newService) && onlyNEGStatusChanged(newService, oldService) +} + +// onlyNEGStatusChanged returns true if the NEG Status annotation is the only extra annotation present in the new +// service but not in the old service. +// Note : This assumes that the annotations in old and new service are different. If they are identical, this will +// return true. +func onlyNEGStatusChanged(oldService, newService *v1.Service) bool { + for key, _ := range newService.ObjectMeta.Annotations { + if _, ok := oldService.ObjectMeta.Annotations[key]; !ok { + if key != NEGStatusKey { + return false + } + } + } + return true +} + // ApplicationProtocols returns a map of port (name or number) to the protocol // on the port. func (svc *Service) ApplicationProtocols() (map[string]AppProtocol, error) { diff --git a/pkg/backends/backends.go b/pkg/backends/backends.go index f8687cdca8..7ca269a64d 100644 --- a/pkg/backends/backends.go +++ b/pkg/backends/backends.go @@ -19,6 +19,8 @@ import ( "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" "google.golang.org/api/compute/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/ingress-gce/pkg/backends/features" "k8s.io/ingress-gce/pkg/composite" "k8s.io/ingress-gce/pkg/utils" @@ -27,6 +29,10 @@ import ( "k8s.io/legacy-cloud-providers/gce" ) +const ( + DefaultConnectionDrainingTimeoutSeconds = 30 +) + // Backends handles CRUD operations for backends. type Backends struct { cloud *gce.Cloud @@ -224,3 +230,94 @@ func (b *Backends) List(key *meta.Key, version meta.Version) ([]*composite.Backe } return clusterBackends, nil } + +// EnsureL4BackendService creates or updates the backend service with the given name. +func (b *Backends) EnsureL4BackendService(name, hcLink, protocol, sessionAffinity, scheme string, nm types.NamespacedName, version meta.Version) (*composite.BackendService, error) { + klog.V(2).Infof("EnsureL4BackendService(%v, %v, %v): checking existing backend service", name, scheme, protocol) + key, err := composite.CreateKey(b.cloud, name, meta.Regional) + if err != nil { + return nil, err + } + bs, err := composite.GetBackendService(b.cloud, key, meta.VersionGA) + if err != nil && !utils.IsNotFoundError(err) { + return nil, err + } + desc, err := utils.MakeL4ILBServiceDescription(nm.String(), "", meta.VersionGA) + if err != nil { + klog.Warningf("EnsureL4BackendService: Failed to generate description for BackendService %s, err %v", + name, err) + } + expectedBS := &composite.BackendService{ + Name: name, + Protocol: string(protocol), + Description: desc, + HealthChecks: []string{hcLink}, + SessionAffinity: utils.TranslateAffinityType(sessionAffinity), + LoadBalancingScheme: string(scheme), + ConnectionDraining: &composite.ConnectionDraining{DrainingTimeoutSec: DefaultConnectionDrainingTimeoutSeconds}, + } + + // Create backend service if none was found + if bs == nil { + klog.V(2).Infof("EnsureL4BackendService: creating backend service %v", name) + err := composite.CreateBackendService(b.cloud, key, expectedBS) + if err != nil { + return nil, err + } + klog.V(2).Infof("EnsureL4BackendService: created backend service %v successfully", name) + // We need to perform a GCE call to re-fetch the object we just created + // so that the "Fingerprint" field is filled in. This is needed to update the + // object without error. The lookup is also needed to populate the selfLink. 
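+		// (Assumption, consistent with the update path below: GCE treats the Fingerprint as an
+		// optimistic-locking token, so an UpdateBackendService call carrying a stale fingerprint
+		// is rejected; the re-read guarantees a current one.)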
+ return composite.GetBackendService(b.cloud, key, meta.VersionGA) + } + + if backendSvcEqual(expectedBS, bs) { + return bs, nil + } + if bs.ConnectionDraining != nil && bs.ConnectionDraining.DrainingTimeoutSec > 0 { + // if user overrides this value, continue using that. + expectedBS.ConnectionDraining.DrainingTimeoutSec = bs.ConnectionDraining.DrainingTimeoutSec + } + klog.V(2).Infof("EnsureL4BackendService: updating backend service %v", name) + // Set fingerprint for optimistic locking + expectedBS.Fingerprint = bs.Fingerprint + if err := composite.UpdateBackendService(b.cloud, key, expectedBS); err != nil { + return nil, err + } + klog.V(2).Infof("EnsureL4BackendService: updated backend service %v successfully", name) + return composite.GetBackendService(b.cloud, key, meta.VersionGA) +} + +// backendsListEqual asserts that backend lists are equal by group link only +func backendsListEqual(a, b []*composite.Backend) bool { + if len(a) != len(b) { + return false + } + if len(a) == 0 { + return true + } + + aSet := sets.NewString() + for _, v := range a { + aSet.Insert(v.Group) + } + bSet := sets.NewString() + for _, v := range b { + bSet.Insert(v.Group) + } + + return aSet.Equal(bSet) +} + +// backendSvcEqual returns true if the 2 BackendService objects are equal. +// ConnectionDraining timeout is not checked for equality, if user changes +// this timeout and no other backendService parameters change, the backend +// service will not be updated. +func backendSvcEqual(a, b *composite.BackendService) bool { + return a.Protocol == b.Protocol && + a.Description == b.Description && + a.SessionAffinity == b.SessionAffinity && + a.LoadBalancingScheme == b.LoadBalancingScheme && + utils.EqualStringSets(a.HealthChecks, b.HealthChecks) && + backendsListEqual(a.Backends, b.Backends) +} diff --git a/pkg/backends/features/features.go b/pkg/backends/features/features.go index e3c5776cc2..471afc39de 100644 --- a/pkg/backends/features/features.go +++ b/pkg/backends/features/features.go @@ -36,6 +36,8 @@ const ( // FeatureL7ILB defines the feature name of L7 Internal Load Balancer // L7-ILB Resources are currently alpha and regional FeatureL7ILB = "L7ILB" + //FeaturePrimaryVMIPNEG defines the feature name of GCE_PRIMARY_VM_IP NEGs which are used for L4 ILB. + FeaturePrimaryVMIPNEG = "PrimaryVMIPNEG" ) var ( @@ -46,7 +48,7 @@ var ( } // TODO: (shance) refactor all scope to be above the serviceport level scopeToFeatures = map[meta.KeyType][]string{ - meta.Regional: []string{FeatureL7ILB}, + meta.Regional: []string{FeatureL7ILB, FeaturePrimaryVMIPNEG}, } ) @@ -68,6 +70,9 @@ func featuresFromServicePort(sp *utils.ServicePort) []string { if sp.NEGEnabled { features = append(features, FeatureNEG) } + if sp.PrimaryIPNEGEnabled { + features = append(features, FeaturePrimaryVMIPNEG) + } if sp.L7ILBEnabled { features = append(features, FeatureL7ILB) } diff --git a/pkg/backends/syncer.go b/pkg/backends/syncer.go index 47218b7b78..2c980e25fc 100644 --- a/pkg/backends/syncer.go +++ b/pkg/backends/syncer.go @@ -15,6 +15,7 @@ package backends import ( "fmt" + "strings" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" v1 "k8s.io/api/core/v1" @@ -169,6 +170,12 @@ func (s *backendSyncer) GC(svcPorts []utils.ServicePort) error { // gc deletes the provided backends func (s *backendSyncer) gc(backends []*composite.BackendService, knownPorts sets.String) error { for _, be := range backends { + // Skip L4 LB backend services + // backendSyncer currently only GC backend services for L7 XLB/ILB. 
+ // L4 LB is GC as part of the deletion flow as there is no shared backend services among L4 ILBs. + if strings.Contains(be.Description, utils.L4ILBServiceDescKey) { + continue + } var key *meta.Key name := be.Name scope, err := composite.ScopeFromSelfLink(be.SelfLink) @@ -181,7 +188,6 @@ func (s *backendSyncer) gc(backends []*composite.BackendService, knownPorts sets if knownPorts.Has(key.String()) { continue } - klog.V(2).Infof("GCing backendService for port %s", name) err = s.backendPool.Delete(name, be.Version, scope) if err != nil { diff --git a/pkg/backends/syncer_test.go b/pkg/backends/syncer_test.go index 5771c513cf..cb68089d6a 100644 --- a/pkg/backends/syncer_test.go +++ b/pkg/backends/syncer_test.go @@ -105,7 +105,12 @@ func (p *portset) check(fakeGCE *gce.Cloud) error { return fmt.Errorf("backend for port %+v should exist, but got: %v", sp.NodePort, err) } } else { - if bs, err := composite.GetBackendService(fakeGCE, key, features.VersionFromServicePort(&sp)); !utils.IsHTTPErrorCode(err, http.StatusNotFound) { + bs, err := composite.GetBackendService(fakeGCE, key, features.VersionFromServicePort(&sp)) + if err == nil || !utils.IsHTTPErrorCode(err, http.StatusNotFound) { + if sp.PrimaryIPNEGEnabled { + // It is expected that these Backends should not get cleaned up in the GC loop. + continue + } return fmt.Errorf("backend for port %+v should not exist, but got %v", sp, bs) } } @@ -333,7 +338,7 @@ func TestGC(t *testing.T) { } } -// Test GC with both ELB and ILBs +// Test GC with both ELB and ILBs. Add in an L4 ILB NEG which should not be deleted as part of GC. func TestGCMixed(t *testing.T) { fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) syncer := newTestSyncer(fakeGCE) @@ -345,6 +350,7 @@ func TestGCMixed(t *testing.T) { {NodePort: 84, Protocol: annotations.ProtocolHTTP, NEGEnabled: true, L7ILBEnabled: true, BackendNamer: defaultNamer}, {NodePort: 85, Protocol: annotations.ProtocolHTTPS, NEGEnabled: true, L7ILBEnabled: true, BackendNamer: defaultNamer}, {NodePort: 86, Protocol: annotations.ProtocolHTTP, NEGEnabled: true, L7ILBEnabled: true, BackendNamer: defaultNamer}, + {ID: utils.ServicePortID{Service: types.NamespacedName{Name: "testsvc"}}, PrimaryIPNEGEnabled: true, BackendNamer: defaultNamer}, } ps := newPortset(svcNodePorts) if err := ps.add(svcNodePorts); err != nil { diff --git a/pkg/controller/translator/translator.go b/pkg/controller/translator/translator.go index 2a1e1199ff..af23e8bc2f 100644 --- a/pkg/controller/translator/translator.go +++ b/pkg/controller/translator/translator.go @@ -276,10 +276,10 @@ func (t *Translator) GetZoneForNode(name string) (string, error) { // ListZones returns a list of zones this Kubernetes cluster spans. func (t *Translator) ListZones() ([]string, error) { nodeLister := t.ctx.NodeInformer.GetIndexer() - return t.ListZonesWithLister(listers.NewNodeLister(nodeLister)) + return t.listZonesWithLister(listers.NewNodeLister(nodeLister)) } -func (t *Translator) ListZonesWithLister(lister listers.NodeLister) ([]string, error) { +func (t *Translator) listZonesWithLister(lister listers.NodeLister) ([]string, error) { zones := sets.String{} readyNodes, err := lister.ListWithPredicate(utils.GetNodeConditionPredicate()) if err != nil { diff --git a/pkg/firewalls/firewalls_l4.go b/pkg/firewalls/firewalls_l4.go new file mode 100644 index 0000000000..9431239207 --- /dev/null +++ b/pkg/firewalls/firewalls_l4.go @@ -0,0 +1,100 @@ +/* +Copyright 2020 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package firewalls + +import ( + "strings" + + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + "google.golang.org/api/compute/v1" + "k8s.io/ingress-gce/pkg/utils" + "k8s.io/klog" + "k8s.io/legacy-cloud-providers/gce" +) + +func EnsureL4InternalFirewallRule(cloud *gce.Cloud, fwName, lbIP, nsName string, sourceRanges, portRanges, nodeNames []string, proto string) error { + existingFw, err := cloud.GetFirewall(fwName) + if err != nil && !utils.IsNotFoundError(err) { + return err + } + + nodeTags, err := cloud.GetNodeTags(nodeNames) + if err != nil { + return err + } + fwDesc, err := utils.MakeL4ILBServiceDescription(nsName, lbIP, meta.VersionGA) + if err != nil { + klog.Warningf("EnsureL4InternalFirewallRule: Failed to generate description for rule %s, err: %v", + fwName, err) + } + expectedFw := &compute.Firewall{ + Name: fwName, + Description: fwDesc, + Network: cloud.NetworkURL(), + SourceRanges: sourceRanges, + TargetTags: nodeTags, + Allowed: []*compute.FirewallAllowed{ + { + IPProtocol: strings.ToLower(proto), + Ports: portRanges, + }, + }, + } + if existingFw == nil { + klog.V(2).Infof("EnsureL4InternalFirewallRule(%v): creating firewall", fwName) + err = cloud.CreateFirewall(expectedFw) + if utils.IsForbiddenError(err) && cloud.OnXPN() { + gcloudCmd := gce.FirewallToGCloudCreateCmd(expectedFw, cloud.NetworkProjectID()) + + klog.V(3).Infof("EnsureL4InternalFirewallRule(%v): Could not create L4 firewall on XPN cluster: %v. Raising event for cmd: %q", fwName, err, gcloudCmd) + return newFirewallXPNError(err, gcloudCmd) + } + return err + } + if firewallRuleEqual(expectedFw, existingFw) { + return nil + } + klog.V(2).Infof("EnsureL4InternalFirewallRule(%v): updating firewall", fwName) + err = cloud.UpdateFirewall(expectedFw) + if utils.IsForbiddenError(err) && cloud.OnXPN() { + gcloudCmd := gce.FirewallToGCloudUpdateCmd(expectedFw, cloud.NetworkProjectID()) + klog.V(3).Infof("EnsureL4InternalFirewallRule(%v): Could not update L4 firewall on XPN cluster: %v. Raising event for cmd: %q", fwName, err, gcloudCmd) + return newFirewallXPNError(err, gcloudCmd) + } + return err +} + +func EnsureL4InternalFirewallRuleDeleted(cloud *gce.Cloud, fwName string) error { + if err := utils.IgnoreHTTPNotFound(cloud.DeleteFirewall(fwName)); err != nil { + if utils.IsForbiddenError(err) && cloud.OnXPN() { + gcloudCmd := gce.FirewallToGCloudDeleteCmd(fwName, cloud.NetworkProjectID()) + klog.V(3).Infof("EnsureL4InternalFirewallRuleDeleted(%v): could not delete traffic firewall on XPN cluster. 
Raising event.", fwName) + return newFirewallXPNError(err, gcloudCmd) + } + return err + } + return nil +} + +func firewallRuleEqual(a, b *compute.Firewall) bool { + return a.Description == b.Description && + len(a.Allowed) == 1 && len(a.Allowed) == len(b.Allowed) && + a.Allowed[0].IPProtocol == b.Allowed[0].IPProtocol && + utils.EqualStringSets(a.Allowed[0].Ports, b.Allowed[0].Ports) && + utils.EqualStringSets(a.SourceRanges, b.SourceRanges) && + utils.EqualStringSets(a.TargetTags, b.TargetTags) +} diff --git a/pkg/healthchecks/healthchecks_l4.go b/pkg/healthchecks/healthchecks_l4.go new file mode 100644 index 0000000000..c82bfeb821 --- /dev/null +++ b/pkg/healthchecks/healthchecks_l4.go @@ -0,0 +1,162 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package healthchecks + +import ( + "fmt" + + cloudprovider "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + "k8s.io/apimachinery/pkg/types" + "k8s.io/ingress-gce/pkg/composite" + "k8s.io/ingress-gce/pkg/utils" + "k8s.io/klog" + "k8s.io/legacy-cloud-providers/gce" +) + +const ( + + // L4 Load Balancer parameters + gceHcCheckIntervalSeconds = int64(8) + gceHcTimeoutSeconds = int64(1) + // Start sending requests as soon as one healthcheck succeeds. + gceHcHealthyThreshold = int64(1) + // Defaults to 3 * 8 = 24 seconds before the LB will steer traffic away. + gceHcUnhealthyThreshold = int64(3) + sharedHcSuffix = "l4-shared-hc" + firewallHcSuffix = sharedHcSuffix + "-fw" +) + +// EnsureL4HealthCheck creates a new HTTP health check for an L4 LoadBalancer service, based on the parameters provided. +// If the healthcheck already exists, it is updated as needed. 
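+// It returns the ensured health check, its self link, and any error. When shared is true the check is reused
+// across services; this function itself takes no lock, so callers are expected to serialize shared-healthcheck
+// calls (the L4 controller does this with sharedResourcesLock).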
+func EnsureL4HealthCheck(cloud *gce.Cloud, name string, svcName types.NamespacedName, shared bool, path string, port int32) (*composite.HealthCheck, string, error) {
+	selfLink := ""
+	key, err := composite.CreateKey(cloud, name, meta.Global)
+	if err != nil {
+		return nil, selfLink, fmt.Errorf("Failed to create composite key for healthcheck %s - %v", name, err)
+	}
+	hc, err := composite.GetHealthCheck(cloud, key, meta.VersionGA)
+	if err != nil {
+		if !utils.IsNotFoundError(err) {
+			return nil, selfLink, err
+		}
+	}
+	expectedHC := NewL4HealthCheck(name, svcName, shared, path, port)
+	if hc == nil {
+		// Create the healthcheck
+		klog.V(2).Infof("Creating healthcheck %s for service %s, shared = %v", name, svcName, shared)
+		err = composite.CreateHealthCheck(cloud, key, expectedHC)
+		if err != nil {
+			return nil, selfLink, err
+		}
+		selfLink = cloudprovider.SelfLink(meta.VersionGA, cloud.ProjectID(), "healthChecks", key)
+		return expectedHC, selfLink, nil
+	}
+	selfLink = hc.SelfLink
+	if !needToUpdateHealthChecks(hc, expectedHC) {
+		// nothing to do
+		return hc, selfLink, nil
+	}
+	mergeHealthChecks(hc, expectedHC)
+	klog.V(2).Infof("Updating healthcheck %s for service %s", name, svcName)
+	err = composite.UpdateHealthCheck(cloud, key, expectedHC)
+	if err != nil {
+		return nil, selfLink, err
+	}
+	return expectedHC, selfLink, nil
+}
+
+func DeleteHealthCheck(cloud *gce.Cloud, name string) error {
+	key, err := composite.CreateKey(cloud, name, meta.Global)
+	if err != nil {
+		return fmt.Errorf("Failed to create composite key for healthcheck %s - %v", name, err)
+	}
+	return composite.DeleteHealthCheck(cloud, key, meta.VersionGA)
+}
+
+// HealthCheckName returns the name of the healthcheck as well as the name of the associated firewall rule
+// that opens up access for the healthcheck.
+func HealthCheckName(shared bool, clusteruid, lbName string) (string, string) {
+	hcName := lbName
+	fwName := lbName
+	if shared {
+		hcName = "k8s1-" + clusteruid + sharedHcSuffix
+		fwName = "k8s1-" + clusteruid + firewallHcSuffix
+	}
+	return hcName, fwName
+}
+
+func NewL4HealthCheck(name string, svcName types.NamespacedName, shared bool, path string, port int32) *composite.HealthCheck {
+	httpSettings := composite.HTTPHealthCheck{
+		Port:        int64(port),
+		RequestPath: path,
+	}
+	desc := ""
+	var err error
+
+	if !shared {
+		desc, err = utils.MakeL4ILBServiceDescription(svcName.String(), "", meta.VersionGA)
+		if err != nil {
+			klog.Warningf("Failed to generate description for L4HealthCheck %s, err %v", name, err)
+		}
+	}
+	return &composite.HealthCheck{
+		Name:               name,
+		CheckIntervalSec:   gceHcCheckIntervalSeconds,
+		TimeoutSec:         gceHcTimeoutSeconds,
+		HealthyThreshold:   gceHcHealthyThreshold,
+		UnhealthyThreshold: gceHcUnhealthyThreshold,
+		HttpHealthCheck:    &httpSettings,
+		Type:               "HTTP",
+		Description:        desc,
+	}
+}
+
+// mergeHealthChecks reconciles HealthCheck config to be no smaller than
+// the default values. newHC is assumed to carry the defaults,
+// since it is created by the NewL4HealthCheck call.
+// E.g. if the old health check interval is 2s and the new one has the default of 8s,
+// the interval will be reconciled to 8 seconds.
+// If the existing health check values are larger than the defaults,
+// the existing configuration will be kept.
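+// A worked example of the merge: an existing check with {CheckIntervalSec: 20, TimeoutSec: 1} merged into
+// the defaults {8, 1} leaves newHC at {20, 1} - each field is only ever raised to the existing value,
+// never lowered.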
+func mergeHealthChecks(hc, newHC *composite.HealthCheck) { + if hc.CheckIntervalSec > newHC.CheckIntervalSec { + newHC.CheckIntervalSec = hc.CheckIntervalSec + } + if hc.TimeoutSec > newHC.TimeoutSec { + newHC.TimeoutSec = hc.TimeoutSec + } + if hc.UnhealthyThreshold > newHC.UnhealthyThreshold { + newHC.UnhealthyThreshold = hc.UnhealthyThreshold + } + if hc.HealthyThreshold > newHC.HealthyThreshold { + newHC.HealthyThreshold = hc.HealthyThreshold + } +} + +// needToUpdateHealthChecks checks whether the healthcheck needs to be updated. +func needToUpdateHealthChecks(hc, newHC *composite.HealthCheck) bool { + return hc.HttpHealthCheck == nil || + newHC.HttpHealthCheck == nil || + hc.HttpHealthCheck.Port != newHC.HttpHealthCheck.Port || + hc.HttpHealthCheck.RequestPath != newHC.HttpHealthCheck.RequestPath || + hc.Description != newHC.Description || + hc.CheckIntervalSec < newHC.CheckIntervalSec || + hc.TimeoutSec < newHC.TimeoutSec || + hc.UnhealthyThreshold < newHC.UnhealthyThreshold || + hc.HealthyThreshold < newHC.HealthyThreshold +} diff --git a/pkg/healthchecks/healthchecks_l4_test.go b/pkg/healthchecks/healthchecks_l4_test.go new file mode 100644 index 0000000000..1c863cbb8c --- /dev/null +++ b/pkg/healthchecks/healthchecks_l4_test.go @@ -0,0 +1,105 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package healthchecks + +import ( + "k8s.io/apimachinery/pkg/types" + "k8s.io/ingress-gce/pkg/composite" + "testing" +) + +func TestMergeHealthChecks(t *testing.T) { + t.Parallel() + for _, tc := range []struct { + desc string + checkIntervalSec int64 + timeoutSec int64 + healthyThreshold int64 + unhealthyThreshold int64 + wantCheckIntervalSec int64 + wantTimeoutSec int64 + wantHealthyThreshold int64 + wantUnhealthyThreshold int64 + }{ + {"unchanged", gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold, gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold}, + {"interval - too small - should reconcile", gceHcCheckIntervalSeconds - 1, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold, gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold}, + {"timeout - too small - should reconcile", gceHcCheckIntervalSeconds, gceHcTimeoutSeconds - 1, gceHcHealthyThreshold, gceHcUnhealthyThreshold, gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold}, + {"healthy threshold - too small - should reconcile", gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold - 1, gceHcUnhealthyThreshold, gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold}, + {"unhealthy threshold - too small - should reconcile", gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold - 1, gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold}, + {"interval - user configured - should keep", gceHcCheckIntervalSeconds + 1, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold, gceHcCheckIntervalSeconds + 1, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold}, + {"timeout - user configured - should keep", gceHcCheckIntervalSeconds, gceHcTimeoutSeconds + 1, gceHcHealthyThreshold, gceHcUnhealthyThreshold, gceHcCheckIntervalSeconds, gceHcTimeoutSeconds + 1, gceHcHealthyThreshold, gceHcUnhealthyThreshold}, + {"healthy threshold - user configured - should keep", gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold + 1, gceHcUnhealthyThreshold, gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold + 1, gceHcUnhealthyThreshold}, + {"unhealthy threshold - user configured - should keep", gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold + 1, gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold + 1}, + } { + t.Run(tc.desc, func(t *testing.T) { + wantHC := NewL4HealthCheck("hc", types.NamespacedName{Name: "svc", Namespace: "default"}, false, "/", 12345) + hc := &composite.HealthCheck{ + CheckIntervalSec: tc.checkIntervalSec, + TimeoutSec: tc.timeoutSec, + HealthyThreshold: tc.healthyThreshold, + UnhealthyThreshold: tc.unhealthyThreshold, + } + mergeHealthChecks(hc, wantHC) + if wantHC.CheckIntervalSec != tc.wantCheckIntervalSec { + t.Errorf("wantHC.CheckIntervalSec = %d; want %d", wantHC.CheckIntervalSec, tc.checkIntervalSec) + } + if wantHC.TimeoutSec != tc.wantTimeoutSec { + t.Errorf("wantHC.TimeoutSec = %d; want %d", wantHC.TimeoutSec, tc.timeoutSec) + } + if wantHC.HealthyThreshold != tc.wantHealthyThreshold { + t.Errorf("wantHC.HealthyThreshold = %d; want %d", wantHC.HealthyThreshold, tc.healthyThreshold) + } + if wantHC.UnhealthyThreshold != tc.wantUnhealthyThreshold { + 
t.Errorf("wantHC.UnhealthyThreshold = %d; want %d", wantHC.UnhealthyThreshold, tc.unhealthyThreshold) + } + }) + } +} + +func TestCompareHealthChecks(t *testing.T) { + t.Parallel() + for _, tc := range []struct { + desc string + modifier func(*composite.HealthCheck) + wantChanged bool + }{ + {"unchanged", nil, false}, + {"nil HttpHealthCheck", func(hc *composite.HealthCheck) { hc.HttpHealthCheck = nil }, true}, + {"desc does not match", func(hc *composite.HealthCheck) { hc.Description = "bad-desc" }, true}, + {"port does not match", func(hc *composite.HealthCheck) { hc.HttpHealthCheck.Port = 54321 }, true}, + {"requestPath does not match", func(hc *composite.HealthCheck) { hc.HttpHealthCheck.RequestPath = "/anotherone" }, true}, + {"interval needs update", func(hc *composite.HealthCheck) { hc.CheckIntervalSec = gceHcCheckIntervalSeconds - 1 }, true}, + {"timeout needs update", func(hc *composite.HealthCheck) { hc.TimeoutSec = gceHcTimeoutSeconds - 1 }, true}, + {"healthy threshold needs update", func(hc *composite.HealthCheck) { hc.HealthyThreshold = gceHcHealthyThreshold - 1 }, true}, + {"unhealthy threshold needs update", func(hc *composite.HealthCheck) { hc.UnhealthyThreshold = gceHcUnhealthyThreshold - 1 }, true}, + {"interval does not need update", func(hc *composite.HealthCheck) { hc.CheckIntervalSec = gceHcCheckIntervalSeconds + 1 }, false}, + {"timeout does not need update", func(hc *composite.HealthCheck) { hc.TimeoutSec = gceHcTimeoutSeconds + 1 }, false}, + {"healthy threshold does not need update", func(hc *composite.HealthCheck) { hc.HealthyThreshold = gceHcHealthyThreshold + 1 }, false}, + {"unhealthy threshold does not need update", func(hc *composite.HealthCheck) { hc.UnhealthyThreshold = gceHcUnhealthyThreshold + 1 }, false}, + } { + t.Run(tc.desc, func(t *testing.T) { + hc := NewL4HealthCheck("hc", types.NamespacedName{Name: "svc", Namespace: "default"}, false, "/", 12345) + wantHC := NewL4HealthCheck("hc", types.NamespacedName{Name: "svc", Namespace: "default"}, false, "/", 12345) + if tc.modifier != nil { + tc.modifier(hc) + } + if gotChanged := needToUpdateHealthChecks(hc, wantHC); gotChanged != tc.wantChanged { + t.Errorf("needToUpdateHealthChecks(%#v, %#v) = %t; want changed = %t", hc, wantHC, gotChanged, tc.wantChanged) + } + }) + } +} diff --git a/pkg/l4/l4controller.go b/pkg/l4/l4controller.go new file mode 100644 index 0000000000..aeff578610 --- /dev/null +++ b/pkg/l4/l4controller.go @@ -0,0 +1,366 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package l4 + +import ( + "fmt" + "reflect" + "sync" + + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + v1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" + listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/ingress-gce/pkg/annotations" + "k8s.io/ingress-gce/pkg/backends" + "k8s.io/ingress-gce/pkg/context" + "k8s.io/ingress-gce/pkg/controller/translator" + "k8s.io/ingress-gce/pkg/loadbalancers" + negtypes "k8s.io/ingress-gce/pkg/neg/types" + "k8s.io/ingress-gce/pkg/utils" + "k8s.io/ingress-gce/pkg/utils/common" + "k8s.io/klog" + "k8s.io/kubernetes/pkg/apis/core/v1/helper" +) + +// L4Controller manages the create/update delete of all L4 Internal LoadBalancer services. +type L4Controller struct { + ctx *context.ControllerContext + // kubeClient, needed for attaching finalizer + client kubernetes.Interface + svcQueue utils.TaskQueue + serviceLister cache.Indexer + nodeLister listers.NodeLister + stopCh chan struct{} + // needed for listing the zones in the cluster. + translator *translator.Translator + // needed for linking the NEG with the backend service for each ILB service. + NegLinker backends.Linker + backendPool *backends.Backends + sharedResourcesLock sync.Mutex +} + +// NewController creates a new instance of the L4 ILB controller. +func NewController(ctx *context.ControllerContext, stopCh chan struct{}) *L4Controller { + l4c := &L4Controller{ + ctx: ctx, + client: ctx.KubeClient, + serviceLister: ctx.ServiceInformer.GetIndexer(), + nodeLister: listers.NewNodeLister(ctx.NodeInformer.GetIndexer()), + stopCh: stopCh, + } + l4c.translator = translator.NewTranslator(ctx) + l4c.backendPool = backends.NewPool(ctx.Cloud, ctx.ClusterNamer) + l4c.NegLinker = backends.NewNEGLinker(l4c.backendPool, negtypes.NewAdapter(ctx.Cloud), ctx.Cloud) + + l4c.svcQueue = utils.NewPeriodicTaskQueue("l4", "services", l4c.sync) + ctx.ServiceInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + addSvc := obj.(*v1.Service) + svcKey := utils.ServiceKeyFunc(addSvc.Namespace, addSvc.Name) + needsILB, svcType := annotations.WantsL4ILB(addSvc) + // Check for deletion since updates or deletes show up as Add when controller restarts. + if needsILB || needsDeletion(addSvc) { + klog.V(3).Infof("ILB Service %s added, enqueuing", svcKey) + l4c.ctx.Recorder(addSvc.Namespace).Eventf(addSvc, v1.EventTypeNormal, "ADD", svcKey) + l4c.svcQueue.Enqueue(addSvc) + } else { + klog.V(4).Infof("Ignoring add for non-lb service %s based on %v", svcKey, svcType) + } + }, + // Deletes will be handled in the Update when the deletion timestamp is set. + UpdateFunc: func(old, cur interface{}) { + curSvc := cur.(*v1.Service) + svcKey := utils.ServiceKeyFunc(curSvc.Namespace, curSvc.Name) + oldSvc := old.(*v1.Service) + if l4c.needsUpdate(curSvc, oldSvc) || needsDeletion(curSvc) { + klog.V(3).Infof("Service %v changed, enqueuing", svcKey) + l4c.svcQueue.Enqueue(curSvc) + return + } + // Enqueue ILB services periodically for reasserting that resources exist. + needsILB, _ := annotations.WantsL4ILB(curSvc) + if needsILB && reflect.DeepEqual(old, cur) { + // this will happen when informers run a resync on all the existing services even when the object is + // not modified. + klog.V(3).Infof("Periodic enqueueing of %v", svcKey) + l4c.svcQueue.Enqueue(curSvc) + } + }, + }) + // TODO enhance this by looking at some metric from service controller to ensure it is up. 
+ // We cannot use existence of a backend service or other resource, since those are on a per-service basis. + ctx.AddHealthCheck("service-controller health", func() error { return nil }) + return l4c +} + +func (l4c *L4Controller) Run() { + defer l4c.shutdown() + go l4c.svcQueue.Run() + <-l4c.stopCh +} + +// This should only be called when the process is being terminated. +func (l4c *L4Controller) shutdown() { + klog.Infof("Shutting down L4 Service Controller") + l4c.svcQueue.Shutdown() +} + +// processServiceCreateOrUpdate ensures load balancer resources for the given service, as needed. +// Returns an error if processing the service update failed. +func (l4c *L4Controller) processServiceCreateOrUpdate(key string, service *v1.Service) error { + // skip services that are being handled by the legacy service controller. + if utils.IsLegacyL4ILBService(service) { + klog.Warningf("Ignoring update for service %s:%s managed by service controller", service.Namespace, service.Name) + l4c.ctx.Recorder(service.Namespace).Eventf(service, v1.EventTypeWarning, "SyncLoadBalancerSkipped", + fmt.Sprintf("skipping l4 load balancer sync as service contains '%s' finalizer", common.LegacyILBFinalizer)) + return nil + } + + // Ensure v2 finalizer + if err := common.EnsureServiceFinalizer(service, common.ILBFinalizerV2, l4c.ctx.KubeClient); err != nil { + return fmt.Errorf("Failed to attach finalizer to service %s/%s, err %v", service.Namespace, service.Name, err) + } + l4 := loadbalancers.NewL4Handler(service, l4c.ctx.Cloud, meta.Regional, l4c.ctx.ClusterNamer, l4c.ctx.Recorder(service.Namespace), &l4c.sharedResourcesLock) + nodeNames, err := utils.GetReadyNodeNames(l4c.nodeLister) + if err != nil { + return err + } + // Use the same function for both create and updates. If controller crashes and restarts, + // all existing services will show up as Service Adds. 
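+	// (EnsureInternalLoadBalancer is assumed to be idempotent: it creates missing GCE resources and
+	// updates drifted ones, so re-processing an unchanged service reduces to read-only verification.)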
+	status, err := l4.EnsureInternalLoadBalancer(nodeNames, service)
+	if err != nil {
+		l4c.ctx.Recorder(service.Namespace).Eventf(service, v1.EventTypeWarning, "SyncLoadBalancerFailed",
+			"Error syncing load balancer: %v", err)
+		return err
+	}
+	if status == nil {
+		l4c.ctx.Recorder(service.Namespace).Eventf(service, v1.EventTypeWarning, "SyncLoadBalancerFailed",
+			"Empty status returned, even though there were no errors")
+		return fmt.Errorf("service status returned by EnsureInternalLoadBalancer for %s is nil",
+			l4.NamespacedName.String())
+	}
+	if err = l4c.linkNEG(l4); err != nil {
+		l4c.ctx.Recorder(service.Namespace).Eventf(service, v1.EventTypeWarning, "SyncLoadBalancerFailed",
+			"Failed to link NEG with Backend Service for load balancer, err: %v", err)
+		return err
+	}
+	err = l4c.updateServiceStatus(service, status)
+	if err != nil {
+		l4c.ctx.Recorder(service.Namespace).Eventf(service, v1.EventTypeWarning, "SyncLoadBalancerFailed",
+			"Error updating load balancer status: %v", err)
+		return err
+	}
+	l4c.ctx.Recorder(service.Namespace).Eventf(service, v1.EventTypeNormal, "SyncLoadBalancerSuccessful",
+		"Successfully ensured load balancer resources")
+	return nil
+}
+
+func (l4c *L4Controller) processServiceDeletion(key string, svc *v1.Service) error {
+	l4 := loadbalancers.NewL4Handler(svc, l4c.ctx.Cloud, meta.Regional, l4c.ctx.ClusterNamer, l4c.ctx.Recorder(svc.Namespace), &l4c.sharedResourcesLock)
+	l4c.ctx.Recorder(svc.Namespace).Eventf(svc, v1.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer for %s", key)
+	if err := l4.EnsureInternalLoadBalancerDeleted(svc); err != nil {
+		l4c.ctx.Recorder(svc.Namespace).Eventf(svc, v1.EventTypeWarning, "DeleteLoadBalancerFailed", "Error deleting load balancer: %v", err)
+		return err
+	}
+	if err := common.EnsureDeleteServiceFinalizer(svc, common.ILBFinalizerV2, l4c.ctx.KubeClient); err != nil {
+		l4c.ctx.Recorder(svc.Namespace).Eventf(svc, v1.EventTypeWarning, "DeleteLoadBalancerFailed",
+			"Error removing finalizer from load balancer: %v", err)
+		return err
+	}
+	// Reset the loadbalancer status. Ignore NotFound error since the service can already be deleted at this point.
+	if err := l4c.updateServiceStatus(svc, &v1.LoadBalancerStatus{}); utils.IgnoreHTTPNotFound(err) != nil {
+		l4c.ctx.Recorder(svc.Namespace).Eventf(svc, v1.EventTypeWarning, "DeleteLoadBalancer",
+			"Error resetting load balancer status to empty: %v", err)
+		return err
+	}
+	l4c.ctx.Recorder(svc.Namespace).Eventf(svc, v1.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer")
+	return nil
+}
+
+// linkNEG associates the NEG to the backendService for the given L4 ILB service.
+func (l4c *L4Controller) linkNEG(l4 *loadbalancers.L4) error {
+	// link neg to backend service
+	zones, err := l4c.translator.ListZones()
+	if err != nil {
+		return err
+	}
+	var groupKeys []backends.GroupKey
+	for _, zone := range zones {
+		groupKeys = append(groupKeys, backends.GroupKey{Zone: zone})
+	}
+	return l4c.NegLinker.Link(l4.ServicePort, groupKeys)
+}
+
+func (l4c *L4Controller) sync(key string) error {
+	svc, exists, err := l4c.ctx.Services().GetByKey(key)
+	if err != nil {
+		return fmt.Errorf("Failed to lookup service for key %s : %s", key, err)
+	}
+	if !exists || svc == nil {
+		// The service will not exist if its resources and finalizer are handled by the legacy service controller and
+		// it has been deleted. As long as the V2 finalizer is present, the service will not be deleted by apiserver.
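+		// (Assumption: once processServiceDeletion removes the V2 finalizer, the apiserver completes the
+		// pending delete and the object drops out of the informer cache, which is the case handled here.)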
+ klog.V(3).Infof("Ignoring delete of service %s not managed by L4 controller", key) + return nil + } + if needsDeletion(svc) { + klog.V(2).Infof("Deleting ILB resources for service %s managed by L4 controller", key) + return l4c.processServiceDeletion(key, svc) + } + return l4c.processServiceCreateOrUpdate(key, svc) +} + +func (l4c *L4Controller) updateServiceStatus(svc *v1.Service, newStatus *v1.LoadBalancerStatus) error { + if helper.LoadBalancerStatusEqual(&svc.Status.LoadBalancer, newStatus) { + return nil + } + updated := svc.DeepCopy() + updated.Status.LoadBalancer = *newStatus + if _, err := utils.PatchService(l4c.ctx.KubeClient.CoreV1(), svc, updated); err != nil { + return err + } + return nil +} + +func needsDeletion(svc *v1.Service) bool { + if !common.HasGivenFinalizer(svc.ObjectMeta, common.ILBFinalizerV2) { + return false + } + if common.IsDeletionCandidateForGivenFinalizer(svc.ObjectMeta, common.ILBFinalizerV2) { + return true + } + needsILB, _ := annotations.WantsL4ILB(svc) + return !needsILB +} + +// needsUpdate checks if load balancer needs to be updated due to change in attributes. +func (l4c *L4Controller) needsUpdate(oldService *v1.Service, newService *v1.Service) bool { + oldSvcWantsILB, _ := annotations.WantsL4ILB(oldService) + newSvcWantsILB, _ := annotations.WantsL4ILB(newService) + recorder := l4c.ctx.Recorder(oldService.Namespace) + if oldSvcWantsILB != newSvcWantsILB { + recorder.Eventf(newService, v1.EventTypeNormal, "Type", "%v -> %v", oldService.Spec.Type, newService.Spec.Type) + return true + } + + if !reflect.DeepEqual(oldService.Spec.LoadBalancerSourceRanges, newService.Spec.LoadBalancerSourceRanges) { + recorder.Eventf(newService, v1.EventTypeNormal, "LoadBalancerSourceRanges", "%v -> %v", + oldService.Spec.LoadBalancerSourceRanges, newService.Spec.LoadBalancerSourceRanges) + return true + } + + if !portsEqualForLBService(oldService, newService) || oldService.Spec.SessionAffinity != newService.Spec.SessionAffinity { + return true + } + + if !reflect.DeepEqual(oldService.Spec.SessionAffinityConfig, newService.Spec.SessionAffinityConfig) { + return true + } + if oldService.Spec.LoadBalancerIP != newService.Spec.LoadBalancerIP { + recorder.Eventf(newService, v1.EventTypeNormal, "LoadbalancerIP", "%v -> %v", + oldService.Spec.LoadBalancerIP, newService.Spec.LoadBalancerIP) + return true + } + if len(oldService.Spec.ExternalIPs) != len(newService.Spec.ExternalIPs) { + recorder.Eventf(newService, v1.EventTypeNormal, "ExternalIP", "Count: %v -> %v", + len(oldService.Spec.ExternalIPs), len(newService.Spec.ExternalIPs)) + return true + } + for i := range oldService.Spec.ExternalIPs { + if oldService.Spec.ExternalIPs[i] != newService.Spec.ExternalIPs[i] { + recorder.Eventf(newService, v1.EventTypeNormal, "ExternalIP", "Added: %v", + newService.Spec.ExternalIPs[i]) + return true + } + } + if !reflect.DeepEqual(oldService.Annotations, newService.Annotations) { + // Ignore update if only neg annotation changed, this is added by the neg controller. 
+ if !annotations.OnlyNEGStatusChanged(oldService, newService) { + return true + } + } + if oldService.UID != newService.UID { + recorder.Eventf(newService, v1.EventTypeNormal, "UID", "%v -> %v", + oldService.UID, newService.UID) + return true + } + if oldService.Spec.ExternalTrafficPolicy != newService.Spec.ExternalTrafficPolicy { + recorder.Eventf(newService, v1.EventTypeNormal, "ExternalTrafficPolicy", "%v -> %v", + oldService.Spec.ExternalTrafficPolicy, newService.Spec.ExternalTrafficPolicy) + return true + } + if oldService.Spec.HealthCheckNodePort != newService.Spec.HealthCheckNodePort { + recorder.Eventf(newService, v1.EventTypeNormal, "HealthCheckNodePort", "%v -> %v", + oldService.Spec.HealthCheckNodePort, newService.Spec.HealthCheckNodePort) + return true + } + return false +} + +func getPortsForLB(service *v1.Service) []*v1.ServicePort { + ports := []*v1.ServicePort{} + for i := range service.Spec.Ports { + sp := &service.Spec.Ports[i] + ports = append(ports, sp) + } + return ports +} + +func portsEqualForLBService(x, y *v1.Service) bool { + xPorts := getPortsForLB(x) + yPorts := getPortsForLB(y) + return portSlicesEqualForLB(xPorts, yPorts) +} + +func portSlicesEqualForLB(x, y []*v1.ServicePort) bool { + if len(x) != len(y) { + return false + } + + for i := range x { + if !portEqualForLB(x[i], y[i]) { + return false + } + } + return true +} + +func portEqualForLB(x, y *v1.ServicePort) bool { + // TODO: Should we check name? (In theory, an LB could expose it) + if x.Name != y.Name { + return false + } + + if x.Protocol != y.Protocol { + return false + } + + if x.Port != y.Port { + return false + } + + if x.NodePort != y.NodePort { + return false + } + + if x.TargetPort != y.TargetPort { + return false + } + + return true +} diff --git a/pkg/l4/l4controller_test.go b/pkg/l4/l4controller_test.go new file mode 100644 index 0000000000..d2dcd961eb --- /dev/null +++ b/pkg/l4/l4controller_test.go @@ -0,0 +1,270 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package l4 + +import ( + "reflect" + "testing" + "time" + + "k8s.io/client-go/kubernetes" + testing2 "k8s.io/client-go/testing" + "k8s.io/ingress-gce/pkg/loadbalancers" + "k8s.io/ingress-gce/pkg/neg/types" + + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + api_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/ingress-gce/pkg/annotations" + "k8s.io/ingress-gce/pkg/composite" + "k8s.io/ingress-gce/pkg/context" + "k8s.io/ingress-gce/pkg/test" + "k8s.io/ingress-gce/pkg/utils/common" + "k8s.io/ingress-gce/pkg/utils/namer" + "k8s.io/legacy-cloud-providers/gce" +) + +const ( + clusterUID = "aaaaa" + resetLBStatus = "{\"status\":{\"loadBalancer\":{\"ingress\":null}}}" +) + +func newServiceController() *L4Controller { + kubeClient := fake.NewSimpleClientset() + fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) + (fakeGCE.Compute().(*cloud.MockGCE)).MockForwardingRules.InsertHook = loadbalancers.InsertForwardingRuleHook + + namer := namer.NewNamer(clusterUID, "") + + stopCh := make(chan struct{}) + ctxConfig := context.ControllerContextConfig{ + Namespace: api_v1.NamespaceAll, + ResyncPeriod: 1 * time.Minute, + } + ctx := context.NewControllerContext(nil, kubeClient, nil, nil, fakeGCE, namer, "" /*kubeSystemUID*/, ctxConfig) + return NewController(ctx, stopCh) +} + +func addILBService(l4c *L4Controller, svc *api_v1.Service) { + l4c.ctx.KubeClient.CoreV1().Services(svc.Namespace).Create(svc) + l4c.ctx.ServiceInformer.GetIndexer().Add(svc) +} + +func updateILBService(l4c *L4Controller, svc *api_v1.Service) { + l4c.ctx.KubeClient.CoreV1().Services(svc.Namespace).Update(svc) + l4c.ctx.ServiceInformer.GetIndexer().Update(svc) +} + +func deleteILBService(l4c *L4Controller, svc *api_v1.Service) { + l4c.ctx.KubeClient.CoreV1().Services(svc.Namespace).Delete(svc.Name, &v1.DeleteOptions{}) + l4c.ctx.ServiceInformer.GetIndexer().Delete(svc) +} + +func addNEG(l4c *L4Controller, svc *api_v1.Service) { + // Also create a fake NEG for this service since the sync code will try to link the backend service to NEG + negName := l4c.ctx.ClusterNamer.PrimaryIPNEG(svc.Namespace, svc.Name) + neg := &composite.NetworkEndpointGroup{Name: negName} + key := meta.ZonalKey(negName, types.TestZone1) + composite.CreateNetworkEndpointGroup(l4c.ctx.Cloud, key, neg) +} + +func getKeyForSvc(svc *api_v1.Service, t *testing.T) string { + key, err := common.KeyFunc(svc) + if err != nil { + t.Fatalf("Failed to get key for service %v, err : %v", svc, err) + } + return key +} + +// validatePatchRequest validates that the given client patched the resource with the given change. +// This is needed because there is a bug in go-client test implementation where a patch operation cannot be used +// to delete fields - https://github.com/kubernetes/client-go/issues/607 +// TODO remove this once https://github.com/kubernetes/client-go/issues/607 has been fixed. 
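+// The comparison below is byte-for-byte against the last recorded action, so callers pass the exact
+// serialized patch they expect, e.g. the resetLBStatus constant defined above.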
+func validatePatchRequest(client kubernetes.Interface, patchVal string, t *testing.T) {
+	fakeClient := client.(*fake.Clientset)
+	actionLen := len(fakeClient.Actions())
+	if actionLen == 0 {
+		t.Fatalf("Expected at least one action in fake client")
+	}
+	// The latest action should be the one setting status to the given value
+	patchAction := fakeClient.Actions()[actionLen-1].(testing2.PatchAction)
+	if !reflect.DeepEqual(patchAction.GetPatch(), []byte(patchVal)) {
+		t.Errorf("Expected patch '%s', got '%s'", patchVal, string(patchAction.GetPatch()))
+	}
+}
+
+func validateSvcStatus(svc *api_v1.Service, expectStatus bool, t *testing.T) {
+	if common.HasGivenFinalizer(svc.ObjectMeta, common.ILBFinalizerV2) != expectStatus {
+		t.Fatalf("Expected L4 finalizer present to be %v, but it was %v", expectStatus, !expectStatus)
+	}
+	if len(svc.Status.LoadBalancer.Ingress) == 0 || svc.Status.LoadBalancer.Ingress[0].IP == "" {
+		if expectStatus {
+			t.Fatalf("Invalid LoadBalancer status field in service - %+v", svc.Status.LoadBalancer)
+		}
+	}
+	if len(svc.Status.LoadBalancer.Ingress) > 0 && !expectStatus {
+		// TODO uncomment below once https://github.com/kubernetes/client-go/issues/607 has been fixed.
+		// t.Fatalf("Expected LoadBalancer status to be empty, Got %v", svc.Status.LoadBalancer)
+	}
+}
+
+// TestProcessCreateOrUpdate verifies the processing loop in L4Controller.
+// This test adds a new service, then performs a valid update and then modifies the service type to External and ensures
+// that the status field is as expected in each case.
+func TestProcessCreateOrUpdate(t *testing.T) {
+	l4c := newServiceController()
+	newSvc := test.NewL4ILBService(false, 8080)
+	addILBService(l4c, newSvc)
+	addNEG(l4c, newSvc)
+	err := l4c.sync(getKeyForSvc(newSvc, t))
+	if err != nil {
+		t.Errorf("Failed to sync newly added service %s, err %v", newSvc.Name, err)
+	}
+	// List the service and ensure that it contains the finalizer as well as Status field.
+	newSvc, err = l4c.client.CoreV1().Services(newSvc.Namespace).Get(newSvc.Name, v1.GetOptions{})
+	if err != nil {
+		t.Errorf("Failed to lookup service %s, err: %v", newSvc.Name, err)
+	}
+	validateSvcStatus(newSvc, true, t)
+
+	// set the TrafficPolicy of the service to Local
+	newSvc.Spec.ExternalTrafficPolicy = api_v1.ServiceExternalTrafficPolicyTypeLocal
+	updateILBService(l4c, newSvc)
+	err = l4c.sync(getKeyForSvc(newSvc, t))
+	if err != nil {
+		t.Errorf("Failed to sync updated service %s, err %v", newSvc.Name, err)
+	}
+	// List the service and ensure that it contains the finalizer as well as Status field.
+	newSvc, err = l4c.client.CoreV1().Services(newSvc.Namespace).Get(newSvc.Name, v1.GetOptions{})
+	if err != nil {
+		t.Errorf("Failed to lookup service %s, err: %v", newSvc.Name, err)
+	}
+	validateSvcStatus(newSvc, true, t)
+
+	// Remove the Internal LoadBalancer annotation, this should trigger a cleanup.
+	delete(newSvc.Annotations, gce.ServiceAnnotationLoadBalancerType)
+	updateILBService(l4c, newSvc)
+	err = l4c.sync(getKeyForSvc(newSvc, t))
+	if err != nil {
+		t.Errorf("Failed to sync updated service %s, err %v", newSvc.Name, err)
+	}
+	// TODO remove this once https://github.com/kubernetes/client-go/issues/607 has been fixed.
+	validatePatchRequest(l4c.client, resetLBStatus, t)
+	// List the service and ensure that the finalizer and LoadBalancer status have been removed.
+ newSvc, err = l4c.client.CoreV1().Services(newSvc.Namespace).Get(newSvc.Name, v1.GetOptions{}) + if err != nil { + t.Errorf("Failed to lookup service %s, err: %v", newSvc.Name, err) + } + validateSvcStatus(newSvc, false, t) +} + +func TestProcessDeletion(t *testing.T) { + l4c := newServiceController() + newSvc := test.NewL4ILBService(false, 8080) + addILBService(l4c, newSvc) + addNEG(l4c, newSvc) + err := l4c.sync(getKeyForSvc(newSvc, t)) + if err != nil { + t.Errorf("Failed to sync newly added service %s, err %v", newSvc.Name, err) + } + // List the service and ensure that it contains the finalizer as well as Status field. + newSvc, err = l4c.client.CoreV1().Services(newSvc.Namespace).Get(newSvc.Name, v1.GetOptions{}) + if err != nil { + t.Errorf("Failed to lookup service %s, err: %v", newSvc.Name, err) + } + validateSvcStatus(newSvc, true, t) + + // Mark the service for deletion by updating timestamp. Use svc instead of newSvc since that has the finalizer. + newSvc.DeletionTimestamp = &v1.Time{} + updateILBService(l4c, newSvc) + if !needsDeletion(newSvc) { + t.Errorf("Incorrectly marked service %v as not needing ILB deletion", newSvc) + } + err = l4c.sync(getKeyForSvc(newSvc, t)) + if err != nil { + t.Errorf("Failed to sync updated service %s, err %v", newSvc.Name, err) + } + // TODO remove this once https://github.com/kubernetes/client-go/issues/607 has been fixed. + validatePatchRequest(l4c.client, resetLBStatus, t) + // List the service and ensure that it contains the finalizer as well as Status field. + newSvc, err = l4c.client.CoreV1().Services(newSvc.Namespace).Get(newSvc.Name, v1.GetOptions{}) + if err != nil { + t.Errorf("Failed to lookup service %s, err: %v", newSvc.Name, err) + } + validateSvcStatus(newSvc, false, t) + deleteILBService(l4c, newSvc) + newSvc, err = l4c.client.CoreV1().Services(newSvc.Namespace).Get(newSvc.Name, v1.GetOptions{}) + if newSvc != nil { + t.Errorf("Expected service to be deleted, but was found - %v", newSvc) + } +} + +func TestProcessCreateLegacyService(t *testing.T) { + l4c := newServiceController() + newSvc := test.NewL4ILBService(false, 8080) + // Set the legacy finalizer + newSvc.Finalizers = append(newSvc.Finalizers, common.LegacyILBFinalizer) + addILBService(l4c, newSvc) + err := l4c.sync(getKeyForSvc(newSvc, t)) + if err != nil { + t.Errorf("Failed to sync newly added service %s, err %v", newSvc.Name, err) + } + // List the service and ensure that the status field is not updated. 
+ svc, err := l4c.client.CoreV1().Services(newSvc.Namespace).Get(newSvc.Name, v1.GetOptions{}) + if err != nil { + t.Errorf("Failed to lookup service %s, err: %v", newSvc.Name, err) + } + validateSvcStatus(svc, false, t) +} + +func TestProcessUpdateClusterIPToILBService(t *testing.T) { + l4c := newServiceController() + clusterSvc := &api_v1.Service{ + ObjectMeta: v1.ObjectMeta{ + Name: "testsvc", + Namespace: "testns", + }, + } + addILBService(l4c, clusterSvc) + if needsILB, _ := annotations.WantsL4ILB(clusterSvc); needsILB { + t.Errorf("Incorrectly marked service %v as needing ILB", clusterSvc) + } + if needsDeletion(clusterSvc) { + t.Errorf("Incorrectly marked service %v as needing ILB deletion", clusterSvc) + } + // Change to Internal LoadBalancer type + newSvc := clusterSvc.DeepCopy() + newSvc.Spec.Type = api_v1.ServiceTypeLoadBalancer + newSvc.Annotations = make(map[string]string) + newSvc.Annotations[gce.ServiceAnnotationLoadBalancerType] = string(gce.LBTypeInternal) + updateILBService(l4c, newSvc) + if !l4c.needsUpdate(clusterSvc, newSvc) { + t.Errorf("Incorrectly marked service %v as not needing update", newSvc) + } + err := l4c.sync(getKeyForSvc(newSvc, t)) + if err != nil { + t.Errorf("Failed to sync newly updated service %s, err %v", newSvc.Name, err) + } + // List the service and ensure that the status field is updated. + newSvc, err = l4c.client.CoreV1().Services(newSvc.Namespace).Get(newSvc.Name, v1.GetOptions{}) + if err != nil { + t.Errorf("Failed to lookup service %s, err: %v", newSvc.Name, err) + } + validateSvcStatus(newSvc, true, t) +} diff --git a/pkg/loadbalancers/fakes.go b/pkg/loadbalancers/fakes.go index dcab2ee54b..4ca9a41528 100644 --- a/pkg/loadbalancers/fakes.go +++ b/pkg/loadbalancers/fakes.go @@ -43,3 +43,10 @@ func InsertGlobalForwardingRuleHook(ctx context.Context, key *meta.Key, obj *com } return false, nil } + +func InsertForwardingRuleHook(ctx context.Context, key *meta.Key, obj *compute.ForwardingRule, m *cloud.MockForwardingRules) (b bool, e error) { + if obj.IPAddress == "" { + obj.IPAddress = "10.0.0.1" + } + return false, nil +} diff --git a/pkg/loadbalancers/forwarding_rules.go b/pkg/loadbalancers/forwarding_rules.go index 959f62c1bf..e0ac15438e 100644 --- a/pkg/loadbalancers/forwarding_rules.go +++ b/pkg/loadbalancers/forwarding_rules.go @@ -19,12 +19,17 @@ package loadbalancers import ( "fmt" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/ingress-gce/pkg/annotations" "k8s.io/ingress-gce/pkg/composite" "k8s.io/ingress-gce/pkg/flags" "k8s.io/ingress-gce/pkg/utils" "k8s.io/ingress-gce/pkg/utils/namer" "k8s.io/klog" + "k8s.io/legacy-cloud-providers/gce" ) const ( @@ -181,3 +186,166 @@ func (l *L7) getEffectiveIP() (string, bool) { } return "", true } + +// ensureForwardingRule creates a forwarding rule with the given name, if it does not exist. It updates the existing +// forwarding rule if needed. +func (l *L4) ensureForwardingRule(loadBalancerName, bsLink string, options gce.ILBOptions) (*composite.ForwardingRule, error) { + key, err := l.CreateKey(loadBalancerName) + if err != nil { + return nil, err + } + desc := utils.L4ILBResourceDescription{} + // version used for creating the existing forwarding rule. 
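+	// The description is a small serialized blob (see utils.L4ILBResourceDescription) that records,
+	// among other fields, the compute API version the rule was created with. This is what lets the
+	// controller find a rule created at a non-GA version (e.g. with global access enabled) even
+	// though it lists at GA by default.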
+ existingVersion := meta.VersionGA + // version to use for the new forwarding rule + newVersion := getAPIVersion(options) + + // Get the GA version forwarding rule, use the description to identify the version it was created with. + existingFwdRule, err := composite.GetForwardingRule(l.cloud, key, meta.VersionGA) + if utils.IgnoreHTTPNotFound(err) != nil { + return nil, err + } + if existingFwdRule != nil { + if err = desc.Unmarshal(existingFwdRule.Description); err != nil { + klog.Warningf("Failed to lookup forwarding rule version from description, err %v. Using GA Version.", err) + } else { + existingVersion = desc.APIVersion + } + } + // Fetch the right forwarding rule in case it is not using GA + if existingVersion != meta.VersionGA { + existingFwdRule, err = composite.GetForwardingRule(l.cloud, key, existingVersion) + if utils.IgnoreHTTPNotFound(err) != nil { + klog.Errorf("Failed to lookup forwarding rule '%s' at version - %s, err %v", key.Name, existingVersion, err) + return nil, err + } + } + + if l.cloud.IsLegacyNetwork() { + l.recorder.Event(l.Service, v1.EventTypeWarning, "ILBOptionsIgnored", "Internal LoadBalancer options are not supported with Legacy Networks.") + options = gce.ILBOptions{} + } + subnetworkURL := l.cloud.SubnetworkURL() + + if !l.cloud.AlphaFeatureGate.Enabled(gce.AlphaFeatureILBCustomSubnet) { + if options.SubnetName != "" { + l.recorder.Event(l.Service, v1.EventTypeWarning, "ILBCustomSubnetOptionIgnored", "Internal LoadBalancer CustomSubnet options ignored as the feature gate is disabled.") + options.SubnetName = "" + } + } + if l.cloud.AlphaFeatureGate.Enabled(gce.AlphaFeatureILBCustomSubnet) { + // If this feature is enabled, changes to subnet annotation will be + // picked up and reflected in the forwarding rule. + // Removing the annotation will set the forwarding rule to use the default subnet. + if options.SubnetName != "" { + subnetKey := *key + subnetKey.Name = options.SubnetName + subnetworkURL = cloud.SelfLink(meta.VersionGA, l.cloud.ProjectID(), "subnetworks", &subnetKey) + } + } else { + // TODO(84885) remove this once ILBCustomSubnet goes beta. + if existingFwdRule != nil && existingFwdRule.Subnetwork != "" { + // If the ILB already exists, continue using the subnet that it's already using. + // This is to support existing ILBs that were setup using the wrong subnet - https://github.com/kubernetes/kubernetes/pull/57861 + subnetworkURL = existingFwdRule.Subnetwork + } + } + // Determine IP which will be used for this LB. If no forwarding rule has been established + // or specified in the Service spec, then requestedIP = "". 
+	ipToUse := ilbIPToUse(l.Service, existingFwdRule, subnetworkURL)
+	klog.V(2).Infof("ensureForwardingRule(%v): Using subnet %s for LoadBalancer IP %s", loadBalancerName, options.SubnetName, ipToUse)
+
+	var addrMgr *addressManager
+	// If the network is not a legacy network, use the address manager to reserve the IP.
+	if !l.cloud.IsLegacyNetwork() {
+		nm := types.NamespacedName{Namespace: l.Service.Namespace, Name: l.Service.Name}.String()
+		addrMgr = newAddressManager(l.cloud, nm, l.cloud.Region(), subnetworkURL, loadBalancerName, ipToUse, cloud.SchemeInternal)
+		ipToUse, err = addrMgr.HoldAddress()
+		if err != nil {
+			return nil, err
+		}
+		klog.V(2).Infof("ensureForwardingRule(%v): reserved IP %q for the forwarding rule", loadBalancerName, ipToUse)
+	}
+
+	ports, _, protocol := utils.GetPortsAndProtocol(l.Service.Spec.Ports)
+	// Compute the description for the forwarding rule.
+	frDesc, err := utils.MakeL4ILBServiceDescription(utils.ServiceKeyFunc(l.Service.Namespace, l.Service.Name), ipToUse,
+		newVersion)
+	if err != nil {
+		return nil, fmt.Errorf("failed to compute description for forwarding rule %s, err: %v", loadBalancerName,
+			err)
+	}
+
+	fr := &composite.ForwardingRule{
+		Name:                loadBalancerName,
+		IPAddress:           ipToUse,
+		Ports:               ports,
+		IPProtocol:          string(protocol),
+		LoadBalancingScheme: string(cloud.SchemeInternal),
+		Subnetwork:          subnetworkURL,
+		Network:             l.cloud.NetworkURL(),
+		Version:             newVersion,
+		BackendService:      bsLink,
+		AllowGlobalAccess:   options.AllowGlobalAccess,
+		Description:         frDesc,
+	}
+
+	if existingFwdRule != nil {
+		if Equal(existingFwdRule, fr) {
+			// Nothing to do.
+			klog.V(2).Infof("ensureForwardingRule: Skipping update of unchanged forwarding rule - %s", fr.Name)
+			return existingFwdRule, nil
+		}
+		klog.V(2).Infof("ensureForwardingRule: Deleting existing forwarding rule - %s, will be recreated", fr.Name)
+		if err = utils.IgnoreHTTPNotFound(composite.DeleteForwardingRule(l.cloud, key, existingVersion)); err != nil {
+			return nil, err
+		}
+	}
+	klog.V(2).Infof("ensureForwardingRule: Creating forwarding rule - %s", fr.Name)
+	if err = composite.CreateForwardingRule(l.cloud, key, fr); err != nil {
+		return nil, err
+	}
+
+	return composite.GetForwardingRule(l.cloud, key, fr.Version)
+}
+
+// Equal reports whether the two forwarding rules are equivalent for reconciliation purposes.
+// If one of the IP addresses is empty, it is not considered an inequality:
+// if the IP address drops from a valid IP to empty, we do not want to apply
+// the change if it is the only change in the forwarding rule. Similarly, if
+// the forwarding rule changes from an empty IP to an allocated IP address, the
+// subnetwork will change as well.
+func Equal(fr1, fr2 *composite.ForwardingRule) bool {
+	return (fr1.IPAddress == "" || fr2.IPAddress == "" || fr1.IPAddress == fr2.IPAddress) &&
+		fr1.IPProtocol == fr2.IPProtocol &&
+		fr1.LoadBalancingScheme == fr2.LoadBalancingScheme &&
+		utils.EqualStringSets(fr1.Ports, fr2.Ports) &&
+		fr1.BackendService == fr2.BackendService &&
+		fr1.AllowGlobalAccess == fr2.AllowGlobalAccess &&
+		fr1.Subnetwork == fr2.Subnetwork
+}
+
+// ilbIPToUse determines which IP address needs to be used in the ForwardingRule. If an IP has been
+// specified by the user, that is used. If there is an existing ForwardingRule, the IP address from
+// that is reused. In case a subnetwork change is requested, the existing ForwardingRule IP is ignored.
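+// For illustration (hypothetical values): with spec.loadBalancerIP "10.1.2.3" the user's IP always
+// wins; with no spec IP and an existing rule in the same subnet, fwdRule.IPAddress is reused; after
+// a subnet change an empty string is returned so a fresh IP is allocated from the new subnet.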
+func ilbIPToUse(svc *v1.Service, fwdRule *composite.ForwardingRule, requestedSubnet string) string { + if svc.Spec.LoadBalancerIP != "" { + return svc.Spec.LoadBalancerIP + } + if fwdRule == nil { + return "" + } + if requestedSubnet != fwdRule.Subnetwork { + // reset ip address since subnet is being changed. + return "" + } + return fwdRule.IPAddress +} + +// getAPIVersion returns the API version to use for CRUD of Forwarding rules, given the options enabled. +func getAPIVersion(options gce.ILBOptions) meta.Version { + if options.AllowGlobalAccess { + return meta.VersionBeta + } + return meta.VersionGA +} diff --git a/pkg/loadbalancers/l4.go b/pkg/loadbalancers/l4.go new file mode 100644 index 0000000000..bb6327f43b --- /dev/null +++ b/pkg/loadbalancers/l4.go @@ -0,0 +1,230 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package loadbalancers + +import ( + "strconv" + "strings" + "sync" + + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "k8s.io/cloud-provider/service/helpers" + "k8s.io/ingress-gce/pkg/backends" + "k8s.io/ingress-gce/pkg/composite" + "k8s.io/ingress-gce/pkg/firewalls" + "k8s.io/ingress-gce/pkg/healthchecks" + "k8s.io/ingress-gce/pkg/utils" + "k8s.io/ingress-gce/pkg/utils/namer" + "k8s.io/klog" + "k8s.io/legacy-cloud-providers/gce" +) + +// Many of the functions in this file are re-implemented from gce_loadbalancer_internal.go +// L4 handles the resource creation/deletion/update for a given L4 ILB service. +type L4 struct { + cloud *gce.Cloud + backendPool *backends.Backends + scope meta.KeyType + namer *namer.Namer + // recorder is used to generate k8s Events. + recorder record.EventRecorder + Service *corev1.Service + ServicePort utils.ServicePort + NamespacedName types.NamespacedName + sharedResourcesLock *sync.Mutex +} + +// NewL4Handler creates a new L4Handler for the given L4 service. +func NewL4Handler(service *corev1.Service, cloud *gce.Cloud, scope meta.KeyType, namer *namer.Namer, recorder record.EventRecorder, lock *sync.Mutex) *L4 { + l := &L4{cloud: cloud, scope: scope, namer: namer, recorder: recorder, Service: service, sharedResourcesLock: lock} + l.NamespacedName = types.NamespacedName{Name: service.Name, Namespace: service.Namespace} + l.backendPool = backends.NewPool(l.cloud, l.namer) + l.ServicePort = utils.ServicePort{ID: utils.ServicePortID{Service: l.NamespacedName}, BackendNamer: l.namer, + PrimaryIPNEGEnabled: true} + return l +} + +// CreateKey generates a meta.Key for a given GCE resource name. +func (l *L4) CreateKey(name string) (*meta.Key, error) { + return composite.CreateKey(l.cloud, name, l.scope) +} + +// getILBOptions fetches the optional features requested on the given ILB service. 
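+// These options map to the Service annotations for global access and custom subnet
+// (gce.ServiceAnnotationILBAllowGlobalAccess and gce.ServiceAnnotationILBSubnet); see the
+// tests in l4_test.go for example usage.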
+func getILBOptions(svc *corev1.Service) gce.ILBOptions {
+	return gce.ILBOptions{AllowGlobalAccess: gce.GetLoadBalancerAnnotationAllowGlobalAccess(svc),
+		SubnetName: gce.GetLoadBalancerAnnotationSubnet(svc)}
+}
+
+// EnsureInternalLoadBalancerDeleted performs a cleanup of all GCE resources for the given loadbalancer service.
+func (l *L4) EnsureInternalLoadBalancerDeleted(svc *corev1.Service) error {
+	klog.V(2).Infof("EnsureInternalLoadBalancerDeleted(%s): attempting delete of load balancer resources", l.NamespacedName.String())
+	sharedHC := !helpers.RequestsOnlyLocalTraffic(svc)
+	// All resources use the NEG name, except the forwarding rule.
+	name := l.namer.PrimaryIPNEG(svc.Namespace, svc.Name)
+	frName := l.GetFRName()
+	key, err := l.CreateKey(frName)
+	if err != nil {
+		klog.Errorf("Failed to create key for LoadBalancer resources with name %s for service %s, err %v", frName, l.NamespacedName.String(), err)
+		return err
+	}
+	var retErr error
+	// If any resource deletion fails, log the error and continue cleanup.
+	if err = utils.IgnoreHTTPNotFound(composite.DeleteForwardingRule(l.cloud, key, getAPIVersion(getILBOptions(l.Service)))); err != nil {
+		klog.Errorf("Failed to delete forwarding rule for internal loadbalancer service %s, err %v", l.NamespacedName.String(), err)
+		retErr = err
+	}
+	if err = ensureAddressDeleted(l.cloud, name, l.cloud.Region()); err != nil {
+		klog.Errorf("Failed to delete address for internal loadbalancer service %s, err %v", l.NamespacedName.String(), err)
+		retErr = err
+	}
+	hcName, hcFwName := healthchecks.HealthCheckName(sharedHC, l.namer.UID(), name)
+	// Delete the firewall rules.
+	deleteFunc := func(name string) error {
+		err := firewalls.EnsureL4InternalFirewallRuleDeleted(l.cloud, name)
+		if err != nil {
+			if fwErr, ok := err.(*firewalls.FirewallXPNError); ok {
+				l.recorder.Eventf(l.Service, corev1.EventTypeNormal, "XPN", fwErr.Message)
+				return nil
+			}
+			return err
+		}
+		return nil
+	}
+	// Delete the firewall rule allowing traffic from load balancer source ranges.
+	err = deleteFunc(name)
+	if err != nil {
+		klog.Errorf("Failed to delete firewall rule %s for internal loadbalancer service %s, err %v", name, l.NamespacedName.String(), err)
+		retErr = err
+	}
+
+	// Delete the firewall rule allowing healthcheck source ranges.
+	err = deleteFunc(hcFwName)
+	if err != nil {
+		klog.Errorf("Failed to delete firewall rule %s for internal loadbalancer service %s, err %v", hcFwName, l.NamespacedName.String(), err)
+		retErr = err
+	}
+	// Delete the backend service.
+	err = utils.IgnoreHTTPNotFound(l.backendPool.Delete(name, meta.VersionGA, meta.Regional))
+	if err != nil {
+		klog.Errorf("Failed to delete backends for internal loadbalancer service %s, err %v", l.NamespacedName.String(), err)
+		retErr = err
+	}
+
+	// Delete the healthcheck, taking the lock if it is shared.
+	if sharedHC {
+		l.sharedResourcesLock.Lock()
+		defer l.sharedResourcesLock.Unlock()
+	}
+	err = utils.IgnoreHTTPNotFound(healthchecks.DeleteHealthCheck(l.cloud, hcName))
+	if err != nil {
+		// This error is expected when the healthcheck is shared with other services.
+		if utils.IsInUsedByError(err) {
+			klog.V(2).Infof("Failed to delete healthcheck %s: health check in use.", hcName)
+			return retErr
+		}
+		klog.Errorf("Failed to delete healthcheck for internal loadbalancer service %s, err %v", l.NamespacedName.String(), err)
+		return err
+	}
+	return retErr
+}
+
+// GetFRName returns the name of the forwarding rule for the given ILB service.
+// This appends the protocol to the forwarding rule name, which makes it possible to support multiple protocols in the
+// same ILB service.
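+// For example (illustrative name only), a TCP service whose NEG is named "k8s2-abcdef-ns-svc" would
+// get a forwarding rule named "k8s2-abcdef-ns-svc-tcp".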
+func (l *L4) GetFRName() string {
+	_, _, protocol := utils.GetPortsAndProtocol(l.Service.Spec.Ports)
+	lbName := l.namer.PrimaryIPNEG(l.Service.Namespace, l.Service.Name)
+	return lbName + "-" + strings.ToLower(string(protocol))
+}
+
+// EnsureInternalLoadBalancer ensures that all GCE resources for the given loadbalancer service have
+// been created. It returns a LoadBalancerStatus with the updated ForwardingRule IP address.
+func (l *L4) EnsureInternalLoadBalancer(nodeNames []string, svc *corev1.Service) (*corev1.LoadBalancerStatus, error) {
+	// The NEG, backend service and firewall rule share the same name; the forwarding rule name
+	// additionally carries the protocol suffix (see GetFRName).
+	l.Service = svc
+	name := l.namer.PrimaryIPNEG(l.Service.Namespace, l.Service.Name)
+	options := getILBOptions(l.Service)
+
+	// Create the healthcheck.
+	sharedHC := !helpers.RequestsOnlyLocalTraffic(l.Service)
+	hcName, hcFwName := healthchecks.HealthCheckName(sharedHC, l.namer.UID(), name)
+	hcPath, hcPort := gce.GetNodesHealthCheckPath(), gce.GetNodesHealthCheckPort()
+	if !sharedHC {
+		hcPath, hcPort = helpers.GetServiceHealthCheckPathPort(l.Service)
+	} else {
+		// Take the lock when creating the shared healthcheck.
+		l.sharedResourcesLock.Lock()
+	}
+	_, hcLink, err := healthchecks.EnsureL4HealthCheck(l.cloud, hcName, l.NamespacedName, sharedHC, hcPath, hcPort)
+	if sharedHC {
+		// Unlock here so the rest of the resource creation can proceed without unnecessarily holding the lock.
+		l.sharedResourcesLock.Unlock()
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	_, portRanges, protocol := utils.GetPortsAndProtocol(l.Service.Spec.Ports)
+
+	// Ensure the firewall rules.
+	sourceRanges, err := helpers.GetLoadBalancerSourceRanges(l.Service)
+	if err != nil {
+		return nil, err
+	}
+	hcSourceRanges := gce.L4LoadBalancerSrcRanges()
+	ensureFunc := func(name, IP string, sourceRanges, portRanges []string, proto string) error {
+		nsName := utils.ServiceKeyFunc(l.Service.Namespace, l.Service.Name)
+		err := firewalls.EnsureL4InternalFirewallRule(l.cloud, name, IP, nsName, sourceRanges, portRanges, nodeNames, proto)
+		if err != nil {
+			if fwErr, ok := err.(*firewalls.FirewallXPNError); ok {
+				l.recorder.Eventf(l.Service, corev1.EventTypeNormal, "XPN", fwErr.Message)
+				return nil
+			}
+			return err
+		}
+		return nil
+	}
+	// Add the firewall rule for ILB traffic to nodes.
+	err = ensureFunc(name, "", sourceRanges.StringSlice(), portRanges, string(protocol))
+	if err != nil {
+		return nil, err
+	}
+
+	// Add the firewall rule for healthchecks to nodes.
+	err = ensureFunc(hcFwName, "", hcSourceRanges, []string{strconv.Itoa(int(hcPort))}, string(corev1.ProtocolTCP))
+	if err != nil {
+		return nil, err
+	}
+	// Ensure the backend service.
+	bs, err := l.backendPool.EnsureL4BackendService(name, hcLink, string(protocol), string(l.Service.Spec.SessionAffinity),
+		string(cloud.SchemeInternal), l.NamespacedName, meta.VersionGA)
+	if err != nil {
+		return nil, err
+	}
+
+	// Ensure the forwarding rule.
+	fr, err := l.ensureForwardingRule(l.GetFRName(), bs.SelfLink, options)
+	if err != nil {
+		klog.Errorf("EnsureInternalLoadBalancer: Failed to create forwarding rule - %v", err)
+		return nil, err
+	}
+	return &corev1.LoadBalancerStatus{Ingress: []corev1.LoadBalancerIngress{{IP: fr.IPAddress}}}, nil
+}
diff --git a/pkg/loadbalancers/l4_test.go b/pkg/loadbalancers/l4_test.go
new file mode 100644
index 0000000000..8a95cd19bb
--- /dev/null
+++ b/pkg/loadbalancers/l4_test.go
@@ -0,0 +1,1098 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loadbalancers
+
+import (
+	"reflect"
+	"strings"
+	"sync"
+	"testing"
+
+	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"
+	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"
+	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/mock"
+	"google.golang.org/api/compute/v1"
+	"k8s.io/api/core/v1"
+	"k8s.io/client-go/tools/record"
+	servicehelper "k8s.io/cloud-provider/service/helpers"
+	"k8s.io/ingress-gce/pkg/backends"
+	"k8s.io/ingress-gce/pkg/composite"
+	"k8s.io/ingress-gce/pkg/firewalls"
+	"k8s.io/ingress-gce/pkg/healthchecks"
+	"k8s.io/ingress-gce/pkg/test"
+	"k8s.io/ingress-gce/pkg/utils"
+	namer_util "k8s.io/ingress-gce/pkg/utils/namer"
+	"k8s.io/legacy-cloud-providers/gce"
+)
+
+const (
+	clusterUID = "aaaaa"
+	// TODO Uncomment after https://github.com/kubernetes/kubernetes/pull/87667 is available in vendor.
+	// eventMsgFirewallChange = "XPN Firewall change required by network admin"
+)
+
+func getFakeGCECloud(vals gce.TestClusterValues) *gce.Cloud {
+	fakeGCE := gce.NewFakeGCECloud(vals)
+	// InsertHook required to assign an IP Address for forwarding rule
+	(fakeGCE.Compute().(*cloud.MockGCE)).MockAddresses.InsertHook = mock.InsertAddressHook
+	(fakeGCE.Compute().(*cloud.MockGCE)).MockAlphaAddresses.X = mock.AddressAttributes{}
+	(fakeGCE.Compute().(*cloud.MockGCE)).MockAddresses.X = mock.AddressAttributes{}
+	(fakeGCE.Compute().(*cloud.MockGCE)).MockForwardingRules.InsertHook = mock.InsertFwdRuleHook
+
+	(fakeGCE.Compute().(*cloud.MockGCE)).MockRegionBackendServices.UpdateHook = mock.UpdateRegionBackendServiceHook
+	(fakeGCE.Compute().(*cloud.MockGCE)).MockHealthChecks.UpdateHook = mock.UpdateHealthCheckHook
+	(fakeGCE.Compute().(*cloud.MockGCE)).MockFirewalls.UpdateHook = mock.UpdateFirewallHook
+	return fakeGCE
+}
+
+func TestEnsureInternalBackendServiceUpdates(t *testing.T) {
+	t.Parallel()
+	fakeGCE := getFakeGCECloud(gce.DefaultTestClusterValues())
+	svc := test.NewL4ILBService(false, 8080)
+	namer := namer_util.NewNamer(clusterUID, "")
+	l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{})
+	bsName := l.namer.PrimaryIPNEG(l.Service.Namespace, l.Service.Name)
+	_, err := l.backendPool.EnsureL4BackendService(bsName, "", "TCP", string(svc.Spec.SessionAffinity), string(cloud.SchemeInternal), l.NamespacedName, meta.VersionGA)
+	if err != nil {
+		t.Errorf("Failed to ensure backend service %s - err %v", bsName, err)
+	}
+
+	// Update the Internal Backend Service with a new ServiceAffinity
+	_, err = l.backendPool.EnsureL4BackendService(bsName, "", "TCP", string(v1.ServiceAffinityNone), string(cloud.SchemeInternal), l.NamespacedName, meta.VersionGA)
+	if err != nil {
+		t.Errorf("Failed to ensure backend service %s - err %v", bsName, err)
+	}
+	key := meta.RegionalKey(bsName, l.cloud.Region())
+	bs, err := composite.GetBackendService(l.cloud, key, meta.VersionGA)
+	if err != nil {
+		t.Errorf("Failed to get backend service %s - err %v", bsName, err)
+	}
+	if bs.SessionAffinity != 
strings.ToUpper(string(v1.ServiceAffinityNone)) {
+		t.Errorf("Expected session affinity '%s' in %+v, Got '%s'", strings.ToUpper(string(v1.ServiceAffinityNone)), bs, bs.SessionAffinity)
+	}
+	// Change the Connection Draining timeout to a different value manually. Also update session affinity to trigger
+	// an update in the Ensure method. The timeout value should not be reconciled.
+	newTimeout := int64(backends.DefaultConnectionDrainingTimeoutSeconds * 2)
+	bs.ConnectionDraining.DrainingTimeoutSec = newTimeout
+	bs.SessionAffinity = strings.ToUpper(string(v1.ServiceAffinityClientIP))
+	err = composite.UpdateBackendService(l.cloud, key, bs)
+	if err != nil {
+		t.Errorf("Failed to update backend service with new connection draining timeout - err %v", err)
+	}
+	bs, err = l.backendPool.EnsureL4BackendService(bsName, "", "TCP", string(v1.ServiceAffinityNone), string(cloud.SchemeInternal), l.NamespacedName, meta.VersionGA)
+	if err != nil {
+		t.Errorf("Failed to ensure backend service %s - err %v", bsName, err)
+	}
+	if bs.SessionAffinity != strings.ToUpper(string(v1.ServiceAffinityNone)) {
+		t.Errorf("Backend service did not get updated.")
+	}
+	if bs.ConnectionDraining.DrainingTimeoutSec != newTimeout {
+		t.Errorf("Connection Draining timeout got reconciled to %d, expected %d", bs.ConnectionDraining.DrainingTimeoutSec, newTimeout)
+	}
+}
+
+func TestEnsureInternalLoadBalancer(t *testing.T) {
+	t.Parallel()
+	nodeNames := []string{"test-node-1"}
+	vals := gce.DefaultTestClusterValues()
+	fakeGCE := getFakeGCECloud(vals)
+
+	svc := test.NewL4ILBService(false, 8080)
+	namer := namer_util.NewNamer(clusterUID, "")
+	l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{})
+	_, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName)
+	if err != nil {
+		t.Errorf("Unexpected error when adding nodes %v", err)
+	}
+
+	status, err := l.EnsureInternalLoadBalancer(nodeNames, svc)
+	if err != nil {
+		t.Errorf("Failed to ensure loadBalancer, err %v", err)
+	}
+	if len(status.Ingress) == 0 {
+		t.Errorf("Got empty loadBalancer status using handler %v", l)
+	}
+	assertInternalLbResources(t, svc, l, nodeNames)
+}
+
+func TestEnsureInternalLoadBalancerTypeChange(t *testing.T) {
+	t.Parallel()
+	nodeNames := []string{"test-node-1"}
+	vals := gce.DefaultTestClusterValues()
+	fakeGCE := getFakeGCECloud(vals)
+
+	svc := test.NewL4ILBService(false, 8080)
+	namer := namer_util.NewNamer(clusterUID, "")
+	l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{})
+	_, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName)
+	if err != nil {
+		t.Errorf("Unexpected error when adding nodes %v", err)
+	}
+	status, err := l.EnsureInternalLoadBalancer(nodeNames, svc)
+	if err != nil {
+		t.Errorf("Unexpected error %v", err)
+	}
+	if len(status.Ingress) == 0 {
+		t.Errorf("Got empty loadBalancer status using handler %v", l)
+	}
+	assertInternalLbResources(t, svc, l, nodeNames)
+
+	// Clear the internal load balancer annotation; the scheme is now external.
+	svc.Annotations[gce.ServiceAnnotationLoadBalancerType] = ""
+	// Deletion of the ILB resources will be invoked by the service controller.
+	err = l.EnsureInternalLoadBalancerDeleted(svc)
+	if err != nil {
+		t.Errorf("Failed to delete loadBalancer, err %v", err)
+	}
+	assertInternalLbResourcesDeleted(t, svc, true, l)
+}
+
+func TestEnsureInternalLoadBalancerWithExistingResources(t *testing.T) {
+	t.Parallel()
+
+	vals := gce.DefaultTestClusterValues()
+	nodeNames := []string{"test-node-1"}
+
+	fakeGCE := 
getFakeGCECloud(vals)
+	svc := test.NewL4ILBService(false, 8080)
+	namer := namer_util.NewNamer(clusterUID, "")
+	l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{})
+	_, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName)
+	if err != nil {
+		t.Errorf("Unexpected error when adding nodes %v", err)
+	}
+
+	lbName := l.namer.PrimaryIPNEG(svc.Namespace, svc.Name)
+
+	// Create the expected resources necessary for an Internal Load Balancer
+	sharedHC := !servicehelper.RequestsOnlyLocalTraffic(svc)
+	hcName, _ := healthchecks.HealthCheckName(sharedHC, l.namer.UID(), lbName)
+	hcPath, hcPort := gce.GetNodesHealthCheckPath(), gce.GetNodesHealthCheckPort()
+	_, hcLink, err := healthchecks.EnsureL4HealthCheck(l.cloud, hcName, l.NamespacedName, sharedHC, hcPath, hcPort)
+	if err != nil {
+		t.Errorf("Failed to create healthcheck, err %v", err)
+	}
+	_, err = l.backendPool.EnsureL4BackendService(lbName, hcLink, "TCP", string(l.Service.Spec.SessionAffinity),
+		string(cloud.SchemeInternal), l.NamespacedName, meta.VersionGA)
+	if err != nil {
+		t.Errorf("Failed to create backendservice, err %v", err)
+	}
+	status, err := l.EnsureInternalLoadBalancer(nodeNames, svc)
+	if err != nil {
+		t.Errorf("Failed to ensure loadBalancer, err %v", err)
+	}
+	if len(status.Ingress) == 0 {
+		t.Errorf("Got empty loadBalancer status using handler %v", l)
+	}
+	assertInternalLbResources(t, svc, l, nodeNames)
+}
+
+// TestEnsureInternalLoadBalancerClearPreviousResources creates ILB resources with incomplete configuration and verifies
+// that they are updated when the controller processes the load balancer service.
+func TestEnsureInternalLoadBalancerClearPreviousResources(t *testing.T) {
+	t.Parallel()
+
+	vals := gce.DefaultTestClusterValues()
+	nodeNames := []string{"test-node-1"}
+
+	fakeGCE := getFakeGCECloud(vals)
+	svc := test.NewL4ILBService(true, 8080)
+	namer := namer_util.NewNamer(clusterUID, "")
+	l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{})
+	_, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName)
+	if err != nil {
+		t.Errorf("Unexpected error when adding nodes %v", err)
+	}
+
+	lbName := l.namer.PrimaryIPNEG(svc.Namespace, svc.Name)
+	frName := l.GetFRName()
+	key, err := composite.CreateKey(l.cloud, frName, meta.Regional)
+	if err != nil {
+		t.Errorf("Unexpected error when creating key - %v", err)
+	}
+
+	// Create a ForwardingRule that's missing an IP address
+	existingFwdRule := &composite.ForwardingRule{
+		Name:                frName,
+		IPAddress:           "",
+		Ports:               []string{"123"},
+		IPProtocol:          "TCP",
+		LoadBalancingScheme: string(cloud.SchemeInternal),
+	}
+	if err = composite.CreateForwardingRule(l.cloud, key, existingFwdRule); err != nil {
+		t.Errorf("Failed to create fake forwarding rule %s, err %v", frName, err)
+	}
+	key.Name = lbName
+	// Create a Firewall that's missing a Description
+	existingFirewall := &compute.Firewall{
+		Name:    lbName,
+		Network: fakeGCE.NetworkURL(),
+		Allowed: []*compute.FirewallAllowed{
+			{
+				IPProtocol: "tcp",
+				Ports:      []string{"123"},
+			},
+		},
+	}
+	fakeGCE.CreateFirewall(existingFirewall)
+
+	sharedHealthCheck := !servicehelper.RequestsOnlyLocalTraffic(svc)
+	hcName, _ := healthchecks.HealthCheckName(sharedHealthCheck, l.namer.UID(), lbName)
+
+	// Create a healthcheck with incomplete fields
+	existingHC := &composite.HealthCheck{Name: hcName}
+	// hcName will be the same as lbName since this service uses trafficPolicy Local, so the same key can be used.
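+	// (RequestsOnlyLocalTraffic is true for this service, so sharedHealthCheck is false and
+	// HealthCheckName falls back to the LB's own name rather than the cluster-wide shared name.)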
+ if err = composite.CreateHealthCheck(fakeGCE, key, existingHC); err != nil { + t.Errorf("Failed to create fake healthcheck %s, err %v", hcName, err) + } + + // Create a backend Service that's missing Description and Backends + existingBS := &composite.BackendService{ + Name: lbName, + Protocol: "TCP", + LoadBalancingScheme: string(cloud.SchemeInternal), + } + + if err = composite.CreateBackendService(fakeGCE, key, existingBS); err != nil { + t.Errorf("Failed to create fake backend service %s, err %v", lbName, err) + } + existingFwdRule.BackendService = existingBS.Name + + if _, err = l.EnsureInternalLoadBalancer(nodeNames, svc); err != nil { + t.Errorf("Failed to ensure loadBalancer %s, err %v", lbName, err) + } + key.Name = frName + // Expect new resources with the correct attributes to be created + newFwdRule, err := composite.GetForwardingRule(fakeGCE, key, meta.VersionGA) + if err != nil { + t.Errorf("Failed to lookup forwarding rule %s, err %v", lbName, err) + } + if newFwdRule == existingFwdRule { + t.Errorf("Expected incomplete forwarding rule to be updated") + } + + newFirewall, err := fakeGCE.GetFirewall(lbName) + if err != nil { + t.Errorf("Failed to lookup firewall rule %s, err %v", lbName, err) + } + if newFirewall == existingFirewall { + t.Errorf("Expected incomplete firewall rule to be updated") + } + + key.Name = lbName + newHC, err := composite.GetHealthCheck(fakeGCE, key, meta.VersionGA) + if err != nil { + t.Errorf("Failed to lookup healthcheck %s, err %v", lbName, err) + } + if newHC == existingHC || newHC.SelfLink == "" { + t.Errorf("Expected incomplete healthcheck to be updated") + } + + newBS, err := composite.GetBackendService(fakeGCE, key, meta.VersionGA) + if err != nil { + t.Errorf("Failed to lookup backend service %s, err %v", lbName, err) + } + if newBS == existingBS { + t.Errorf("Expected incomplete backend service to be updated") + } +} + +// TestUpdateResourceLinks verifies that an existing backend service created with different healthchecks is reconciled +// upon load balancer sync. The other healthchecks are not deleted. 
+func TestUpdateResourceLinks(t *testing.T) {
+	t.Parallel()
+
+	vals := gce.DefaultTestClusterValues()
+	nodeNames := []string{"test-node-1"}
+
+	fakeGCE := getFakeGCECloud(vals)
+	svc := test.NewL4ILBService(true, 8080)
+	namer := namer_util.NewNamer(clusterUID, "")
+	l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{})
+	_, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName)
+	if err != nil {
+		t.Errorf("Unexpected error when adding nodes %v", err)
+	}
+
+	lbName := l.namer.PrimaryIPNEG(svc.Namespace, svc.Name)
+	key, err := composite.CreateKey(l.cloud, lbName, meta.Regional)
+	if err != nil {
+		t.Errorf("Unexpected error when creating key - %v", err)
+	}
+	key.Name = "hc1"
+	hc1 := &composite.HealthCheck{Name: "hc1"}
+	if err = composite.CreateHealthCheck(fakeGCE, key, hc1); err != nil {
+		t.Errorf("Failed to create fake healthcheck hc1, err %v", err)
+	}
+
+	key.Name = "hc2"
+	hc2 := &composite.HealthCheck{Name: "hc2"}
+	if err = composite.CreateHealthCheck(fakeGCE, key, hc2); err != nil {
+		t.Errorf("Failed to create fake healthcheck hc2, err %v", err)
+	}
+
+	key.Name = lbName
+	// Create a backend service that references both healthchecks but is missing its Description and Backends
+	existingBS := &composite.BackendService{
+		Name:                lbName,
+		Protocol:            "TCP",
+		LoadBalancingScheme: string(cloud.SchemeInternal),
+		HealthChecks:        []string{"hc1", "hc2"},
+	}
+
+	if err = composite.CreateBackendService(fakeGCE, key, existingBS); err != nil {
+		t.Errorf("Failed to create fake backend service %s, err %v", lbName, err)
+	}
+	bs, err := composite.GetBackendService(fakeGCE, key, meta.VersionGA)
+	if err != nil {
+		t.Errorf("Failed to lookup backend service, err %v", err)
+	}
+	if !reflect.DeepEqual(bs.HealthChecks, []string{"hc1", "hc2"}) {
+		t.Errorf("Unexpected healthchecks in backend service - %v", bs.HealthChecks)
+	}
+	if _, err = l.EnsureInternalLoadBalancer(nodeNames, svc); err != nil {
+		t.Errorf("Failed to ensure loadBalancer %s, err %v", lbName, err)
+	}
+	// Verify that the right healthcheck is present.
+	assertInternalLbResources(t, svc, l, nodeNames)
+
+	// Ensure that the other healthchecks still exist.
+ key.Name = "hc1" + if hc1, err = composite.GetHealthCheck(fakeGCE, key, meta.VersionGA); err != nil { + t.Errorf("Failed to lookup healthcheck - hc1") + } + if hc1 == nil { + t.Errorf("Got nil healthcheck") + } + key.Name = "hc2" + if hc2, err = composite.GetHealthCheck(fakeGCE, key, meta.VersionGA); err != nil { + t.Errorf("Failed to lookup healthcheck - hc1") + } + if hc2 == nil { + t.Errorf("Got nil healthcheck") + } +} + +func TestEnsureInternalLoadBalancerHealthCheckConfigurable(t *testing.T) { + t.Parallel() + + vals := gce.DefaultTestClusterValues() + nodeNames := []string{"test-node-1"} + + fakeGCE := getFakeGCECloud(vals) + svc := test.NewL4ILBService(true, 8080) + namer := namer_util.NewNamer(clusterUID, "") + l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{}) + _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) + if err != nil { + t.Errorf("Unexpected error when adding nodes %v", err) + } + lbName := l.namer.PrimaryIPNEG(svc.Namespace, svc.Name) + key, err := composite.CreateKey(l.cloud, lbName, meta.Regional) + if err != nil { + t.Errorf("Unexpected error when creating key - %v", err) + } + sharedHealthCheck := !servicehelper.RequestsOnlyLocalTraffic(svc) + hcName, _ := healthchecks.HealthCheckName(sharedHealthCheck, l.namer.UID(), lbName) + + // Create a healthcheck with an incorrect threshold, default value is 8s. + existingHC := &composite.HealthCheck{Name: hcName, CheckIntervalSec: 6000} + if err = composite.CreateHealthCheck(fakeGCE, key, existingHC); err != nil { + t.Errorf("Failed to create fake healthcheck %s, err %v", hcName, err) + } + + if _, err = l.EnsureInternalLoadBalancer(nodeNames, svc); err != nil { + t.Errorf("Failed to ensure loadBalancer %s, err %v", lbName, err) + } + + newHC, err := composite.GetHealthCheck(fakeGCE, key, meta.VersionGA) + if err != nil { + t.Errorf("Failed to lookup healthcheck %s, err %v", lbName, err) + } + if newHC.CheckIntervalSec != existingHC.CheckIntervalSec { + t.Errorf("Check interval got incorrectly reconciled") + } +} + +func TestEnsureInternalLoadBalancerDeleted(t *testing.T) { + t.Parallel() + + vals := gce.DefaultTestClusterValues() + fakeGCE := getFakeGCECloud(vals) + nodeNames := []string{"test-node-1"} + svc := test.NewL4ILBService(false, 8080) + namer := namer_util.NewNamer(clusterUID, "") + l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{}) + _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) + if err != nil { + t.Errorf("Unexpected error when adding nodes %v", err) + } + status, err := l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty loadBalancer status using handler %v", l) + } + assertInternalLbResources(t, svc, l, nodeNames) + + // Delete the loadbalancer + err = l.EnsureInternalLoadBalancerDeleted(svc) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + assertInternalLbResourcesDeleted(t, svc, true, l) +} + +func TestEnsureInternalLoadBalancerDeletedTwiceDoesNotError(t *testing.T) { + t.Parallel() + + vals := gce.DefaultTestClusterValues() + fakeGCE := getFakeGCECloud(vals) + nodeNames := []string{"test-node-1"} + svc := test.NewL4ILBService(false, 8080) + namer := namer_util.NewNamer(clusterUID, "") + l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{}) + _, err := test.CreateAndInsertNodes(l.cloud, 
nodeNames, vals.ZoneName)
+	if err != nil {
+		t.Errorf("Unexpected error when adding nodes %v", err)
+	}
+	status, err := l.EnsureInternalLoadBalancer(nodeNames, svc)
+	if err != nil {
+		t.Errorf("Failed to ensure loadBalancer, err %v", err)
+	}
+	if len(status.Ingress) == 0 {
+		t.Errorf("Got empty loadBalancer status using handler %v", l)
+	}
+	assertInternalLbResources(t, svc, l, nodeNames)
+
+	// Delete the loadbalancer
+	err = l.EnsureInternalLoadBalancerDeleted(svc)
+	if err != nil {
+		t.Errorf("Unexpected error %v", err)
+	}
+	assertInternalLbResourcesDeleted(t, svc, true, l)
+
+	// Deleting the loadbalancer and resources again should not cause an error.
+	err = l.EnsureInternalLoadBalancerDeleted(svc)
+	if err != nil {
+		t.Errorf("Unexpected error %v", err)
+	}
+	assertInternalLbResourcesDeleted(t, svc, true, l)
+}
+
+func TestEnsureInternalLoadBalancerWithSpecialHealthCheck(t *testing.T) {
+	vals := gce.DefaultTestClusterValues()
+	fakeGCE := getFakeGCECloud(vals)
+	nodeNames := []string{"test-node-1"}
+	svc := test.NewL4ILBService(false, 8080)
+	namer := namer_util.NewNamer(clusterUID, "")
+	l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{})
+	_, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName)
+	if err != nil {
+		t.Errorf("Unexpected error when adding nodes %v", err)
+	}
+
+	healthCheckNodePort := int32(10101)
+	svc.Spec.HealthCheckNodePort = healthCheckNodePort
+	svc.Spec.Type = v1.ServiceTypeLoadBalancer
+	svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
+
+	status, err := l.EnsureInternalLoadBalancer(nodeNames, svc)
+	if err != nil {
+		t.Errorf("Failed to ensure loadBalancer, err %v", err)
+	}
+	if len(status.Ingress) == 0 {
+		t.Errorf("Got empty loadBalancer status using handler %v", l)
+	}
+	assertInternalLbResources(t, svc, l, nodeNames)
+
+	lbName := l.namer.PrimaryIPNEG(svc.Namespace, svc.Name)
+	key, err := composite.CreateKey(l.cloud, lbName, meta.Global)
+	if err != nil {
+		t.Errorf("Unexpected error when creating key - %v", err)
+	}
+	hc, err := composite.GetHealthCheck(l.cloud, key, meta.VersionGA)
+	if err != nil || hc == nil {
+		t.Errorf("Failed to get healthcheck, err %v", err)
+	}
+	if hc.HttpHealthCheck.Port != int64(healthCheckNodePort) {
+		t.Errorf("Unexpected port in healthcheck, expected %d, Got %d", healthCheckNodePort, hc.HttpHealthCheck.Port)
+	}
+}
+
+type EnsureILBParams struct {
+	clusterName     string
+	clusterID       string
+	service         *v1.Service
+	existingFwdRule *composite.ForwardingRule
+}
+
+// newEnsureILBParams is the constructor of EnsureILBParams.
+func newEnsureILBParams() *EnsureILBParams {
+	vals := gce.DefaultTestClusterValues()
+	return &EnsureILBParams{
+		vals.ClusterName,
+		vals.ClusterID,
+		test.NewL4ILBService(false, 8080),
+		nil,
+	}
+}
+
+// TestEnsureInternalLoadBalancerErrors tests the function
+// EnsureInternalLoadBalancer, making sure the system won't panic when
+// errors are raised by GCE.
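+// Each case installs a failure hook on the fake GCE mock (see injectMock below) and verifies that
+// EnsureInternalLoadBalancer surfaces the error and returns a nil status instead of panicking.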
+func TestEnsureInternalLoadBalancerErrors(t *testing.T) { + vals := gce.DefaultTestClusterValues() + var params *EnsureILBParams + + for desc, tc := range map[string]struct { + adjustParams func(*EnsureILBParams) + injectMock func(*cloud.MockGCE) + }{ + "EnsureInternalBackendService failed": { + injectMock: func(c *cloud.MockGCE) { + c.MockRegionBackendServices.GetHook = mock.GetRegionBackendServicesErrHook + }, + }, + "Create internal health check failed": { + injectMock: func(c *cloud.MockGCE) { + c.MockHealthChecks.GetHook = mock.GetHealthChecksInternalErrHook + }, + }, + "Create firewall failed": { + injectMock: func(c *cloud.MockGCE) { + c.MockFirewalls.InsertHook = mock.InsertFirewallsUnauthorizedErrHook + }, + }, + "Create region forwarding rule failed": { + injectMock: func(c *cloud.MockGCE) { + c.MockForwardingRules.InsertHook = mock.InsertForwardingRulesInternalErrHook + }, + }, + "Get region forwarding rule failed": { + injectMock: func(c *cloud.MockGCE) { + c.MockForwardingRules.GetHook = mock.GetForwardingRulesInternalErrHook + }, + }, + "Delete region forwarding rule failed": { + adjustParams: func(params *EnsureILBParams) { + params.existingFwdRule = &composite.ForwardingRule{BackendService: "badBackendService"} + }, + injectMock: func(c *cloud.MockGCE) { + c.MockForwardingRules.DeleteHook = mock.DeleteForwardingRuleErrHook + }, + }, + } { + t.Run(desc, func(t *testing.T) { + fakeGCE := getFakeGCECloud(gce.DefaultTestClusterValues()) + nodeNames := []string{"test-node-1"} + params = newEnsureILBParams() + if tc.adjustParams != nil { + tc.adjustParams(params) + } + namer := namer_util.NewNamer(clusterUID, "") + l := NewL4Handler(params.service, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{}) + //lbName := l.namer.PrimaryIPNEG(params.service.Namespace, params.service.Name) + frName := l.GetFRName() + key, err := composite.CreateKey(l.cloud, frName, meta.Regional) + if err != nil { + t.Errorf("Unexpected error when creating key - %v", err) + } + _, err = test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) + if err != nil { + t.Errorf("Unexpected error when adding nodes %v", err) + } + // Create a dummy forwarding rule in order to trigger a delete in the EnsureInternalLoadBalancer function. 
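+			// (ensureForwardingRule deletes and recreates an existing rule that does not match the
+			// desired spec, so this dummy rule exercises the delete path as well.)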
+			if err = composite.CreateForwardingRule(l.cloud, key, &composite.ForwardingRule{Name: frName}); err != nil {
+				t.Errorf("Failed to create fake forwarding rule %s, err %v", frName, err)
+			}
+			// Inject error hooks after creating the forwarding rule
+			if tc.injectMock != nil {
+				tc.injectMock(fakeGCE.Compute().(*cloud.MockGCE))
+			}
+			status, err := l.EnsureInternalLoadBalancer(nodeNames, params.service)
+			if err == nil {
+				t.Errorf("Expected error when %s", desc)
+			}
+			if status != nil {
+				t.Errorf("Expected empty status when %s, Got %v", desc, status)
+			}
+		})
+	}
+}
+
+/* TODO uncomment after https://github.com/kubernetes/kubernetes/pull/87667 is available in vendor
+func TestEnsureLoadBalancerDeletedSucceedsOnXPN(t *testing.T) {
+	t.Parallel()
+
+	vals := gce.DefaultTestClusterValues()
+	vals.OnXPN = true
+	fakeGCE := gce.NewFakeGCECloud(vals)
+	c := fakeGCE.Compute().(*cloud.MockGCE)
+	svc := test.NewL4ILBService(false, 8080)
+	nodeNames := []string{"test-node-1"}
+	namer := namer_util.NewNamer(clusterUID, "")
+	recorder := record.NewFakeRecorder(100)
+	l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, recorder, &sync.Mutex{})
+	_, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName)
+	if err != nil {
+		t.Errorf("Unexpected error when adding nodes %v", err)
+	}
+	fwName := l.namer.PrimaryIPNEG(svc.Namespace, svc.Name)
+	status, err := l.EnsureInternalLoadBalancer(nodeNames, svc)
+	if err != nil {
+		t.Errorf("Failed to ensure loadBalancer, err %v", err)
+	}
+	if len(status.Ingress) == 0 {
+		t.Errorf("Got empty loadBalancer status using handler %v", l)
+	}
+	assertInternalLbResources(t, svc, l, nodeNames)
+
+	c.MockFirewalls.DeleteHook = mock.DeleteFirewallsUnauthorizedErrHook
+
+	err = l.EnsureInternalLoadBalancerDeleted(svc)
+	if err != nil {
+		t.Errorf("Failed to delete loadBalancer, err %v", err)
+	}
+	gcloudcmd := gce.FirewallToGCloudDeleteCmd(fwName, fakeGCE.ProjectID())
+	XPNErrMsg := fmt.Sprintf("%s %s: `%v`", v1.EventTypeNormal, eventMsgFirewallChange, gcloudcmd)
+	err = test.CheckEvent(recorder, XPNErrMsg, true)
+	if err != nil {
+		t.Errorf("Failed to check event, err %v", err)
+	}
+}
+*/
+
+func TestEnsureInternalLoadBalancerEnableGlobalAccess(t *testing.T) {
+	t.Parallel()
+
+	vals := gce.DefaultTestClusterValues()
+	fakeGCE := getFakeGCECloud(vals)
+
+	nodeNames := []string{"test-node-1"}
+	svc := test.NewL4ILBService(false, 8080)
+	namer := namer_util.NewNamer(clusterUID, "")
+	l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{})
+	_, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName)
+	if err != nil {
+		t.Errorf("Unexpected error when adding nodes %v", err)
+	}
+	frName := l.GetFRName()
+	status, err := l.EnsureInternalLoadBalancer(nodeNames, svc)
+	if err != nil {
+		t.Errorf("Failed to ensure loadBalancer, err %v", err)
+	}
+	if len(status.Ingress) == 0 {
+		t.Errorf("Got empty loadBalancer status using handler %v", l)
+	}
+	assertInternalLbResources(t, svc, l, nodeNames)
+
+	// Change service to include the global access annotation
+	svc.Annotations[gce.ServiceAnnotationILBAllowGlobalAccess] = "true"
+	status, err = l.EnsureInternalLoadBalancer(nodeNames, svc)
+	if err != nil {
+		t.Errorf("Failed to ensure loadBalancer, err %v", err)
+	}
+	if len(status.Ingress) == 0 {
+		t.Errorf("Got empty loadBalancer status using handler %v", l)
+	}
+	assertInternalLbResources(t, svc, l, nodeNames)
+	betaRuleDescString, err := utils.MakeL4ILBServiceDescription(utils.ServiceKeyFunc(svc.Namespace, 
svc.Name), "1.2.3.0", meta.VersionBeta) + if err != nil { + t.Errorf("Unexpected error when creating description - %v", err) + } + key, err := composite.CreateKey(l.cloud, frName, meta.Regional) + if err != nil { + t.Errorf("Unexpected error when creating key - %v", err) + } + fwdRule, err := composite.GetForwardingRule(l.cloud, key, meta.VersionBeta) + if err != nil { + t.Errorf("Unexpected error when looking up forwarding rule - %v", err) + } + if !fwdRule.AllowGlobalAccess { + t.Errorf("Unexpected false value for AllowGlobalAccess") + } + if fwdRule.Description != betaRuleDescString { + t.Errorf("Expected description %s, Got %s", betaRuleDescString, fwdRule.Description) + } + // remove the annotation and disable global access. + delete(svc.Annotations, gce.ServiceAnnotationILBAllowGlobalAccess) + status, err = l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty loadBalancer status using handler %v", l) + } + gaRuleDescString, err := utils.MakeL4ILBServiceDescription(utils.ServiceKeyFunc(svc.Namespace, svc.Name), "1.2.3.0", meta.VersionGA) + if err != nil { + t.Errorf("Unexpected error when creating description - %v", err) + } + // Fetch the beta version of the rule to make sure GlobalAccess field is off. Calling the GA API will always show + // this field as false. + fwdRule, err = composite.GetForwardingRule(l.cloud, key, meta.VersionBeta) + if err != nil { + t.Errorf("Unexpected error when looking up forwarding rule - %v", err) + } + + if fwdRule.AllowGlobalAccess { + t.Errorf("Unexpected true value for AllowGlobalAccess") + } + if fwdRule.Description != gaRuleDescString { + t.Errorf("Expected description %s, Got %s", gaRuleDescString, fwdRule.Description) + } + // Delete the service + err = l.EnsureInternalLoadBalancerDeleted(svc) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + assertInternalLbResourcesDeleted(t, svc, true, l) +} + +func TestEnsureInternalLoadBalancerDisableGlobalAccess(t *testing.T) { + t.Parallel() + + vals := gce.DefaultTestClusterValues() + fakeGCE := getFakeGCECloud(vals) + + nodeNames := []string{"test-node-1"} + svc := test.NewL4ILBService(false, 8080) + namer := namer_util.NewNamer(clusterUID, "") + l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{}) + _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) + if err != nil { + t.Errorf("Unexpected error when adding nodes %v", err) + } + svc.Annotations[gce.ServiceAnnotationILBAllowGlobalAccess] = "true" + frName := l.GetFRName() + status, err := l.EnsureInternalLoadBalancer(nodeNames, svc) + if err != nil { + t.Errorf("Failed to ensure loadBalancer, err %v", err) + } + if len(status.Ingress) == 0 { + t.Errorf("Got empty loadBalancer status using handler %v", l) + } + assertInternalLbResources(t, svc, l, nodeNames) + key, err := composite.CreateKey(l.cloud, frName, meta.Regional) + if err != nil { + t.Errorf("Unexpected error when creating key - %v", err) + } + + fwdRule, err := composite.GetForwardingRule(l.cloud, key, meta.VersionBeta) + if err != nil { + t.Errorf("Unexpected error when looking up forwarding rule - %v", err) + } + if !fwdRule.AllowGlobalAccess { + t.Errorf("Unexpected false value for AllowGlobalAccess") + } + + // disable global access - setting the annotation to false or removing annotation will disable it + svc.Annotations[gce.ServiceAnnotationILBAllowGlobalAccess] = "false" + status, err = 
l.EnsureInternalLoadBalancer(nodeNames, svc)
+	if err != nil {
+		t.Errorf("Failed to ensure loadBalancer, err %v", err)
+	}
+	if len(status.Ingress) == 0 {
+		t.Errorf("Got empty loadBalancer status using handler %v", l)
+	}
+	fwdRule, err = composite.GetForwardingRule(l.cloud, key, meta.VersionBeta)
+	if err != nil {
+		t.Errorf("Unexpected error when looking up forwarding rule - %v", err)
+	}
+	if fwdRule.AllowGlobalAccess {
+		t.Errorf("Unexpected 'true' value for AllowGlobalAccess")
+	}
+
+	// Delete the service
+	err = l.EnsureInternalLoadBalancerDeleted(svc)
+	if err != nil {
+		t.Errorf("Unexpected error %v", err)
+	}
+	assertInternalLbResourcesDeleted(t, svc, true, l)
+}
+
+func TestEnsureInternalLoadBalancerCustomSubnet(t *testing.T) {
+	t.Parallel()
+	nodeNames := []string{"test-node-1"}
+	vals := gce.DefaultTestClusterValues()
+	fakeGCE := getFakeGCECloud(vals)
+	fakeGCE.AlphaFeatureGate = gce.NewAlphaFeatureGate([]string{gce.AlphaFeatureILBCustomSubnet})
+
+	svc := test.NewL4ILBService(false, 8080)
+	namer := namer_util.NewNamer(clusterUID, "")
+	l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{})
+	_, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName)
+	if err != nil {
+		t.Errorf("Unexpected error when adding nodes %v", err)
+	}
+	status, err := l.EnsureInternalLoadBalancer(nodeNames, svc)
+	if err != nil {
+		t.Errorf("Failed to ensure loadBalancer, err %v", err)
+	}
+	if len(status.Ingress) == 0 {
+		t.Errorf("Got empty loadBalancer status using handler %v", l)
+	}
+
+	frName := l.GetFRName()
+	fwdRule, err := composite.GetForwardingRule(l.cloud, meta.RegionalKey(frName, l.cloud.Region()), meta.VersionGA)
+	if err != nil || fwdRule == nil {
+		t.Errorf("Unexpected error looking up forwarding rule - err %v", err)
+	}
+	if fwdRule.Subnetwork != "" {
+		t.Errorf("Unexpected subnet value '%s' in ILB ForwardingRule", fwdRule.Subnetwork)
+	}
+
+	// Add the custom subnet annotation and request a static IP.
+	requestedIP := "4.5.6.7"
+	svc.Annotations[gce.ServiceAnnotationILBSubnet] = "test-subnet"
+	svc.Spec.LoadBalancerIP = requestedIP
+	status, err = l.EnsureInternalLoadBalancer(nodeNames, svc)
+	if err != nil {
+		t.Errorf("Failed to ensure loadBalancer, err %v", err)
+	}
+	if len(status.Ingress) == 0 {
+		t.Errorf("Got empty loadBalancer status using handler %v", l)
+	}
+	if status.Ingress[0].IP != requestedIP {
+		t.Fatalf("Reserved IP %s not propagated, Got '%s'", requestedIP, status.Ingress[0].IP)
+	}
+	fwdRule, err = composite.GetForwardingRule(l.cloud, meta.RegionalKey(frName, l.cloud.Region()), meta.VersionGA)
+	if err != nil || fwdRule == nil {
+		t.Errorf("Unexpected error looking up forwarding rule - err %v", err)
+	}
+	if !strings.HasSuffix(fwdRule.Subnetwork, "test-subnet") {
+		t.Errorf("Unexpected subnet value '%s' in ILB ForwardingRule, expected 'test-subnet'", fwdRule.Subnetwork)
+	}
+
+	// Change to a different subnet
+	svc.Annotations[gce.ServiceAnnotationILBSubnet] = "another-subnet"
+	status, err = l.EnsureInternalLoadBalancer(nodeNames, svc)
+	if err != nil {
+		t.Errorf("Failed to ensure loadBalancer, err %v", err)
+	}
+	if len(status.Ingress) == 0 {
+		t.Errorf("Got empty loadBalancer status using handler %v", l)
+	}
+	if status.Ingress[0].IP != requestedIP {
+		t.Errorf("Reserved IP %s not propagated, Got %s", requestedIP, status.Ingress[0].IP)
+	}
+	fwdRule, err = composite.GetForwardingRule(l.cloud, meta.RegionalKey(frName, l.cloud.Region()), meta.VersionGA)
+	if err != nil || fwdRule == 
nil {
+		t.Errorf("Unexpected error looking up forwarding rule - err %v", err)
+	}
+	if !strings.HasSuffix(fwdRule.Subnetwork, "another-subnet") {
+		t.Errorf("Unexpected subnet value '%s' in ILB ForwardingRule, expected 'another-subnet'", fwdRule.Subnetwork)
+	}
+	// Remove the annotation - ILB should revert to the default subnet.
+	delete(svc.Annotations, gce.ServiceAnnotationILBSubnet)
+	status, err = l.EnsureInternalLoadBalancer(nodeNames, svc)
+	if err != nil {
+		t.Errorf("Failed to ensure loadBalancer, err %v", err)
+	}
+	if len(status.Ingress) == 0 {
+		t.Errorf("Got empty loadBalancer status using handler %v", l)
+	}
+	fwdRule, err = composite.GetForwardingRule(l.cloud, meta.RegionalKey(frName, l.cloud.Region()), meta.VersionGA)
+	if err != nil || fwdRule == nil {
+		t.Errorf("Unexpected error %v", err)
+	}
+	if fwdRule.Subnetwork != "" {
+		t.Errorf("Unexpected subnet value '%s' in ILB ForwardingRule.", fwdRule.Subnetwork)
+	}
+	// Delete the loadbalancer
+	err = l.EnsureInternalLoadBalancerDeleted(svc)
+	if err != nil {
+		t.Errorf("Unexpected error deleting loadbalancer - err %v", err)
+	}
+	assertInternalLbResourcesDeleted(t, svc, true, l)
+}
+
+func TestEnsureInternalFirewallPortRanges(t *testing.T) {
+	vals := gce.DefaultTestClusterValues()
+	fakeGCE := getFakeGCECloud(vals)
+	svc := test.NewL4ILBService(false, 8080)
+	namer := namer_util.NewNamer(clusterUID, "")
+	l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100), &sync.Mutex{})
+	fwName := l.namer.PrimaryIPNEG(l.Service.Namespace, l.Service.Name)
+	tc := struct {
+		Input  []int
+		Result []string
+	}{
+		Input: []int{15, 37, 900, 2002, 2003, 2003, 2004, 2004}, Result: []string{"15", "37", "900", "2002-2004"},
+	}
+	c := fakeGCE.Compute().(*cloud.MockGCE)
+	c.MockFirewalls.InsertHook = nil
+	c.MockFirewalls.UpdateHook = nil
+
+	nodeNames := []string{"test-node-1"}
+	_, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName)
+	if err != nil {
+		t.Errorf("Unexpected error when adding nodes %v", err)
+	}
+
+	sourceRange := []string{"10.0.0.0/20"}
+	err = firewalls.EnsureL4InternalFirewallRule(
+		l.cloud,
+		fwName,
+		"1.2.3.4",
+		utils.ServiceKeyFunc(svc.Namespace, svc.Name),
+		sourceRange,
+		utils.GetPortRanges(tc.Input),
+		nodeNames,
+		string(v1.ProtocolTCP))
+	if err != nil {
+		t.Errorf("Unexpected error %v when ensuring firewall rule %s for svc %+v", err, fwName, svc)
+	}
+	existingFirewall, err := l.cloud.GetFirewall(fwName)
+	if err != nil || existingFirewall == nil || len(existingFirewall.Allowed) == 0 {
+		t.Errorf("Unexpected error %v when looking up firewall %s, Got firewall %+v", err, fwName, existingFirewall)
+	}
+	existingPorts := existingFirewall.Allowed[0].Ports
+	if !reflect.DeepEqual(existingPorts, tc.Result) {
+		t.Errorf("Expected firewall rule with ports %v, got %v", tc.Result, existingPorts)
+	}
+}
+
+func assertInternalLbResources(t *testing.T, apiService *v1.Service, l *L4, nodeNames []string) {
+	// Check that firewall rules are created for both the LoadBalancer and the HealthCheck
+	sharedHC := !servicehelper.RequestsOnlyLocalTraffic(apiService)
+	resourceName := l.namer.PrimaryIPNEG(l.Service.Namespace, l.Service.Name)
+	resourceDesc, err := utils.MakeL4ILBServiceDescription(utils.ServiceKeyFunc(apiService.Namespace, apiService.Name), "", meta.VersionGA)
+	if err != nil {
+		t.Errorf("Failed to create description for resources, err %v", err)
+	}
+	hcName, hcFwName := healthchecks.HealthCheckName(sharedHC, l.namer.UID(), 
resourceName) + fwNames := []string{ + resourceName, + hcFwName, + } + + for _, fwName := range fwNames { + firewall, err := l.cloud.GetFirewall(fwName) + if err != nil { + t.Fatalf("Failed to fetch firewall rule %s - err %v", fwName, err) + } + if !utils.EqualStringSets(nodeNames, firewall.TargetTags) { + t.Fatalf("Expected firewall rule target tags '%v', Got '%v'", nodeNames, firewall.TargetTags) + } + if len(firewall.SourceRanges) == 0 { + t.Fatalf("Unexpected empty source range for firewall rule %v", firewall) + } + if firewall.Description != resourceDesc { + t.Errorf("Unexpected description in firewall - Expected %s, Got %s", firewall.Description, resourceDesc) + } + } + + // Check that HealthCheck is created + healthcheck, err := composite.GetHealthCheck(l.cloud, meta.GlobalKey(hcName), meta.VersionGA) + if err != nil { + t.Errorf("Failed to fetch healthcheck %s - err %v", hcName, err) + } + if healthcheck.Name != hcName { + t.Errorf("Unexpected name for healthcheck '%s' - expected '%s'", healthcheck.Name, hcName) + } + // Only non-shared Healthchecks get a description. + if !sharedHC && healthcheck.Description != resourceDesc { + t.Errorf("Unexpected description in healthcheck - Expected %s, Got %s", healthcheck.Description, resourceDesc) + } + + // Check that BackendService exists + backendServiceName := resourceName + key := meta.RegionalKey(backendServiceName, l.cloud.Region()) + backendServiceLink := cloud.SelfLink(meta.VersionGA, l.cloud.ProjectID(), "backendServices", key) + bs, err := composite.GetBackendService(l.cloud, key, meta.VersionGA) + if err != nil { + t.Errorf("Failed to fetch backend service %s - err %v", backendServiceName, err) + } + if bs.Protocol != "TCP" { + t.Errorf("Unexpected protocol '%s' for backend service %v", bs.Protocol, bs) + } + if bs.SelfLink != backendServiceLink { + t.Errorf("Unexpected self link in backend service - Expected %s, Got %s", bs.SelfLink, backendServiceLink) + } + if bs.Description != resourceDesc { + t.Errorf("Unexpected description in backend service - Expected %s, Got %s", bs.Description, resourceDesc) + } + if !utils.EqualStringSets(bs.HealthChecks, []string{healthcheck.SelfLink}) { + t.Errorf("Unexpected healthcheck reference '%v' in backend service, expected '%s'", bs.HealthChecks, + healthcheck.SelfLink) + } + // Check that ForwardingRule is created + frName := l.GetFRName() + fwdRule, err := composite.GetForwardingRule(l.cloud, meta.RegionalKey(frName, l.cloud.Region()), meta.VersionGA) + if err != nil { + t.Errorf("Failed to fetch forwarding rule %s - err %v", frName, err) + + } + if fwdRule.Name != frName { + t.Errorf("Unexpected name for forwarding rule '%s' - expected '%s'", fwdRule.Name, frName) + } + if fwdRule.IPProtocol != "TCP" { + t.Errorf("Unexpected protocol '%s' for forwarding rule %v", fwdRule.IPProtocol, fwdRule) + } + if fwdRule.BackendService != backendServiceLink { + t.Errorf("Unexpected backend service link '%s' for forwarding rule, expected '%s'", fwdRule.BackendService, backendServiceLink) + } + if fwdRule.Subnetwork != l.cloud.NetworkURL() { + t.Errorf("Unexpected subnetwork '%s' in forwarding rule, expected '%s'", + fwdRule.Subnetwork, l.cloud.NetworkURL()) + } +} + +func assertInternalLbResourcesDeleted(t *testing.T, apiService *v1.Service, firewallsDeleted bool, l *L4) { + frName := l.GetFRName() + sharedHC := !servicehelper.RequestsOnlyLocalTraffic(apiService) + resourceName := l.namer.PrimaryIPNEG(l.Service.Namespace, l.Service.Name) + hcName, hcFwName := healthchecks.HealthCheckName(sharedHC, 
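The self-link asserted above follows the standard GCE resource URL layout; as a point of reference, for a hypothetical project "test-project" and region "us-central1" the computed link would read roughly:

    https://www.googleapis.com/compute/v1/projects/test-project/regions/us-central1/backendServices/<resource-name>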
+
+func assertInternalLbResourcesDeleted(t *testing.T, apiService *v1.Service, firewallsDeleted bool, l *L4) {
+	frName := l.GetFRName()
+	sharedHC := !servicehelper.RequestsOnlyLocalTraffic(apiService)
+	resourceName := l.namer.PrimaryIPNEG(l.Service.Namespace, l.Service.Name)
+	hcName, hcFwName := healthchecks.HealthCheckName(sharedHC, l.namer.UID(), resourceName)
+
+	if firewallsDeleted {
+		// Check that Firewalls are deleted for the LoadBalancer and the HealthCheck
+		fwNames := []string{
+			resourceName,
+			hcFwName,
+		}
+
+		for _, fwName := range fwNames {
+			firewall, err := l.cloud.GetFirewall(fwName)
+			if err == nil || firewall != nil {
+				t.Errorf("Expected error when looking up firewall rule after deletion")
+			}
+		}
+	}
+
+	// Check that the forwarding rule is deleted
+	fwdRule, err := l.cloud.GetRegionForwardingRule(frName, l.cloud.Region())
+	if err == nil || fwdRule != nil {
+		t.Errorf("Expected error when looking up forwarding rule after deletion")
+	}
+
+	// Check that HealthCheck is deleted
+	healthcheck, err := l.cloud.GetHealthCheck(hcName)
+	if err == nil || healthcheck != nil {
+		t.Errorf("Expected error when looking up healthcheck after deletion")
+	}
+	bs, err := l.cloud.GetRegionBackendService(resourceName, l.cloud.Region())
+	if err == nil || bs != nil {
+		t.Errorf("Expected error when looking up backend service after deletion")
+	}
+}
diff --git a/pkg/test/utils.go b/pkg/test/utils.go
index c536f87eca..d82298a331 100644
--- a/pkg/test/utils.go
+++ b/pkg/test/utils.go
@@ -1,21 +1,31 @@
 package test
 
 import (
+	"fmt"
+	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"
+	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"
+	"google.golang.org/api/compute/v1"
 	api_v1 "k8s.io/api/core/v1"
 	"k8s.io/api/networking/v1beta1"
 	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/tools/record"
 	"k8s.io/ingress-gce/pkg/annotations"
 	backendconfig "k8s.io/ingress-gce/pkg/apis/backendconfig/v1"
 	"k8s.io/ingress-gce/pkg/utils"
+	"k8s.io/legacy-cloud-providers/gce"
+	"strings"
+	"time"
 )
 
 const (
 	FinalizerAddFlag          = flag("enable-finalizer-add")
 	FinalizerRemoveFlag       = flag("enable-finalizer-remove")
 	EnableV2FrontendNamerFlag = flag("enable-v2-frontend-namer")
+	testServiceName           = "ilbtest"
+	testServiceNamespace      = "default"
 )
 
 var (
@@ -57,6 +67,28 @@ func NewService(name types.NamespacedName, spec api_v1.ServiceSpec) *api_v1.Serv
 	}
 }
 
+// NewL4ILBService creates a Service of type LoadBalancer with the Internal annotation.
+func NewL4ILBService(onlyLocal bool, port int) *api_v1.Service {
+	svc := &api_v1.Service{
+		ObjectMeta: meta_v1.ObjectMeta{
+			Name:        testServiceName,
+			Namespace:   testServiceNamespace,
+			Annotations: map[string]string{gce.ServiceAnnotationLoadBalancerType: string(gce.LBTypeInternal)},
+		},
+		Spec: api_v1.ServiceSpec{
+			Type:            api_v1.ServiceTypeLoadBalancer,
+			SessionAffinity: api_v1.ServiceAffinityClientIP,
+			Ports: []api_v1.ServicePort{
+				{Name: "testport", Port: int32(port), Protocol: "TCP"},
+			},
+		},
+	}
+	if onlyLocal {
+		svc.Spec.ExternalTrafficPolicy = api_v1.ServiceExternalTrafficPolicyTypeLocal
+	}
+	return svc
+}
+
 // NewBackendConfig returns a BackendConfig with the given spec.
 func NewBackendConfig(name types.NamespacedName, spec backendconfig.BackendConfigSpec) *backendconfig.BackendConfig {
 	return &backendconfig.BackendConfig{
@@ -110,3 +142,97 @@ func (s *FlagSaver) Reset(key flag, flagPointer *bool) {
 		*flagPointer = val
 	}
 }
+
+// CreateAndInsertNodes adds the given nodeNames in the given zone as GCE instances, so they can be looked up in tests.
+func CreateAndInsertNodes(gce *gce.Cloud, nodeNames []string, zoneName string) ([]*api_v1.Node, error) {
+	nodes := []*api_v1.Node{}
+
+	for _, name := range nodeNames {
+		// Inserting the same node name twice causes an error - here we check if
+		// the instance exists already before insertion.
+		exists, err := GCEInstanceExists(name, gce)
+		if err != nil {
+			return nil, err
+		}
+		if !exists {
+			err := gce.InsertInstance(
+				gce.ProjectID(),
+				zoneName,
+				&compute.Instance{
+					Name: name,
+					Tags: &compute.Tags{
+						Items: []string{name},
+					},
+				},
+			)
+			if err != nil {
+				return nodes, err
+			}
+		}
+
+		nodes = append(
+			nodes,
+			&api_v1.Node{
+				ObjectMeta: meta_v1.ObjectMeta{
+					Name: name,
+					Labels: map[string]string{
+						api_v1.LabelHostname:          name,
+						api_v1.LabelZoneFailureDomain: zoneName,
+					},
+				},
+				Status: api_v1.NodeStatus{
+					NodeInfo: api_v1.NodeSystemInfo{
+						KubeProxyVersion: "v1.7.2",
+					},
+				},
+			},
+		)
+	}
+	return nodes, nil
+}
+
+// GCEInstanceExists returns whether an instance with the given name exists.
+func GCEInstanceExists(name string, g *gce.Cloud) (bool, error) {
+	zones, err := g.GetAllCurrentZones()
+	if err != nil {
+		return false, err
+	}
+	for _, zone := range zones.List() {
+		ctx, cancel := cloud.ContextWithCallTimeout()
+		defer cancel()
+		if _, err := g.Compute().Instances().Get(ctx, meta.ZonalKey(name, zone)); err != nil {
+			if utils.IsNotFoundError(err) {
+				return false, nil
+			}
+			return false, err
+		}
+		// The instance has been found.
+		return true, nil
+	}
+	return false, nil
+}
+
+// CheckEvent watches for events in the given FakeRecorder and checks if one matches the given string.
+// It will be used in the l4 firewall XPN tests once TestEnsureLoadBalancerDeletedSucceedsOnXPN and others are
+// uncommented.
+func CheckEvent(recorder *record.FakeRecorder, expected string, shouldMatch bool) error {
+	select {
+	case received := <-recorder.Events:
+		if strings.HasPrefix(received, expected) != shouldMatch {
+			if shouldMatch {
+				return fmt.Errorf("Should receive message \"%v\" but got \"%v\".", expected, received)
+			}
+			return fmt.Errorf("Unexpected event \"%v\".", received)
+		}
+		return nil
+	case <-time.After(2 * time.Second):
+		if shouldMatch {
+			return fmt.Errorf("Should receive message \"%v\" but timed out.", expected)
+		}
+		return nil
+	}
+}
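As a usage sketch, a test that expects an event can pair CheckEvent with the fake recorder roughly like this (the reason and message are hypothetical, not events emitted by this patch; svc and t come from the surrounding test):

    recorder := record.NewFakeRecorder(100)
    // FakeRecorder publishes events as "<type> <reason> <message>" strings.
    recorder.Eventf(svc, api_v1.EventTypeNormal, "SyncLoadBalancerSuccessful", "Ensured load balancer")
    if err := test.CheckEvent(recorder, "Normal SyncLoadBalancerSuccessful", true); err != nil {
        t.Error(err)
    }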
diff --git a/pkg/utils/common/finalizer.go b/pkg/utils/common/finalizer.go
index 4c23506baf..de07803295 100644
--- a/pkg/utils/common/finalizer.go
+++ b/pkg/utils/common/finalizer.go
@@ -16,8 +16,10 @@ package common
 
 import (
 	"fmt"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/api/networking/v1beta1"
 	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
 	client "k8s.io/client-go/kubernetes/typed/networking/v1beta1"
 	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/util/slice"
@@ -29,11 +31,11 @@ const (
 	// FinalizerKeyV2 is the string representing the Ingress finalizer version.
 	// Ingress with V2 finalizer uses V2 frontend naming scheme.
 	FinalizerKeyV2 = "networking.gke.io/ingress-finalizer-V2"
-	// FinalizerKeyL4 is the string representing the L4 ILB controller finalizer in this repo.
-	FinalizerKeyL4 = "networking.gke.io/l4-ilb-v2"
-	// FinalizerKeyL4V1 is the string representing the service controller finalizer. A service with this finalizer
-	// is managed by k/k service controller.
-	FinalizerKeyL4V1 = "networking.gke.io/l4-ilb-v1"
+	// TODO remove the two definitions below once they are available in legacy-cloud-providers/gce
+	// LegacyILBFinalizer is the key used to identify ILB services whose resources are managed by the k/k service controller.
+	LegacyILBFinalizer = "gke.networking.io/l4-ilb-v1"
+	// ILBFinalizerV2 is the finalizer used by newer controllers that implement Internal LoadBalancer services.
+	ILBFinalizerV2 = "gke.networking.io/l4-ilb-v2"
 )
 
 // IsDeletionCandidate is true if the passed in meta contains an ingress finalizer.
@@ -88,3 +90,33 @@ func EnsureDeleteFinalizer(ing *v1beta1.Ingress, ingClient client.IngressInterfa
 	}
 	return nil
 }
+
+// EnsureServiceFinalizer updates the service to add the given finalizer key, if not already present.
+func EnsureServiceFinalizer(service *v1.Service, key string, kubeClient kubernetes.Interface) error {
+	if HasGivenFinalizer(service.ObjectMeta, key) {
+		return nil
+	}
+
+	// Make a copy so we don't mutate the shared informer cache.
+	updated := service.DeepCopy()
+	updated.ObjectMeta.Finalizers = append(updated.ObjectMeta.Finalizers, key)
+
+	klog.V(2).Infof("Adding finalizer %s to service %s/%s", key, updated.Namespace, updated.Name)
+	_, err := kubeClient.CoreV1().Services(updated.Namespace).Update(updated)
+	return err
+}
+
+// EnsureDeleteServiceFinalizer updates the service to remove the given finalizer key, if present.
+func EnsureDeleteServiceFinalizer(service *v1.Service, key string, kubeClient kubernetes.Interface) error {
+	if !HasGivenFinalizer(service.ObjectMeta, key) {
+		return nil
+	}
+
+	// Make a copy so we don't mutate the shared informer cache.
+	updated := service.DeepCopy()
+	updated.ObjectMeta.Finalizers = slice.RemoveString(updated.ObjectMeta.Finalizers, key, nil)
+
+	klog.V(2).Infof("Removing finalizer from service %s/%s", updated.Namespace, updated.Name)
+	_, err := kubeClient.CoreV1().Services(updated.Namespace).Update(updated)
+	return err
+}
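A rough sketch of how the new L4 controller is expected to drive these helpers (the sync/teardown framing here is illustrative, not the controller code itself; kubeClient and svc come from the controller context):

    // On sync: mark the service as owned by the v2 ILB controller before creating resources.
    if err := common.EnsureServiceFinalizer(svc, common.ILBFinalizerV2, kubeClient); err != nil {
        return err
    }
    // ... ensure forwarding rule, backend service, health check and firewalls ...

    // On service deletion: drop the finalizer only after the GCE resources are cleaned up.
    if err := common.EnsureDeleteServiceFinalizer(svc, common.ILBFinalizerV2, kubeClient); err != nil {
        return err
    }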
diff --git a/pkg/utils/namer/interfaces.go b/pkg/utils/namer/interfaces.go
index 41160b6d09..8cd431714c 100644
--- a/pkg/utils/namer/interfaces.go
+++ b/pkg/utils/namer/interfaces.go
@@ -55,6 +55,8 @@ type BackendNamer interface {
 	// NEG returns the gce neg name based on the service namespace, name
 	// and target port.
 	NEG(namespace, name string, Port int32) string
+	// PrimaryIPNEG returns the gce neg name based on the service namespace and name.
+	PrimaryIPNEG(namespace, name string) string
 	// InstanceGroup constructs the name for an Instance Group.
 	InstanceGroup() string
 	// NamedPort returns the name for a named port.
diff --git a/pkg/utils/namer/namer.go b/pkg/utils/namer/namer.go
index 0387660bcc..b03901d563 100644
--- a/pkg/utils/namer/namer.go
+++ b/pkg/utils/namer/namer.go
@@ -24,6 +24,8 @@ import (
 	"strings"
 	"sync"
 
+	"k8s.io/ingress-gce/pkg/utils/common"
+
 	"k8s.io/klog"
 )
@@ -479,7 +481,8 @@ func (n *Namer) PrimaryIPNEG(namespace, name string) string {
 	truncNamespace := truncFields[0]
 	truncName := truncFields[1]
 	// Use the full cluster UID in the suffix to reduce chance of collision.
-	return fmt.Sprintf("%s-%s-%s-%s", n.negPrefix(), truncNamespace, truncName, negSuffix(n.UID(), namespace, name, "", ""))
+	return fmt.Sprintf("%s-%s-%s-%s", n.negPrefix(), truncNamespace, truncName,
+		primaryIPNegSuffix(n.UID(), namespace, name))
 }
 
 // IsNEG returns true if the name is a NEG owned by this cluster.
@@ -494,6 +497,11 @@ func (n *Namer) negPrefix() string {
 	return fmt.Sprintf("%s%s-%s", n.prefix, schemaVersionV1, n.shortUID())
 }
 
+// primaryIPNegSuffix returns an 8-character hash code to be used as the suffix in a GCE_VM_PRIMARY_IP NEG name.
+func primaryIPNegSuffix(uid, namespace, name string) string {
+	return common.ContentHash(strings.Join([]string{uid, namespace, name}, ";"), 8)
+}
+
 // negSuffix returns hash code with 8 characters
 func negSuffix(uid, namespace, name, port, subset string) string {
 	negString := strings.Join([]string{uid, namespace, name, port}, ";")
diff --git a/pkg/utils/patch.go b/pkg/utils/patch.go
index 193a8566be..739bde5f55 100644
--- a/pkg/utils/patch.go
+++ b/pkg/utils/patch.go
@@ -17,7 +17,10 @@ import (
 	"encoding/json"
 	"fmt"
 
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
+	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
 )
 
 // StrategicMergePatchBytes returns a patch between the old and new object using a strategic merge patch.
@@ -40,3 +43,38 @@ func StrategicMergePatchBytes(old, cur, refStruct interface{}) ([]byte, error) {
 
 	return patchBytes, nil
 }
+
+// TODO remove these after picking up https://github.com/kubernetes/kubernetes/pull/87217
+// PatchService patches the given service's Status or ObjectMeta based on the original and
+// updated versions. Changes to the spec are ignored.
+func PatchService(c corev1.CoreV1Interface, oldSvc, newSvc *v1.Service) (*v1.Service, error) {
+	// Reset the spec to make sure only the Status or ObjectMeta gets patched.
+	newSvc.Spec = oldSvc.Spec
+
+	patchBytes, err := getPatchBytes(oldSvc, newSvc)
+	if err != nil {
+		return nil, err
+	}
+
+	return c.Services(oldSvc.Namespace).Patch(oldSvc.Name, types.MergePatchType, patchBytes, "status")
+}
+
+func getPatchBytes(oldSvc, newSvc *v1.Service) ([]byte, error) {
+	oldData, err := json.Marshal(oldSvc)
+	if err != nil {
+		return nil, fmt.Errorf("failed to Marshal oldData for svc %s/%s: %v", oldSvc.Namespace, oldSvc.Name, err)
+	}
+
+	newData, err := json.Marshal(newSvc)
+	if err != nil {
+		return nil, fmt.Errorf("failed to Marshal newData for svc %s/%s: %v", newSvc.Namespace, newSvc.Name, err)
+	}
+
+	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Service{})
+	if err != nil {
+		return nil, fmt.Errorf("failed to CreateTwoWayMergePatch for svc %s/%s: %v", oldSvc.Namespace, oldSvc.Name, err)
+	}
+	return patchBytes, nil
+}
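A minimal usage sketch for PatchService, assuming a client-go clientset in kubeClient; the ingress IP is illustrative:

    oldSvc := svc.DeepCopy()
    svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{IP: "10.0.0.10"}} // hypothetical ILB IP
    if _, err := utils.PatchService(kubeClient.CoreV1(), oldSvc, svc); err != nil {
        return err
    }

Because the spec is reset to the old value before diffing, only status and metadata changes end up in the patch.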
diff --git a/pkg/utils/serviceport.go b/pkg/utils/serviceport.go
index 60a2fef160..e73cd6ac6b 100644
--- a/pkg/utils/serviceport.go
+++ b/pkg/utils/serviceport.go
@@ -46,20 +46,21 @@ type ServicePort struct {
 	NodePort int64
 
 	// Numerical port of the Service, retrieved from the Service
-	Port          int32
-	Protocol      annotations.AppProtocol
-	TargetPort    string
-	NEGEnabled    bool
-	L7ILBEnabled  bool
-	BackendConfig *backendconfigv1.BackendConfig
-	BackendNamer  namer.BackendNamer
+	Port                int32
+	Protocol            annotations.AppProtocol
+	TargetPort          string
+	NEGEnabled          bool
+	PrimaryIPNEGEnabled bool
+	L7ILBEnabled        bool
+	BackendConfig       *backendconfigv1.BackendConfig
+	BackendNamer        namer.BackendNamer
 }
 
 // GetAPIVersionFromServicePort returns the compute API version to be used
 // for creating NEGs associated with the given ServicePort.
 func GetAPIVersionFromServicePort(sp *ServicePort) meta.Version {
-	if sp == nil {
-		// this uses GCE_VM_PRIMARY_IP NEGS which requires alpha API
+	if sp.PrimaryIPNEGEnabled {
+		// This uses GCE_VM_PRIMARY_IP NEGs, which require the alpha API.
 		return meta.VersionAlpha
 	}
 	return meta.VersionGA
@@ -77,6 +78,8 @@ func (sp ServicePort) GetDescription() Description {
 func (sp ServicePort) BackendName() string {
 	if sp.NEGEnabled {
 		return sp.BackendNamer.NEG(sp.ID.Service.Namespace, sp.ID.Service.Name, sp.Port)
+	} else if sp.PrimaryIPNEGEnabled {
+		return sp.BackendNamer.PrimaryIPNEG(sp.ID.Service.Namespace, sp.ID.Service.Name)
 	}
 	return sp.BackendNamer.IGBackend(sp.NodePort)
 }
diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go
index b49f03c9ce..c062678368 100644
--- a/pkg/utils/utils.go
+++ b/pkg/utils/utils.go
@@ -22,14 +22,20 @@ import (
 	"errors"
 	"fmt"
 	"net/http"
+	"sort"
+	"strconv"
 	"strings"
 
+	"k8s.io/kubernetes/pkg/util/slice"
+
 	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"
+	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"
 	"google.golang.org/api/compute/v1"
 	"google.golang.org/api/googleapi"
 	api_v1 "k8s.io/api/core/v1"
 	"k8s.io/api/networking/v1beta1"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/sets"
 	listers "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/ingress-gce/pkg/annotations"
@@ -70,7 +76,8 @@ const (
 	LabelNodeRoleExcludeBalancer = "alpha.service-controller.kubernetes.io/exclude-balancer"
 	// ToBeDeletedTaint is the taint that the autoscaler adds when a node is scheduled to be deleted
 	// https://github.com/kubernetes/autoscaler/blob/cluster-autoscaler-0.5.2/cluster-autoscaler/utils/deletetaint/delete.go#L33
-	ToBeDeletedTaint = "ToBeDeletedByClusterAutoscaler"
+	ToBeDeletedTaint = "ToBeDeletedByClusterAutoscaler"
+	// L4ILBServiceDescKey is the description key under which the L4 ILB service name is stored.
+	L4ILBServiceDescKey = "networking.gke.io/service-name"
 )
 
 // FrontendGCAlgorithm species GC algorithm used for ingress frontend resources.
@@ -85,6 +92,10 @@ const (
 	// CleanupV2FrontendResources specifies that frontend resources for ingresses
 	// that use v2 naming scheme need to be deleted.
 	CleanupV2FrontendResources
+	// gceAffinityTypeNone - no session affinity.
+	gceAffinityTypeNone = "NONE"
+	// gceAffinityTypeClientIP - affinity based on Client IP.
+	gceAffinityTypeClientIP = "CLIENT_IP"
 )
 
 // FakeGoogleAPIForbiddenErr creates a Forbidden error with type googleapi.Error
@@ -429,15 +440,120 @@ func NumEndpoints(ep *api_v1.Endpoints) (result int) {
 	return result
 }
 
+// EqualStringSets returns true if the two given string slices contain the same elements, in any order.
+func EqualStringSets(x, y []string) bool {
+	if len(x) != len(y) {
+		return false
+	}
+	xString := sets.NewString(x...)
+	yString := sets.NewString(y...)
+	return xString.Equal(yString)
+}
+
+// GetPortRanges returns a list of port ranges, given a list of ports.
+func GetPortRanges(ports []int) (ranges []string) {
+	if len(ports) < 1 {
+		return ranges
+	}
+	sort.Ints(ports)
+
+	start := ports[0]
+	prev := ports[0]
+	for ix, current := range ports {
+		switch {
+		case current == prev:
+			// Skip over duplicates, unless the end of the list is reached.
+			if ix == len(ports)-1 {
+				if start == current {
+					ranges = append(ranges, fmt.Sprintf("%d", current))
+				} else {
+					ranges = append(ranges, fmt.Sprintf("%d-%d", start, current))
+				}
+			}
+		case current == prev+1:
+			// Continue the streak; create the range if this is the last element in the list.
+			if ix == len(ports)-1 {
+				ranges = append(ranges, fmt.Sprintf("%d-%d", start, current))
+			}
+		default:
+			// current is not prev+1, so the streak is broken. Construct the range and handle the last-element case.
+			if start == prev {
+				ranges = append(ranges, fmt.Sprintf("%d", prev))
+			} else {
+				ranges = append(ranges, fmt.Sprintf("%d-%d", start, prev))
+			}
+			if ix == len(ports)-1 {
+				ranges = append(ranges, fmt.Sprintf("%d", current))
+			}
+			// Reset the start element.
+			start = current
+		}
+		prev = current
+	}
+	return ranges
+}
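A worked example of the collapsing behaviour, matching the "Ranges Duplicates" case in the new unit test below:

    ports := []int{15, 37, 900, 2002, 2003, 2003, 2004, 2004}
    ranges := GetPortRanges(ports)
    // ranges == []string{"15", "37", "900", "2002-2004"}

Note that GetPortRanges sorts its argument slice in place.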
+
+// GetPortsAndProtocol returns the list of ports, the list of port ranges and the protocol, given the list of k8s port info.
+func GetPortsAndProtocol(svcPorts []api_v1.ServicePort) (ports []string, portRanges []string, protocol api_v1.Protocol) {
+	if len(svcPorts) == 0 {
+		return []string{}, []string{}, api_v1.ProtocolUDP
+	}
+
+	// GCP doesn't support multiple protocols for a single load balancer.
+	protocol = svcPorts[0].Protocol
+	portInts := []int{}
+	for _, p := range svcPorts {
+		ports = append(ports, strconv.Itoa(int(p.Port)))
+		portInts = append(portInts, int(p.Port))
+	}
+
+	return ports, GetPortRanges(portInts), protocol
+}
+
+// TranslateAffinityType converts the k8s affinity type to the GCE affinity type.
+func TranslateAffinityType(affinityType string) string {
+	switch affinityType {
+	case string(api_v1.ServiceAffinityClientIP):
+		return gceAffinityTypeClientIP
+	case string(api_v1.ServiceAffinityNone):
+		return gceAffinityTypeNone
+	default:
+		klog.Errorf("Unexpected affinity type: %v", affinityType)
+		return gceAffinityTypeNone
+	}
+}
+
 // IsLegacyL4ILBService returns true if the given LoadBalancer service is managed by service controller.
 func IsLegacyL4ILBService(svc *api_v1.Service) bool {
-	for _, key := range svc.ObjectMeta.Finalizers {
-		if key == common.FinalizerKeyL4V1 {
-			// service has v1 finalizer, this is handled by service controller code.
-			return true
-		}
-	}
-	return false
+	return slice.ContainsString(svc.ObjectMeta.Finalizers, common.LegacyILBFinalizer, nil)
 }
+
+// L4ILBResourceDescription stores the description fields for L4 ILB resources.
+// This is useful to identify which resources correspond to which L4 ILB service.
+type L4ILBResourceDescription struct {
+	// ServiceName indicates the name of the service the resource is for.
+	ServiceName string `json:"networking.gke.io/service-name"`
+	// APIVersion stores the version of the compute API used to create this resource.
+	APIVersion meta.Version `json:"networking.gke.io/api-version,omitempty"`
+	// ServiceIP stores the IP of the service the resource is for.
+	ServiceIP string `json:"networking.gke.io/service-ip,omitempty"`
+}
+
+// Marshal returns the description as a JSON-encoded string.
+func (d *L4ILBResourceDescription) Marshal() (string, error) {
+	out, err := json.Marshal(d)
+	if err != nil {
+		return "", err
+	}
+	return string(out), nil
+}
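For reference, the description produced for a hypothetical service default/ilbtest at GA version would look roughly like this (empty ServiceIP is dropped via omitempty):

    d := &L4ILBResourceDescription{ServiceName: "default/ilbtest", APIVersion: meta.VersionGA}
    desc, _ := d.Marshal()
    // desc == `{"networking.gke.io/service-name":"default/ilbtest","networking.gke.io/api-version":"ga"}`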
+
+// Unmarshal converts the JSON-encoded description string into the struct.
+func (d *L4ILBResourceDescription) Unmarshal(desc string) error {
+	return json.Unmarshal([]byte(desc), d)
+}
+
+// MakeL4ILBServiceDescription returns a JSON-encoded description string for the given service key, IP and compute API version.
+func MakeL4ILBServiceDescription(svcName, ip string, version meta.Version) (string, error) {
+	return (&L4ILBResourceDescription{ServiceName: svcName, ServiceIP: ip, APIVersion: version}).Marshal()
+}
 
 // NewStringPointer returns a pointer to the provided string literal
diff --git a/pkg/utils/utils_test.go b/pkg/utils/utils_test.go
index 6541e6fca7..9b29480040 100644
--- a/pkg/utils/utils_test.go
+++ b/pkg/utils/utils_test.go
@@ -21,7 +21,9 @@ import (
 	"testing"
 	"time"
 
-	"k8s.io/apimachinery/pkg/apis/meta/v1"
+	"github.com/google/go-cmp/cmp"
+
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/ingress-gce/pkg/annotations"
 	"k8s.io/ingress-gce/pkg/flags"
 	"k8s.io/ingress-gce/pkg/utils/common"
@@ -762,7 +764,7 @@ func TestIsLegacyL4ILBService(t *testing.T) {
 			Name:        "testsvc",
 			Namespace:   "default",
 			Annotations: map[string]string{gce.ServiceAnnotationLoadBalancerType: string(gce.LBTypeInternal)},
-			Finalizers:  []string{common.FinalizerKeyL4V1},
+			Finalizers:  []string{common.LegacyILBFinalizer},
 		},
 		Spec: api_v1.ServiceSpec{
 			Type: api_v1.ServiceTypeLoadBalancer,
@@ -780,5 +782,29 @@ func TestIsLegacyL4ILBService(t *testing.T) {
 	if IsLegacyL4ILBService(svc) {
 		t.Errorf("Expected False for Legacy service %s, got True", svc.Name)
 	}
+}
+
+func TestGetPortRanges(t *testing.T) {
+	t.Parallel()
+	for _, tc := range []struct {
+		Desc   string
+		Input  []int
+		Result []string
+	}{
+		{Desc: "All Unique", Input: []int{8, 66, 23, 13, 89}, Result: []string{"8", "13", "23", "66", "89"}},
+		{Desc: "All Unique Sorted", Input: []int{1, 7, 9, 16, 26}, Result: []string{"1", "7", "9", "16", "26"}},
+		{Desc: "Ranges", Input: []int{56, 78, 67, 79, 21, 80, 12}, Result: []string{"12", "21", "56", "67", "78-80"}},
+		{Desc: "Ranges Sorted", Input: []int{5, 7, 90, 1002, 1003, 1004, 1005, 2501}, Result: []string{"5", "7", "90", "1002-1005", "2501"}},
+		{Desc: "Ranges Duplicates", Input: []int{15, 37, 900, 2002, 2003, 2003, 2004, 2004}, Result: []string{"15", "37", "900", "2002-2004"}},
+		{Desc: "Duplicates", Input: []int{10, 10, 10, 10, 10}, Result: []string{"10"}},
+		{Desc: "Only ranges", Input: []int{18, 19, 20, 21, 22, 55, 56, 77, 78, 79, 3504, 3505, 3506}, Result: []string{"18-22", "55-56", "77-79", "3504-3506"}},
+		{Desc: "Single Range", Input: []int{6000, 6001, 6002, 6003, 6004, 6005}, Result: []string{"6000-6005"}},
+		{Desc: "One value", Input: []int{12}, Result: []string{"12"}},
+		{Desc: "Empty", Input: []int{}, Result: nil},
+	} {
+		result := GetPortRanges(tc.Input)
+		if diff := cmp.Diff(tc.Result, result); diff != "" {
+			t.Errorf("GetPortRanges(%s) mismatch, (-want +got):\n%s", tc.Desc, diff)
+		}
+	}
 }

From e2c4c2db7bc056884c63ab99142445fe51c45787 Mon Sep 17 00:00:00 2001
From: Pavithra Ramesh
Date: Thu, 2 Jan 2020 11:44:35 -0800
Subject: [PATCH 3/3] Autogenerated vendor changes.
--- vendor/k8s.io/kubernetes/pkg/apis/core/BUILD | 60 + vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS | 47 + .../pkg/apis/core/annotation_key_constants.go | 110 + vendor/k8s.io/kubernetes/pkg/apis/core/doc.go | 24 + .../pkg/apis/core/field_constants.go | 38 + .../kubernetes/pkg/apis/core/helper/BUILD | 51 + .../pkg/apis/core/helper/helpers.go | 524 ++ .../k8s.io/kubernetes/pkg/apis/core/json.go | 28 + .../pkg/apis/core/objectreference.go | 34 + .../kubernetes/pkg/apis/core/register.go | 98 + .../kubernetes/pkg/apis/core/resource.go | 55 + .../k8s.io/kubernetes/pkg/apis/core/taint.go | 36 + .../kubernetes/pkg/apis/core/toleration.go | 30 + .../k8s.io/kubernetes/pkg/apis/core/types.go | 4807 ++++++++++++++ .../kubernetes/pkg/apis/core/v1/helper/BUILD | 52 + .../pkg/apis/core/v1/helper/helpers.go | 510 ++ .../pkg/apis/core/zz_generated.deepcopy.go | 5504 +++++++++++++++++ vendor/modules.txt | 3 + 18 files changed, 12011 insertions(+) create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/BUILD create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/helper/BUILD create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/json.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/register.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/resource.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/taint.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/types.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/BUILD create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/core/BUILD new file mode 100644 index 0000000000..6ca92930e7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/BUILD @@ -0,0 +1,60 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "annotation_key_constants.go", + "doc.go", + "field_constants.go", + "json.go", + "objectreference.go", + "register.go", + "resource.go", + "taint.go", + "toleration.go", + "types.go", + "zz_generated.deepcopy.go", + ], + importpath = "k8s.io/kubernetes/pkg/apis/core", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "taint_test.go", + "toleration_test.go", + ], + embed = [":go_default_library"], +) + +filegroup( + name = 
"package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/apis/core/fuzzer:all-srcs", + "//pkg/apis/core/helper:all-srcs", + "//pkg/apis/core/install:all-srcs", + "//pkg/apis/core/pods:all-srcs", + "//pkg/apis/core/v1:all-srcs", + "//pkg/apis/core/validation:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS new file mode 100644 index 0000000000..f062ebd6b8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS @@ -0,0 +1,47 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- erictune +- lavalamp +- smarterclayton +- thockin +- liggitt +# - bgrant0607 # manual escalations only +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- yujuhong +- brendandburns +- derekwaynecarr +- caesarxuchao +- vishh +- mikedanese +- liggitt +- nikhiljindal +- gmarek +- erictune +- davidopp +- pmorie +- sttts +- dchen1107 +- saad-ali +- zmerlynn +- luxas +- janetkuo +- justinsb +- pwittrock +- roberthbailey +- ncdc +- tallclair +- yifan-gu +- eparis +- mwielgus +- soltysh +- piosz +- jsafrane +- jbeda +labels: +- sig/apps diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go b/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go new file mode 100644 index 0000000000..688287611e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go @@ -0,0 +1,110 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file should be consistent with pkg/api/v1/annotation_key_constants.go. + +package core + +const ( + // ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy + // webhook backend fails. + ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open" + + // PodPresetOptOutAnnotationKey represents the annotation key for a pod to exempt itself from pod preset manipulation + PodPresetOptOutAnnotationKey string = "podpreset.admission.kubernetes.io/exclude" + + // MirrorAnnotationKey represents the annotation key set by kubelets when creating mirror pods + MirrorPodAnnotationKey string = "kubernetes.io/config.mirror" + + // TolerationsAnnotationKey represents the key of tolerations data (json serialized) + // in the Annotations of a Pod. + TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations" + + // TaintsAnnotationKey represents the key of taints data (json serialized) + // in the Annotations of a Node. + TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints" + + // SeccompPodAnnotationKey represents the key of a seccomp profile applied + // to all containers of a pod. 
+ SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod" + + // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied + // to one container of a pod. + SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" + + // SeccompProfileRuntimeDefault represents the default seccomp profile used by container runtime. + SeccompProfileRuntimeDefault string = "runtime/default" + + // DeprecatedSeccompProfileDockerDefault represents the default seccomp profile used by docker. + // This is now deprecated and should be replaced by SeccompProfileRuntimeDefault. + DeprecatedSeccompProfileDockerDefault string = "docker/default" + + // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) + // in the Annotations of a Node. + PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" + + // ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache + // an object (e.g. secret, config map) before fetching it again from apiserver. + // This annotation can be attached to node. + ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" + + // BootstrapCheckpointAnnotationKey represents a Resource (Pod) that should be checkpointed by + // the kubelet prior to running + BootstrapCheckpointAnnotationKey string = "node.kubernetes.io/bootstrap-checkpoint" + + // annotation key prefix used to identify non-convertible json paths. + NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io" + + kubectlPrefix = "kubectl.kubernetes.io/" + + // LastAppliedConfigAnnotation is the annotation used to store the previous + // configuration of a resource for use in a three way diff by UpdateApplyAnnotation. + LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration" + + // AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers + // + // It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to + // allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow + // access only from the CIDRs currently allocated to MIT & the USPS. + // + // Not all cloud providers support this annotation, though AWS & GCE do. + AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" + + // EndpointsLastChangeTriggerTime is the annotation key, set for endpoints objects, that + // represents the timestamp (stored as RFC 3339 date-time string, e.g. '2018-10-22T19:32:52.1Z') + // of the last change, of some Pod or Service object, that triggered the endpoints object change. + // In other words, if a Pod / Service changed at time T0, that change was observed by endpoints + // controller at T1, and the Endpoints object was changed at T2, the + // EndpointsLastChangeTriggerTime would be set to T0. + // + // The "endpoints change trigger" here means any Pod or Service change that resulted in the + // Endpoints object change. + // + // Given the definition of the "endpoints change trigger", please note that this annotation will + // be set ONLY for endpoints object changes triggered by either Pod or Service change. If the + // Endpoints object changes due to other reasons, this annotation won't be set (or updated if it's + // already set). 
+ // + // This annotation will be used to compute the in-cluster network programming latency SLI, see + // https://github.com/kubernetes/community/blob/master/sig-scalability/slos/network_programming_latency.md + EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time" + + // MigratedPluginsAnnotationKey is the annotation key, set for CSINode objects, that is a comma-separated + // list of in-tree plugins that will be serviced by the CSI backend on the Node represented by CSINode. + // This annotation is used by the Attach Detach Controller to determine whether to use the in-tree or + // CSI Backend for a volume plugin on a specific node. + MigratedPluginsAnnotationKey = "storage.alpha.kubernetes.io/migrated-plugins" +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go new file mode 100644 index 0000000000..6017bfdab1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +// Package api contains the latest (or "internal") version of the +// Kubernetes API objects. This is the API objects as represented in memory. +// The contract presented to clients is located in the versioned packages, +// which are sub-directories. The first one is "v1". Those packages +// describe how a particular version is serialized to storage/network. +package core // import "k8s.io/kubernetes/pkg/apis/core" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go b/vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go new file mode 100644 index 0000000000..a26f80568c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go @@ -0,0 +1,38 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package core + +// Field path constants that are specific to the internal API +// representation. 
+const ( + NodeUnschedulableField = "spec.unschedulable" + ObjectNameField = "metadata.name" + PodHostField = "spec.nodeName" + PodStatusField = "status.phase" + SecretTypeField = "type" + + EventReasonField = "action" + EventSourceField = "reportingComponent" + EventTypeField = "type" + EventInvolvedKindField = "involvedObject.kind" + EventInvolvedNamespaceField = "involvedObject.namespace" + EventInvolvedNameField = "involvedObject.name" + EventInvolvedUIDField = "involvedObject.uid" + EventInvolvedAPIVersionField = "involvedObject.apiVersion" + EventInvolvedResourceVersionField = "involvedObject.resourceVersion" + EventInvolvedFieldPathField = "involvedObject.fieldPath" +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/BUILD new file mode 100644 index 0000000000..d4fa9fea86 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/BUILD @@ -0,0 +1,51 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["helpers_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/apis/core:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = ["helpers.go"], + importpath = "k8s.io/kubernetes/pkg/apis/core/helper", + deps = [ + "//pkg/apis/core:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/selection:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/apis/core/helper/qos:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go new file mode 100644 index 0000000000..835e2cbf19 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go @@ -0,0 +1,524 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package helper + +import ( + "encoding/json" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/kubernetes/pkg/apis/core" +) + +// IsHugePageResourceName returns true if the resource name has the huge page +// resource prefix. +func IsHugePageResourceName(name core.ResourceName) bool { + return strings.HasPrefix(string(name), core.ResourceHugePagesPrefix) +} + +// IsQuotaHugePageResourceName returns true if the resource name has the quota +// related huge page resource prefix. +func IsQuotaHugePageResourceName(name core.ResourceName) bool { + return strings.HasPrefix(string(name), core.ResourceHugePagesPrefix) || strings.HasPrefix(string(name), core.ResourceRequestsHugePagesPrefix) +} + +// HugePageResourceName returns a ResourceName with the canonical hugepage +// prefix prepended for the specified page size. The page size is converted +// to its canonical representation. +func HugePageResourceName(pageSize resource.Quantity) core.ResourceName { + return core.ResourceName(fmt.Sprintf("%s%s", core.ResourceHugePagesPrefix, pageSize.String())) +} + +// HugePageSizeFromResourceName returns the page size for the specified huge page +// resource name. If the specified input is not a valid huge page resource name +// an error is returned. +func HugePageSizeFromResourceName(name core.ResourceName) (resource.Quantity, error) { + if !IsHugePageResourceName(name) { + return resource.Quantity{}, fmt.Errorf("resource name: %s is an invalid hugepage name", name) + } + pageSize := strings.TrimPrefix(string(name), core.ResourceHugePagesPrefix) + return resource.ParseQuantity(pageSize) +} + +// NonConvertibleFields iterates over the provided map and filters out all but +// any keys with the "non-convertible.kubernetes.io" prefix. +func NonConvertibleFields(annotations map[string]string) map[string]string { + nonConvertibleKeys := map[string]string{} + for key, value := range annotations { + if strings.HasPrefix(key, core.NonConvertibleAnnotationPrefix) { + nonConvertibleKeys[key] = value + } + } + return nonConvertibleKeys +} + +// Semantic can do semantic deep equality checks for core objects. +// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true +var Semantic = conversion.EqualitiesOrDie( + func(a, b resource.Quantity) bool { + // Ignore formatting, only care that numeric value stayed the same. + // TODO: if we decide it's important, it should be safe to start comparing the format. + // + // Uninitialized quantities are equivalent to 0 quantities. 
+ return a.Cmp(b) == 0 + }, + func(a, b metav1.MicroTime) bool { + return a.UTC() == b.UTC() + }, + func(a, b metav1.Time) bool { + return a.UTC() == b.UTC() + }, + func(a, b labels.Selector) bool { + return a.String() == b.String() + }, + func(a, b fields.Selector) bool { + return a.String() == b.String() + }, +) + +var standardResourceQuotaScopes = sets.NewString( + string(core.ResourceQuotaScopeTerminating), + string(core.ResourceQuotaScopeNotTerminating), + string(core.ResourceQuotaScopeBestEffort), + string(core.ResourceQuotaScopeNotBestEffort), + string(core.ResourceQuotaScopePriorityClass), +) + +// IsStandardResourceQuotaScope returns true if the scope is a standard value +func IsStandardResourceQuotaScope(str string) bool { + return standardResourceQuotaScopes.Has(str) +} + +var podObjectCountQuotaResources = sets.NewString( + string(core.ResourcePods), +) + +var podComputeQuotaResources = sets.NewString( + string(core.ResourceCPU), + string(core.ResourceMemory), + string(core.ResourceLimitsCPU), + string(core.ResourceLimitsMemory), + string(core.ResourceRequestsCPU), + string(core.ResourceRequestsMemory), +) + +// IsResourceQuotaScopeValidForResource returns true if the resource applies to the specified scope +func IsResourceQuotaScopeValidForResource(scope core.ResourceQuotaScope, resource string) bool { + switch scope { + case core.ResourceQuotaScopeTerminating, core.ResourceQuotaScopeNotTerminating, core.ResourceQuotaScopeNotBestEffort, core.ResourceQuotaScopePriorityClass: + return podObjectCountQuotaResources.Has(resource) || podComputeQuotaResources.Has(resource) + case core.ResourceQuotaScopeBestEffort: + return podObjectCountQuotaResources.Has(resource) + default: + return true + } +} + +var standardContainerResources = sets.NewString( + string(core.ResourceCPU), + string(core.ResourceMemory), + string(core.ResourceEphemeralStorage), +) + +// IsStandardContainerResourceName returns true if the container can make a resource request +// for the specified resource +func IsStandardContainerResourceName(str string) bool { + return standardContainerResources.Has(str) || IsHugePageResourceName(core.ResourceName(str)) +} + +// IsExtendedResourceName returns true if: +// 1. the resource name is not in the default namespace; +// 2. resource name does not have "requests." prefix, +// to avoid confusion with the convention in quota +// 3. it satisfies the rules in IsQualifiedName() after converted into quota resource name +func IsExtendedResourceName(name core.ResourceName) bool { + if IsNativeResource(name) || strings.HasPrefix(string(name), core.DefaultResourceRequestsPrefix) { + return false + } + // Ensure it satisfies the rules in IsQualifiedName() after converted into quota resource name + nameForQuota := fmt.Sprintf("%s%s", core.DefaultResourceRequestsPrefix, string(name)) + if errs := validation.IsQualifiedName(string(nameForQuota)); len(errs) != 0 { + return false + } + return true +} + +// IsNativeResource returns true if the resource name is in the +// *kubernetes.io/ namespace. Partially-qualified (unprefixed) names are +// implicitly in the kubernetes.io/ namespace. +func IsNativeResource(name core.ResourceName) bool { + return !strings.Contains(string(name), "/") || + strings.Contains(string(name), core.ResourceDefaultNamespacePrefix) +} + +// IsOvercommitAllowed returns true if the resource is in the default +// namespace and is not hugepages. 
+func IsOvercommitAllowed(name core.ResourceName) bool { + return IsNativeResource(name) && + !IsHugePageResourceName(name) +} + +var standardLimitRangeTypes = sets.NewString( + string(core.LimitTypePod), + string(core.LimitTypeContainer), + string(core.LimitTypePersistentVolumeClaim), +) + +// IsStandardLimitRangeType returns true if the type is Pod or Container +func IsStandardLimitRangeType(str string) bool { + return standardLimitRangeTypes.Has(str) +} + +var standardQuotaResources = sets.NewString( + string(core.ResourceCPU), + string(core.ResourceMemory), + string(core.ResourceEphemeralStorage), + string(core.ResourceRequestsCPU), + string(core.ResourceRequestsMemory), + string(core.ResourceRequestsStorage), + string(core.ResourceRequestsEphemeralStorage), + string(core.ResourceLimitsCPU), + string(core.ResourceLimitsMemory), + string(core.ResourceLimitsEphemeralStorage), + string(core.ResourcePods), + string(core.ResourceQuotas), + string(core.ResourceServices), + string(core.ResourceReplicationControllers), + string(core.ResourceSecrets), + string(core.ResourcePersistentVolumeClaims), + string(core.ResourceConfigMaps), + string(core.ResourceServicesNodePorts), + string(core.ResourceServicesLoadBalancers), +) + +// IsStandardQuotaResourceName returns true if the resource is known to +// the quota tracking system +func IsStandardQuotaResourceName(str string) bool { + return standardQuotaResources.Has(str) || IsQuotaHugePageResourceName(core.ResourceName(str)) +} + +var standardResources = sets.NewString( + string(core.ResourceCPU), + string(core.ResourceMemory), + string(core.ResourceEphemeralStorage), + string(core.ResourceRequestsCPU), + string(core.ResourceRequestsMemory), + string(core.ResourceRequestsEphemeralStorage), + string(core.ResourceLimitsCPU), + string(core.ResourceLimitsMemory), + string(core.ResourceLimitsEphemeralStorage), + string(core.ResourcePods), + string(core.ResourceQuotas), + string(core.ResourceServices), + string(core.ResourceReplicationControllers), + string(core.ResourceSecrets), + string(core.ResourceConfigMaps), + string(core.ResourcePersistentVolumeClaims), + string(core.ResourceStorage), + string(core.ResourceRequestsStorage), + string(core.ResourceServicesNodePorts), + string(core.ResourceServicesLoadBalancers), +) + +// IsStandardResourceName returns true if the resource is known to the system +func IsStandardResourceName(str string) bool { + return standardResources.Has(str) || IsQuotaHugePageResourceName(core.ResourceName(str)) +} + +var integerResources = sets.NewString( + string(core.ResourcePods), + string(core.ResourceQuotas), + string(core.ResourceServices), + string(core.ResourceReplicationControllers), + string(core.ResourceSecrets), + string(core.ResourceConfigMaps), + string(core.ResourcePersistentVolumeClaims), + string(core.ResourceServicesNodePorts), + string(core.ResourceServicesLoadBalancers), +) + +// IsIntegerResourceName returns true if the resource is measured in integer values +func IsIntegerResourceName(str string) bool { + return integerResources.Has(str) || IsExtendedResourceName(core.ResourceName(str)) +} + +// IsServiceIPSet aims to check if the service's ClusterIP is set or not +// the objective is not to perform validation here +func IsServiceIPSet(service *core.Service) bool { + return service.Spec.ClusterIP != core.ClusterIPNone && service.Spec.ClusterIP != "" +} + +var standardFinalizers = sets.NewString( + string(core.FinalizerKubernetes), + metav1.FinalizerOrphanDependents, + metav1.FinalizerDeleteDependents, +) + 
+// IsStandardFinalizerName checks if the input string is a standard finalizer name +func IsStandardFinalizerName(str string) bool { + return standardFinalizers.Has(str) +} + +// LoadBalancerStatusEqual checks if the status of the load balancer is equal to the target status +// TODO: make method on LoadBalancerStatus? +func LoadBalancerStatusEqual(l, r *core.LoadBalancerStatus) bool { + return ingressSliceEqual(l.Ingress, r.Ingress) +} + +func ingressSliceEqual(lhs, rhs []core.LoadBalancerIngress) bool { + if len(lhs) != len(rhs) { + return false + } + for i := range lhs { + if !ingressEqual(&lhs[i], &rhs[i]) { + return false + } + } + return true +} + +func ingressEqual(lhs, rhs *core.LoadBalancerIngress) bool { + if lhs.IP != rhs.IP { + return false + } + if lhs.Hostname != rhs.Hostname { + return false + } + return true +} + +// GetAccessModesAsString returns a string representation of an array of access modes. +// modes, when present, are always in the same order: RWO,ROX,RWX. +func GetAccessModesAsString(modes []core.PersistentVolumeAccessMode) string { + modes = removeDuplicateAccessModes(modes) + modesStr := []string{} + if containsAccessMode(modes, core.ReadWriteOnce) { + modesStr = append(modesStr, "RWO") + } + if containsAccessMode(modes, core.ReadOnlyMany) { + modesStr = append(modesStr, "ROX") + } + if containsAccessMode(modes, core.ReadWriteMany) { + modesStr = append(modesStr, "RWX") + } + return strings.Join(modesStr, ",") +} + +// GetAccessModesFromString returns an array of AccessModes from a string created by GetAccessModesAsString +func GetAccessModesFromString(modes string) []core.PersistentVolumeAccessMode { + strmodes := strings.Split(modes, ",") + accessModes := []core.PersistentVolumeAccessMode{} + for _, s := range strmodes { + s = strings.Trim(s, " ") + switch { + case s == "RWO": + accessModes = append(accessModes, core.ReadWriteOnce) + case s == "ROX": + accessModes = append(accessModes, core.ReadOnlyMany) + case s == "RWX": + accessModes = append(accessModes, core.ReadWriteMany) + } + } + return accessModes +} + +// removeDuplicateAccessModes returns an array of access modes without any duplicates +func removeDuplicateAccessModes(modes []core.PersistentVolumeAccessMode) []core.PersistentVolumeAccessMode { + accessModes := []core.PersistentVolumeAccessMode{} + for _, m := range modes { + if !containsAccessMode(accessModes, m) { + accessModes = append(accessModes, m) + } + } + return accessModes +} + +func containsAccessMode(modes []core.PersistentVolumeAccessMode, mode core.PersistentVolumeAccessMode) bool { + for _, m := range modes { + if m == mode { + return true + } + } + return false +} + +// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement core type into a struct that implements +// labels.Selector. 
+func NodeSelectorRequirementsAsSelector(nsm []core.NodeSelectorRequirement) (labels.Selector, error) { + if len(nsm) == 0 { + return labels.Nothing(), nil + } + selector := labels.NewSelector() + for _, expr := range nsm { + var op selection.Operator + switch expr.Operator { + case core.NodeSelectorOpIn: + op = selection.In + case core.NodeSelectorOpNotIn: + op = selection.NotIn + case core.NodeSelectorOpExists: + op = selection.Exists + case core.NodeSelectorOpDoesNotExist: + op = selection.DoesNotExist + case core.NodeSelectorOpGt: + op = selection.GreaterThan + case core.NodeSelectorOpLt: + op = selection.LessThan + default: + return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator) + } + r, err := labels.NewRequirement(expr.Key, op, expr.Values) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + } + return selector, nil +} + +// NodeSelectorRequirementsAsFieldSelector converts the []NodeSelectorRequirement core type into a struct that implements +// fields.Selector. +func NodeSelectorRequirementsAsFieldSelector(nsm []core.NodeSelectorRequirement) (fields.Selector, error) { + if len(nsm) == 0 { + return fields.Nothing(), nil + } + + selectors := []fields.Selector{} + for _, expr := range nsm { + switch expr.Operator { + case core.NodeSelectorOpIn: + if len(expr.Values) != 1 { + return nil, fmt.Errorf("unexpected number of value (%d) for node field selector operator %q", + len(expr.Values), expr.Operator) + } + selectors = append(selectors, fields.OneTermEqualSelector(expr.Key, expr.Values[0])) + + case core.NodeSelectorOpNotIn: + if len(expr.Values) != 1 { + return nil, fmt.Errorf("unexpected number of value (%d) for node field selector operator %q", + len(expr.Values), expr.Operator) + } + selectors = append(selectors, fields.OneTermNotEqualSelector(expr.Key, expr.Values[0])) + + default: + return nil, fmt.Errorf("%q is not a valid node field selector operator", expr.Operator) + } + } + + return fields.AndSelectors(selectors...), nil +} + +// GetTolerationsFromPodAnnotations gets the json serialized tolerations data from Pod.Annotations +// and converts it to the []Toleration type in core. +func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]core.Toleration, error) { + var tolerations []core.Toleration + if len(annotations) > 0 && annotations[core.TolerationsAnnotationKey] != "" { + err := json.Unmarshal([]byte(annotations[core.TolerationsAnnotationKey]), &tolerations) + if err != nil { + return tolerations, err + } + } + return tolerations, nil +} + +// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list. +// Returns true if something was updated, false otherwise. +func AddOrUpdateTolerationInPod(pod *core.Pod, toleration *core.Toleration) bool { + podTolerations := pod.Spec.Tolerations + + var newTolerations []core.Toleration + updated := false + for i := range podTolerations { + if toleration.MatchToleration(&podTolerations[i]) { + if Semantic.DeepEqual(toleration, podTolerations[i]) { + return false + } + newTolerations = append(newTolerations, *toleration) + updated = true + continue + } + + newTolerations = append(newTolerations, podTolerations[i]) + } + + if !updated { + newTolerations = append(newTolerations, *toleration) + } + + pod.Spec.Tolerations = newTolerations + return true +} + +// GetTaintsFromNodeAnnotations gets the json serialized taints data from Pod.Annotations +// and converts it to the []Taint type in core. 
+func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]core.Taint, error) { + var taints []core.Taint + if len(annotations) > 0 && annotations[core.TaintsAnnotationKey] != "" { + err := json.Unmarshal([]byte(annotations[core.TaintsAnnotationKey]), &taints) + if err != nil { + return []core.Taint{}, err + } + } + return taints, nil +} + +// GetPersistentVolumeClass returns StorageClassName. +func GetPersistentVolumeClass(volume *core.PersistentVolume) string { + // Use beta annotation first + if class, found := volume.Annotations[core.BetaStorageClassAnnotation]; found { + return class + } + + return volume.Spec.StorageClassName +} + +// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was +// requested, it returns "". +func GetPersistentVolumeClaimClass(claim *core.PersistentVolumeClaim) string { + // Use beta annotation first + if class, found := claim.Annotations[core.BetaStorageClassAnnotation]; found { + return class + } + + if claim.Spec.StorageClassName != nil { + return *claim.Spec.StorageClassName + } + + return "" +} + +// PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field. +func PersistentVolumeClaimHasClass(claim *core.PersistentVolumeClaim) bool { + // Use beta annotation first + if _, found := claim.Annotations[core.BetaStorageClassAnnotation]; found { + return true + } + + if claim.Spec.StorageClassName != nil { + return true + } + + return false +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/json.go b/vendor/k8s.io/kubernetes/pkg/apis/core/json.go new file mode 100644 index 0000000000..937cd056c0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/json.go @@ -0,0 +1,28 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package core + +import "encoding/json" + +// This file implements json marshaling/unmarshaling interfaces on objects that are currently marshaled into annotations +// to prevent anyone from marshaling these internal structs. + +var _ = json.Marshaler(&AvoidPods{}) +var _ = json.Unmarshaler(&AvoidPods{}) + +func (AvoidPods) MarshalJSON() ([]byte, error) { panic("do not marshal internal struct") } +func (*AvoidPods) UnmarshalJSON([]byte) error { panic("do not unmarshal to internal struct") } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go b/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go new file mode 100644 index 0000000000..55b27f30b6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go @@ -0,0 +1,34 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +//TODO: consider making these methods functions, because we don't want helper +//functions in the k8s.io/api repo. + +package core + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} + +func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/register.go b/vendor/k8s.io/kubernetes/pkg/apis/core/register.go new file mode 100644 index 0000000000..c6cd8681d8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/register.go @@ -0,0 +1,98 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package core + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil { + return err + } + scheme.AddKnownTypes(SchemeGroupVersion, + &Pod{}, + &PodList{}, + &PodStatusResult{}, + &PodTemplate{}, + &PodTemplateList{}, + &ReplicationControllerList{}, + &ReplicationController{}, + &ServiceList{}, + &Service{}, + &ServiceProxyOptions{}, + &NodeList{}, + &Node{}, + &NodeProxyOptions{}, + &Endpoints{}, + &EndpointsList{}, + &Binding{}, + &Event{}, + &EventList{}, + &List{}, + &LimitRange{}, + &LimitRangeList{}, + &ResourceQuota{}, + &ResourceQuotaList{}, + &Namespace{}, + &NamespaceList{}, + &ServiceAccount{}, + &ServiceAccountList{}, + &Secret{}, + &SecretList{}, + &PersistentVolume{}, + &PersistentVolumeList{}, + &PersistentVolumeClaim{}, + &PersistentVolumeClaimList{}, + &PodAttachOptions{}, + &PodLogOptions{}, + &PodExecOptions{}, + &PodPortForwardOptions{}, + &PodProxyOptions{}, + &ComponentStatus{}, + &ComponentStatusList{}, + &SerializedReference{}, + &RangeAllocation{}, + &ConfigMap{}, + &ConfigMapList{}, + ) + + return nil +} diff --git 
a/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go b/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go new file mode 100644 index 0000000000..1367e00e56 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go @@ -0,0 +1,55 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package core + +import ( + "k8s.io/apimachinery/pkg/api/resource" +) + +func (self ResourceName) String() string { + return string(self) +} + +// Returns the CPU limit if specified. +func (self *ResourceList) Cpu() *resource.Quantity { + if val, ok := (*self)[ResourceCPU]; ok { + return &val + } + return &resource.Quantity{Format: resource.DecimalSI} +} + +// Returns the Memory limit if specified. +func (self *ResourceList) Memory() *resource.Quantity { + if val, ok := (*self)[ResourceMemory]; ok { + return &val + } + return &resource.Quantity{Format: resource.BinarySI} +} + +func (self *ResourceList) Pods() *resource.Quantity { + if val, ok := (*self)[ResourcePods]; ok { + return &val + } + return &resource.Quantity{} +} + +func (self *ResourceList) StorageEphemeral() *resource.Quantity { + if val, ok := (*self)[ResourceEphemeralStorage]; ok { + return &val + } + return &resource.Quantity{} +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go b/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go new file mode 100644 index 0000000000..ae1feb74d7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go @@ -0,0 +1,36 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +//TODO: consider making these methods functions, because we don't want helper +//functions in the k8s.io/api repo. + +package core + +import "fmt" + +// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect, +// if the two taints have same key:effect, regard as they match. +func (t *Taint) MatchTaint(taintToMatch Taint) bool { + return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect +} + +// taint.ToString() converts taint struct to string in format key=value:effect or key:effect. 
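+// For example (illustrative values), Taint{Key: "dedicated", Value: "gpu", Effect: TaintEffectNoSchedule}
+// renders as "dedicated=gpu:NoSchedule", while the same taint with an empty Value renders as "dedicated:NoSchedule".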
+func (t *Taint) ToString() string {
+	if len(t.Value) == 0 {
+		return fmt.Sprintf("%v:%v", t.Key, t.Effect)
+	}
+	return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect)
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go b/vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go
new file mode 100644
index 0000000000..1dfbc9f1bb
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+//TODO: consider making these methods functions, because we don't want helper
+//functions in the k8s.io/api repo.
+
+package core
+
+// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by <key,effect,operator,value>,
+// if the two tolerations have same <key,effect,operator,value> combination, regard as they match.
+// TODO: uniqueness check for tolerations in api validations.
+func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool {
+	return t.Key == tolerationToMatch.Key &&
+		t.Effect == tolerationToMatch.Effect &&
+		t.Operator == tolerationToMatch.Operator &&
+		t.Value == tolerationToMatch.Value
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/types.go b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go
new file mode 100644
index 0000000000..497b9924c6
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go
@@ -0,0 +1,4807 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package core
+
+import (
+	"k8s.io/apimachinery/pkg/api/resource"
+	metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/intstr"
+)
+
+const (
+	// NamespaceDefault means the object is in the default namespace which is applied when not specified by clients
+	NamespaceDefault = "default"
+	// NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces
+	NamespaceAll = ""
+	// NamespaceNone is the argument for a context when there is no namespace.
+	NamespaceNone = ""
+	// NamespaceSystem is the system namespace where we place system components.
+ NamespaceSystem = "kube-system" + // NamespacePublic is the namespace where we place public info (ConfigMaps) + NamespacePublic = "kube-public" + // NamespaceNodeLease is the namespace where we place node lease objects (used for node heartbeats) + NamespaceNodeLease = "kube-node-lease" + // TerminationMessagePathDefault means the default path to capture the application termination message running in a container + TerminationMessagePathDefault = "/dev/termination-log" +) + +// Volume represents a named volume in a pod that may be accessed by any containers in the pod. +type Volume struct { + // Required: This must be a DNS_LABEL. Each volume in a pod must have + // a unique name. + Name string + // The VolumeSource represents the location and type of a volume to mount. + // This is optional for now. If not specified, the Volume is implied to be an EmptyDir. + // This implied behavior is deprecated and will be removed in a future version. + // +optional + VolumeSource +} + +// VolumeSource represents the source location of a volume to mount. +// Only one of its members may be specified. +type VolumeSource struct { + // HostPath represents file or directory on the host machine that is + // directly exposed to the container. This is generally used for system + // agents or other privileged things that are allowed to see the host + // machine. Most containers will NOT need this. + // --- + // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + // mount host directories as read/write. + // +optional + HostPath *HostPathVolumeSource + // EmptyDir represents a temporary directory that shares a pod's lifetime. + // +optional + EmptyDir *EmptyDirVolumeSource + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource + // AWSElasticBlockStore represents an AWS EBS disk that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource + // GitRepo represents a git repository at a particular revision. + // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + // into the Pod's container. + // +optional + GitRepo *GitRepoVolumeSource + // Secret represents a secret that should populate this volume. + // +optional + Secret *SecretVolumeSource + // NFS represents an NFS mount on the host that shares a pod's lifetime + // +optional + NFS *NFSVolumeSource + // ISCSIVolumeSource represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + ISCSI *ISCSIVolumeSource + // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime + // +optional + Glusterfs *GlusterfsVolumeSource + // PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace + // +optional + PersistentVolumeClaim *PersistentVolumeClaimVolumeSource + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime + // +optional + RBD *RBDVolumeSource + + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource + + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. 
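+	// Illustrative only: a hypothetical value here is &FlexVolumeSource{Driver: "example.com/nfs"},
+	// where the driver name is an assumption, not a shipped driver.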
+ // +optional + FlexVolume *FlexVolumeSource + + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // +optional + Cinder *CinderVolumeSource + + // CephFS represents a Cephfs mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSVolumeSource + + // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + Flocker *FlockerVolumeSource + + // DownwardAPI represents metadata about the pod that should populate this volume + // +optional + DownwardAPI *DownwardAPIVolumeSource + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + FC *FCVolumeSource + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + AzureFile *AzureFileVolumeSource + // ConfigMap represents a configMap that should populate this volume + // +optional + ConfigMap *ConfigMapVolumeSource + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + VsphereVolume *VsphereVirtualDiskVolumeSource + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // +optional + AzureDisk *AzureDiskVolumeSource + // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource + // Items for all in one resources secrets, configmaps, and downward API + Projected *ProjectedVolumeSource + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOVolumeSource + // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod + // +optional + StorageOS *StorageOSVolumeSource + // CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature). + // +optional + CSI *CSIVolumeSource +} + +// Similar to VolumeSource but meant for the administrator who creates PVs. +// Exactly one of its members must be set. +type PersistentVolumeSource struct { + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource + // AWSElasticBlockStore represents an AWS EBS disk that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource + // HostPath represents a directory on the host. + // Provisioned by a developer or tester. + // This is useful for single-node development and testing only! + // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. 
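+	// Illustrative only: a hypothetical single-node test value is
+	// &HostPathVolumeSource{Path: "/tmp/data"}.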
+ // +optional + HostPath *HostPathVolumeSource + // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod + // +optional + Glusterfs *GlusterfsPersistentVolumeSource + // NFS represents an NFS mount on the host that shares a pod's lifetime + // +optional + NFS *NFSVolumeSource + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime + // +optional + RBD *RBDPersistentVolumeSource + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource + // ISCSIPersistentVolumeSource represents an ISCSI resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + ISCSI *ISCSIPersistentVolumeSource + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. + // +optional + FlexVolume *FlexPersistentVolumeSource + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // +optional + Cinder *CinderPersistentVolumeSource + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSPersistentVolumeSource + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + FC *FCVolumeSource + // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + Flocker *FlockerVolumeSource + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + AzureFile *AzureFilePersistentVolumeSource + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + VsphereVolume *VsphereVirtualDiskVolumeSource + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // +optional + AzureDisk *AzureDiskVolumeSource + // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOPersistentVolumeSource + // Local represents directly-attached storage with node affinity + // +optional + Local *LocalVolumeSource + // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod + // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md + // +optional + StorageOS *StorageOSPersistentVolumeSource + // CSI (Container Storage Interface) represents storage that is handled by an external CSI driver. + // +optional + CSI *CSIPersistentVolumeSource +} + +type PersistentVolumeClaimVolumeSource struct { + // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume + ClaimName string + // Optional: Defaults to false (read/write). ReadOnly here + // will force the ReadOnly setting in VolumeMounts + // +optional + ReadOnly bool +} + +const ( + // BetaStorageClassAnnotation represents the beta/previous StorageClass annotation. + // It's deprecated and will be removed in a future release. 
(#51440) + BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" + + // MountOptionAnnotation defines mount option annotation used in PVs + MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type PersistentVolume struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + //Spec defines a persistent volume owned by the cluster + // +optional + Spec PersistentVolumeSpec + + // Status represents the current information about persistent volume. + // +optional + Status PersistentVolumeStatus +} + +type PersistentVolumeSpec struct { + // Resources represents the actual resources of the volume + Capacity ResourceList + // Source represents the location and type of a volume to mount. + PersistentVolumeSource + // AccessModes contains all ways the volume can be mounted + // +optional + AccessModes []PersistentVolumeAccessMode + // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. + // ClaimRef is expected to be non-nil when bound. + // claim.VolumeName is the authoritative bind between PV and PVC. + // When set to non-nil value, PVC.Spec.Selector of the referenced PVC is + // ignored, i.e. labels of this PV do not need to match PVC selector. + // +optional + ClaimRef *ObjectReference + // Optional: what happens to a persistent volume when released from its claim. + // +optional + PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy + // Name of StorageClass to which this persistent volume belongs. Empty value + // means that this volume does not belong to any StorageClass. + // +optional + StorageClassName string + // A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will + // simply fail if one is invalid. + // +optional + MountOptions []string + // volumeMode defines if a volume is intended to be used with a formatted filesystem + // or to remain in raw block state. Value of Filesystem is implied when not included in spec. + // This is a beta feature. + // +optional + VolumeMode *PersistentVolumeMode + // NodeAffinity defines constraints that limit what nodes this volume can be accessed from. + // This field influences the scheduling of pods that use this volume. + // +optional + NodeAffinity *VolumeNodeAffinity +} + +// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from. +type VolumeNodeAffinity struct { + // Required specifies hard node constraints that must be met. + Required *NodeSelector +} + +// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes +type PersistentVolumeReclaimPolicy string + +const ( + // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim. + // The volume plugin must support Recycling. + // DEPRECATED: The PersistentVolumeReclaimRecycle called Recycle is being deprecated. See announcement here: https://groups.google.com/forum/#!topic/kubernetes-dev/uexugCza84I + PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle" + // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim. + // The volume plugin must support Deletion. + PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete" + // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator. 
+ // The default policy is Retain. + PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" +) + +// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem. +type PersistentVolumeMode string + +const ( + // PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device. + PersistentVolumeBlock PersistentVolumeMode = "Block" + // PersistentVolumeFilesystem means the volume will be or is formatted with a filesystem. + PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem" +) + +type PersistentVolumeStatus struct { + // Phase indicates if a volume is available, bound to a claim, or released by a claim + // +optional + Phase PersistentVolumePhase + // A human-readable message indicating details about why the volume is in this state. + // +optional + Message string + // Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI + // +optional + Reason string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type PersistentVolumeList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + Items []PersistentVolume +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PersistentVolumeClaim is a user's request for and claim to a persistent volume +type PersistentVolumeClaim struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the volume requested by a pod author + // +optional + Spec PersistentVolumeClaimSpec + + // Status represents the current information about a claim + // +optional + Status PersistentVolumeClaimStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type PersistentVolumeClaimList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + Items []PersistentVolumeClaim +} + +// PersistentVolumeClaimSpec describes the common attributes of storage devices +// and allows a Source for provider-specific attributes +type PersistentVolumeClaimSpec struct { + // Contains the types of access modes required + // +optional + AccessModes []PersistentVolumeAccessMode + // A label query over volumes to consider for binding. This selector is + // ignored when VolumeName is set + // +optional + Selector *metav1.LabelSelector + // Resources represents the minimum resources required + // +optional + Resources ResourceRequirements + // VolumeName is the binding reference to the PersistentVolume backing this + // claim. When set to non-empty value Selector is not evaluated + // +optional + VolumeName string + // Name of the StorageClass required by the claim. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1 + // +optional + StorageClassName *string + // volumeMode defines what type of volume is required by the claim. + // Value of Filesystem is implied when not included in claim spec. + // This is a beta feature. + // +optional + VolumeMode *PersistentVolumeMode + // This field can be used to specify either: + // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + // * An existing PVC (PersistentVolumeClaim) + // In order to use either of these DataSource types, the appropriate feature gate + // must be enabled (VolumeSnapshotDataSource, VolumePVCDataSource) + // If the provisioner can support the specified data source, it will create + // a new volume based on the contents of the specified PVC or Snapshot. 
+ // If the provisioner does not support the specified data source, the volume will + // not be created and the failure will be reported as an event. + // In the future, we plan to support more data source types and the behavior + // of the provisioner may change. + // +optional + DataSource *TypedLocalObjectReference +} + +type PersistentVolumeClaimConditionType string + +// These are valid conditions of Pvc +const ( + // An user trigger resize of pvc has been started + PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing" + // PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node + PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending" +) + +type PersistentVolumeClaimCondition struct { + Type PersistentVolumeClaimConditionType + Status ConditionStatus + // +optional + LastProbeTime metav1.Time + // +optional + LastTransitionTime metav1.Time + // +optional + Reason string + // +optional + Message string +} + +type PersistentVolumeClaimStatus struct { + // Phase represents the current phase of PersistentVolumeClaim + // +optional + Phase PersistentVolumeClaimPhase + // AccessModes contains all ways the volume backing the PVC can be mounted + // +optional + AccessModes []PersistentVolumeAccessMode + // Represents the actual resources of the underlying volume + // +optional + Capacity ResourceList + // +optional + Conditions []PersistentVolumeClaimCondition +} + +type PersistentVolumeAccessMode string + +const ( + // can be mounted read/write mode to exactly 1 host + ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce" + // can be mounted in read-only mode to many hosts + ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany" + // can be mounted in read/write mode to many hosts + ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany" +) + +type PersistentVolumePhase string + +const ( + // used for PersistentVolumes that are not available + VolumePending PersistentVolumePhase = "Pending" + // used for PersistentVolumes that are not yet bound + // Available volumes are held by the binder and matched to PersistentVolumeClaims + VolumeAvailable PersistentVolumePhase = "Available" + // used for PersistentVolumes that are bound + VolumeBound PersistentVolumePhase = "Bound" + // used for PersistentVolumes where the bound PersistentVolumeClaim was deleted + // released volumes must be recycled before becoming available again + // this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource + VolumeReleased PersistentVolumePhase = "Released" + // used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim + VolumeFailed PersistentVolumePhase = "Failed" +) + +type PersistentVolumeClaimPhase string + +const ( + // used for PersistentVolumeClaims that are not yet bound + ClaimPending PersistentVolumeClaimPhase = "Pending" + // used for PersistentVolumeClaims that are bound + ClaimBound PersistentVolumeClaimPhase = "Bound" + // used for PersistentVolumeClaims that lost their underlying + // PersistentVolume. The claim was bound to a PersistentVolume and this + // volume does not exist any longer and all data on it was lost. 
+ ClaimLost PersistentVolumeClaimPhase = "Lost" +) + +type HostPathType string + +const ( + // For backwards compatible, leave it empty if unset + HostPathUnset HostPathType = "" + // If nothing exists at the given path, an empty directory will be created there + // as needed with file mode 0755, having the same group and ownership with Kubelet. + HostPathDirectoryOrCreate HostPathType = "DirectoryOrCreate" + // A directory must exist at the given path + HostPathDirectory HostPathType = "Directory" + // If nothing exists at the given path, an empty file will be created there + // as needed with file mode 0644, having the same group and ownership with Kubelet. + HostPathFileOrCreate HostPathType = "FileOrCreate" + // A file must exist at the given path + HostPathFile HostPathType = "File" + // A UNIX socket must exist at the given path + HostPathSocket HostPathType = "Socket" + // A character device must exist at the given path + HostPathCharDev HostPathType = "CharDevice" + // A block device must exist at the given path + HostPathBlockDev HostPathType = "BlockDevice" +) + +// Represents a host path mapped into a pod. +// Host path volumes do not support ownership management or SELinux relabeling. +type HostPathVolumeSource struct { + // If the path is a symlink, it will follow the link to the real path. + Path string + // Defaults to "" + Type *HostPathType +} + +// Represents an empty directory for a pod. +// Empty directory volumes support ownership management and SELinux relabeling. +type EmptyDirVolumeSource struct { + // TODO: Longer term we want to represent the selection of underlying + // media more like a scheduling problem - user says what traits they + // need, we give them a backing store that satisfies that. For now + // this will cover the most common needs. + // Optional: what type of storage medium should back this directory. + // The default is "" which means to use the node's default medium. + // +optional + Medium StorageMedium + // Total amount of local storage required for this EmptyDir volume. + // The size limit is also applicable for memory medium. + // The maximum usage on memory medium EmptyDir would be the minimum value between + // the SizeLimit specified here and the sum of memory limits of all containers in a pod. + // The default is nil which means that the limit is undefined. + // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // +optional + SizeLimit *resource.Quantity +} + +// StorageMedium defines ways that storage can be allocated to a volume. +type StorageMedium string + +const ( + StorageMediumDefault StorageMedium = "" // use whatever the default is for the node + StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) + StorageMediumHugePages StorageMedium = "HugePages" // use hugepages +) + +// Protocol defines network protocols supported for things like container ports. +type Protocol string + +const ( + // ProtocolTCP is the TCP protocol. + ProtocolTCP Protocol = "TCP" + // ProtocolUDP is the UDP protocol. + ProtocolUDP Protocol = "UDP" + // ProtocolSCTP is the SCTP protocol. + ProtocolSCTP Protocol = "SCTP" +) + +// Represents a Persistent Disk resource in Google Compute Engine. +// +// A GCE PD must exist before mounting to a container. The disk must +// also be in the same GCE project and zone as the kubelet. A GCE PD +// can only be mounted as read/write once or read-only many times. GCE +// PDs support ownership management and SELinux relabeling. 
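+//
+// Illustrative only (the disk name is hypothetical and must refer to an
+// existing GCE PD):
+//
+//	pd := GCEPersistentDiskVolumeSource{PDName: "my-data-disk", FSType: "ext4", ReadOnly: true}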
+type GCEPersistentDiskVolumeSource struct {
+	// Unique name of the PD resource. Used to identify the disk in GCE
+	PDName string
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// TODO: how do we prevent errors in the filesystem from compromising the machine
+	// +optional
+	FSType string
+	// Optional: Partition on the disk to mount.
+	// If omitted, kubelet will attempt to mount the device name.
+	// Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty.
+	// +optional
+	Partition int32
+	// Optional: Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool
+}
+
+// Represents an ISCSI disk.
+// ISCSI volumes can only be mounted as read/write once.
+// ISCSI volumes support ownership management and SELinux relabeling.
+type ISCSIVolumeSource struct {
+	// Required: iSCSI target portal
+	// the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260)
+	// +optional
+	TargetPortal string
+	// Required: target iSCSI Qualified Name
+	// +optional
+	IQN string
+	// Required: iSCSI target lun number
+	// +optional
+	Lun int32
+	// Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.
+	// +optional
+	ISCSIInterface string
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// TODO: how do we prevent errors in the filesystem from compromising the machine
+	// +optional
+	FSType string
+	// Optional: Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool
+	// Optional: list of iSCSI target portal ips for high availability.
+	// the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260)
+	// +optional
+	Portals []string
+	// Optional: whether support iSCSI Discovery CHAP authentication
+	// +optional
+	DiscoveryCHAPAuth bool
+	// Optional: whether support iSCSI Session CHAP authentication
+	// +optional
+	SessionCHAPAuth bool
+	// Optional: CHAP secret for iSCSI target and initiator authentication.
+	// The secret is used if either DiscoveryCHAPAuth or SessionCHAPAuth is true
+	// +optional
+	SecretRef *LocalObjectReference
+	// Optional: Custom initiator name per volume.
+	// If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+	// <target portal>:<volume name> will be created for the connection.
+	// +optional
+	InitiatorName *string
+}
+
+// ISCSIPersistentVolumeSource represents an ISCSI disk.
+// ISCSI volumes can only be mounted as read/write once.
+// ISCSI volumes support ownership management and SELinux relabeling.
+type ISCSIPersistentVolumeSource struct {
+	// Required: iSCSI target portal
+	// the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260)
+	// +optional
+	TargetPortal string
+	// Required: target iSCSI Qualified Name
+	// +optional
+	IQN string
+	// Required: iSCSI target lun number
+	// +optional
+	Lun int32
+	// Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.
+	// +optional
+	ISCSIInterface string
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// TODO: how do we prevent errors in the filesystem from compromising the machine
+	// +optional
+	FSType string
+	// Optional: Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool
+	// Optional: list of iSCSI target portal ips for high availability.
+	// the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260)
+	// +optional
+	Portals []string
+	// Optional: whether support iSCSI Discovery CHAP authentication
+	// +optional
+	DiscoveryCHAPAuth bool
+	// Optional: whether support iSCSI Session CHAP authentication
+	// +optional
+	SessionCHAPAuth bool
+	// Optional: CHAP secret for iSCSI target and initiator authentication.
+	// The secret is used if either DiscoveryCHAPAuth or SessionCHAPAuth is true
+	// +optional
+	SecretRef *SecretReference
+	// Optional: Custom initiator name per volume.
+	// If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+	// <target portal>:<volume name> will be created for the connection.
+	// +optional
+	InitiatorName *string
+}
+
+// Represents a Fibre Channel volume.
+// Fibre Channel volumes can only be mounted as read/write once.
+// Fibre Channel volumes support ownership management and SELinux relabeling.
+type FCVolumeSource struct {
+	// Optional: FC target worldwide names (WWNs)
+	// +optional
+	TargetWWNs []string
+	// Optional: FC target lun number
+	// +optional
+	Lun *int32
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// TODO: how do we prevent errors in the filesystem from compromising the machine
+	// +optional
+	FSType string
+	// Optional: Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool
+	// Optional: FC volume World Wide Identifiers (WWIDs)
+	// Either WWIDs or TargetWWNs and Lun must be set, but not both simultaneously.
+	// +optional
+	WWIDs []string
+}
+
+// FlexPersistentVolumeSource represents a generic persistent volume resource that is
+// provisioned/attached using an exec based plugin.
+type FlexPersistentVolumeSource struct {
+	// Driver is the name of the driver to use for this volume.
+	Driver string
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+	// +optional
+	FSType string
+	// Optional: SecretRef is reference to the secret object containing
+	// sensitive information to pass to the plugin scripts. This may be
+	// empty if no secret object is specified. If the secret object
+	// contains more than one secret, all secrets are passed to the plugin
+	// scripts.
+	// +optional
+	SecretRef *SecretReference
+	// Optional: Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool
+	// Optional: Extra driver options if any.
+	// +optional
+	Options map[string]string
+}
+
+// FlexVolume represents a generic volume resource that is
+// provisioned/attached using an exec based plugin.
+type FlexVolumeSource struct {
+	// Driver is the name of the driver to use for this volume.
+	Driver string
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex.
"ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + // +optional + FSType string + // Optional: SecretRef is reference to the secret object containing + // sensitive information to pass to the plugin scripts. This may be + // empty if no secret object is specified. If the secret object + // contains more than one secret, all secrets are passed to the plugin + // scripts. + // +optional + SecretRef *LocalObjectReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // Optional: Extra driver options if any. + // +optional + Options map[string]string +} + +// Represents a Persistent Disk resource in AWS. +// +// An AWS EBS disk must exist before mounting to a container. The disk +// must also be in the same AWS zone as the kubelet. An AWS EBS disk +// can only be mounted as read/write once. AWS EBS volumes support +// ownership management and SELinux relabeling. +type AWSElasticBlockStoreVolumeSource struct { + // Unique id of the persistent disk resource. Used to identify the disk in AWS + VolumeID string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Partition on the disk to mount. + // If omitted, kubelet will attempt to mount the device name. + // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. + // +optional + Partition int32 + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a volume that is populated with the contents of a git repository. +// Git repo volumes do not support ownership management. +// Git repo volumes support SELinux relabeling. +// +// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an +// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir +// into the Pod's container. +type GitRepoVolumeSource struct { + // Repository URL + Repository string + // Commit hash, this is optional + // +optional + Revision string + // Clone target, this is optional + // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + // git repository. Otherwise, if specified, the volume will contain the git repository in + // the subdirectory with the given name. + // +optional + Directory string + // TODO: Consider credentials here. +} + +// Adapts a Secret into a volume. +// +// The contents of the target Secret's Data field will be presented in a volume +// as files using the keys in the Data field as the file names. +// Secret volumes support ownership management and SELinux relabeling. +type SecretVolumeSource struct { + // Name of the secret in the pod's namespace to use. + // +optional + SecretName string + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. 
Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool +} + +// Adapts a secret into a projected volume. +// +// The contents of the target Secret's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names. +// Note that this is identical to a secret volume source without the default +// mode. +type SecretProjection struct { + LocalObjectReference + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool +} + +// Represents an NFS mount that lasts the lifetime of a pod. +// NFS volumes do not support ownership management or SELinux relabeling. +type NFSVolumeSource struct { + // Server is the hostname or IP address of the NFS server + Server string + + // Path is the exported NFS share + Path string + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the NFS export to be mounted with read-only permissions + // +optional + ReadOnly bool +} + +// Represents a Quobyte mount that lasts the lifetime of a pod. +// Quobyte volumes do not support ownership management or SELinux relabeling. +type QuobyteVolumeSource struct { + // Registry represents a single or multiple Quobyte Registry services + // specified as a string as host:port pair (multiple entries are separated with commas) + // which acts as the central registry for volumes + Registry string + + // Volume is a string that references an already created Quobyte volume by name. + Volume string + + // Defaults to false (read/write). ReadOnly here will force + // the Quobyte to be mounted with read-only permissions + // +optional + ReadOnly bool + + // User to map volume access to + // Defaults to the root user + // +optional + User string + + // Group to map volume access to + // Default is no group + // +optional + Group string + + // Tenant owning the given Quobyte volume in the Backend + // Used with dynamically provisioned Quobyte volumes, value is set by the plugin + // +optional + Tenant string +} + +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +type GlusterfsVolumeSource struct { + // Required: EndpointsName is the endpoint name that details Glusterfs topology + EndpointsName string + + // Required: Path is the Glusterfs volume path + Path string + + // Optional: Defaults to false (read/write). 
ReadOnly here will force + // the Glusterfs to be mounted with read-only permissions + // +optional + ReadOnly bool +} + +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +type GlusterfsPersistentVolumeSource struct { + // EndpointsName is the endpoint name that details Glusterfs topology. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + EndpointsName string + + // Path is the Glusterfs volume path. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + Path string + + // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // Defaults to false. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // +optional + ReadOnly bool + + // EndpointsNamespace is the namespace that contains Glusterfs endpoint. + // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // +optional + EndpointsNamespace *string +} + +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. +type RBDVolumeSource struct { + // Required: CephMonitors is a collection of Ceph monitors + CephMonitors []string + // Required: RBDImage is the rados image name + RBDImage string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: RadosPool is the rados pool name,default is rbd + // +optional + RBDPool string + // Optional: RBDUser is the rados user name, default is admin + // +optional + RadosUser string + // Optional: Keyring is the path to key ring for RBDUser, default is /etc/ceph/keyring + // +optional + Keyring string + // Optional: SecretRef is name of the authentication secret for RBDUser, default is nil. + // +optional + SecretRef *LocalObjectReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. +type RBDPersistentVolumeSource struct { + // Required: CephMonitors is a collection of Ceph monitors + CephMonitors []string + // Required: RBDImage is the rados image name + RBDImage string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: RadosPool is the rados pool name,default is rbd + // +optional + RBDPool string + // Optional: RBDUser is the rados user name, default is admin + // +optional + RadosUser string + // Optional: Keyring is the path to key ring for RBDUser, default is /etc/ceph/keyring + // +optional + Keyring string + // Optional: SecretRef is reference to the authentication secret for User, default is empty. 
+ // +optional + SecretRef *SecretReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a cinder volume resource in Openstack. A Cinder volume +// must exist before mounting to a container. The volume must also be +// in the same region as the kubelet. Cinder volumes support ownership +// management and SELinux relabeling. +type CinderVolumeSource struct { + // Unique id of the volume used to identify the cinder volume + VolumeID string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // Optional: points to a secret object containing parameters used to connect + // to OpenStack. + // +optional + SecretRef *LocalObjectReference +} + +// Represents a cinder volume resource in Openstack. A Cinder volume +// must exist before mounting to a container. The volume must also be +// in the same region as the kubelet. Cinder volumes support ownership +// management and SELinux relabeling. +type CinderPersistentVolumeSource struct { + // Unique id of the volume used to identify the cinder volume + VolumeID string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // Optional: points to a secret object containing parameters used to connect + // to OpenStack. + // +optional + SecretRef *SecretReference +} + +// Represents a Ceph Filesystem mount that lasts the lifetime of a pod +// Cephfs volumes do not support ownership management or SELinux relabeling. +type CephFSVolumeSource struct { + // Required: Monitors is a collection of Ceph monitors + Monitors []string + // Optional: Used as the mounted root, rather than the full Ceph tree, default is / + // +optional + Path string + // Optional: User is the rados user name, default is admin + // +optional + User string + // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + // +optional + SecretFile string + // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // +optional + SecretRef *LocalObjectReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// SecretReference represents a Secret Reference. It has enough information to retrieve secret +// in any namespace +type SecretReference struct { + // Name is unique within a namespace to reference a secret resource. + // +optional + Name string + // Namespace defines the space within which the secret name must be unique. + // +optional + Namespace string +} + +// Represents a Ceph Filesystem mount that lasts the lifetime of a pod +// Cephfs volumes do not support ownership management or SELinux relabeling. 
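+//
+// Illustrative only (monitor address and secret name are hypothetical):
+//
+//	src := CephFSPersistentVolumeSource{
+//		Monitors:  []string{"10.16.154.78:6789"},
+//		User:      "admin",
+//		SecretRef: &SecretReference{Name: "ceph-secret", Namespace: "default"},
+//	}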
+type CephFSPersistentVolumeSource struct {
+	// Required: Monitors is a collection of Ceph monitors
+	Monitors []string
+	// Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+	// +optional
+	Path string
+	// Optional: User is the rados user name, default is admin
+	// +optional
+	User string
+	// Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+	// +optional
+	SecretFile string
+	// Optional: SecretRef is a reference to the authentication secret for User, default is empty.
+	// +optional
+	SecretRef *SecretReference
+	// Optional: Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool
+}
+
+// Represents a Flocker volume mounted by the Flocker agent.
+// One and only one of datasetName and datasetUUID should be set.
+// Flocker volumes do not support ownership management or SELinux relabeling.
+type FlockerVolumeSource struct {
+	// Name of the dataset stored as metadata -> name on the dataset for Flocker.
+	// DatasetName should be considered deprecated.
+	// +optional
+	DatasetName string
+	// UUID of the dataset. This is the unique identifier of a Flocker dataset.
+	// +optional
+	DatasetUUID string
+}
+
+// Represents a volume containing downward API info.
+// Downward API volumes support ownership management and SELinux relabeling.
+type DownwardAPIVolumeSource struct {
+	// Items is a list of DownwardAPIVolume file
+	// +optional
+	Items []DownwardAPIVolumeFile
+	// Mode bits to use on created files by default. Must be a value between
+	// 0 and 0777.
+	// Directories within the path are not affected by this setting.
+	// This might be in conflict with other options that affect the file
+	// mode, like fsGroup, and the result can be other mode bits set.
+	// +optional
+	DefaultMode *int32
+}
+
+// Represents a single file containing information from the downward API
+type DownwardAPIVolumeFile struct {
+	// Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be UTF-8 encoded. The first item of the relative path must not start with '..'
+	Path string
+	// Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported.
+	// +optional
+	FieldRef *ObjectFieldSelector
+	// Selects a resource of the container: only resources limits and requests
+	// (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+	// +optional
+	ResourceFieldRef *ResourceFieldSelector
+	// Optional: mode bits to use on this file, must be a value between 0
+	// and 0777. If not specified, the volume defaultMode will be used.
+	// This might be in conflict with other options that affect the file
+	// mode, like fsGroup, and the result can be other mode bits set.
+	// +optional
+	Mode *int32
+}
+
+// Represents downward API info for projecting into a projected volume.
+// Note that this is identical to a downwardAPI volume source without the default
+// mode.
+type DownwardAPIProjection struct {
+	// Items is a list of DownwardAPIVolume file
+	// +optional
+	Items []DownwardAPIVolumeFile
+}
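Before moving on to the Azure types, a short sketch of how the downward API types above compose, again assuming the k8s.io/api/core/v1 mirror of these types; the volume name, file path, and mode are placeholders.

package main

import v1 "k8s.io/api/core/v1"

// Default file mode for the projected files; illustrative value.
var fileMode int32 = 0644

// Hypothetical downward API volume projecting the pod's labels into a
// file named "labels" at the mount point.
var podInfoVolume = v1.Volume{
	Name: "podinfo", // hypothetical volume name
	VolumeSource: v1.VolumeSource{
		DownwardAPI: &v1.DownwardAPIVolumeSource{
			Items: []v1.DownwardAPIVolumeFile{{
				Path:     "labels",
				FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.labels"},
			}},
			DefaultMode: &fileMode,
		},
	},
}

+// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+type AzureFileVolumeSource struct {
+	// the name of the secret that contains Azure Storage Account Name and Key
+	SecretName string
+	// Share Name
+	ShareName string
+	// Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.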
+	// +optional
+	ReadOnly bool
+}
+
+// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+type AzureFilePersistentVolumeSource struct {
+	// the name of the secret that contains Azure Storage Account Name and Key
+	SecretName string
+	// Share Name
+	ShareName string
+	// Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool
+	// the namespace of the secret that contains Azure Storage Account Name and Key;
+	// default is the same namespace as the Pod
+	// +optional
+	SecretNamespace *string
+}
+
+// Represents a vSphere volume resource.
+type VsphereVirtualDiskVolumeSource struct {
+	// Path that identifies vSphere volume vmdk
+	VolumePath string
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// +optional
+	FSType string
+	// Storage Policy Based Management (SPBM) profile name.
+	// +optional
+	StoragePolicyName string
+	// Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
+	// +optional
+	StoragePolicyID string
+}
+
+// Represents a Photon Controller persistent disk resource.
+type PhotonPersistentDiskVolumeSource struct {
+	// ID that identifies Photon Controller persistent disk
+	PdID string
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	FSType string
+}
+
+// PortworxVolumeSource represents a Portworx volume resource.
+type PortworxVolumeSource struct {
+	// VolumeID uniquely identifies a Portworx volume
+	VolumeID string
+	// FSType represents the filesystem type to mount
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
+	// +optional
+	FSType string
+	// Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool
+}
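As a usage sketch for one of the cloud volume sources above, here is a vSphere-backed volume, assuming the k8s.io/api/core/v1 mirror of these types; the VMDK path and SPBM policy name are invented for illustration.

package main

import v1 "k8s.io/api/core/v1"

// Hypothetical vSphere volume referencing a VMDK and an SPBM profile;
// the datastore path and policy name are placeholders.
var vmdkVolume = v1.Volume{
	Name: "vmdk-storage", // hypothetical volume name
	VolumeSource: v1.VolumeSource{
		VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
			VolumePath:        "[datastore1] volumes/myDisk.vmdk",
			FSType:            "ext4",
			StoragePolicyName: "gold", // hypothetical SPBM profile
		},
	},
}

+type AzureDataDiskCachingMode string
+type AzureDataDiskKind string
+
+const (
+	AzureDataDiskCachingNone      AzureDataDiskCachingMode = "None"
+	AzureDataDiskCachingReadOnly  AzureDataDiskCachingMode = "ReadOnly"
+	AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite"
+
+	AzureSharedBlobDisk    AzureDataDiskKind = "Shared"
+	AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated"
+	AzureManagedDisk       AzureDataDiskKind = "Managed"
+)
+
+// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+type AzureDiskVolumeSource struct {
+	// The Name of the data disk in the blob storage
+	DiskName string
+	// The URI of the data disk in the blob storage
+	DataDiskURI string
+	// Host Caching mode: None, Read Only, Read Write.
+	// +optional
+	CachingMode *AzureDataDiskCachingMode
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// +optional
+	FSType *string
+	// Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly *bool
+	// Expected values: Shared (multiple blob disks per storage account), Dedicated (single blob disk per storage account), Managed (azure managed data disk, only in managed availability set). Defaults to Shared.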
+	Kind *AzureDataDiskKind
+}
+
+// ScaleIOVolumeSource represents a persistent ScaleIO volume
+type ScaleIOVolumeSource struct {
+	// The host address of the ScaleIO API Gateway.
+	Gateway string
+	// The name of the storage system as configured in ScaleIO.
+	System string
+	// SecretRef references the secret for ScaleIO user and other
+	// sensitive information. If this is not provided, the Login operation will fail.
+	SecretRef *LocalObjectReference
+	// Flag to enable/disable SSL communication with Gateway, default false
+	// +optional
+	SSLEnabled bool
+	// The name of the ScaleIO Protection Domain for the configured storage.
+	// +optional
+	ProtectionDomain string
+	// The ScaleIO Storage Pool associated with the protection domain.
+	// +optional
+	StoragePool string
+	// Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+	// Default is ThinProvisioned.
+	// +optional
+	StorageMode string
+	// The name of a volume already created in the ScaleIO system
+	// that is associated with this volume source.
+	VolumeName string
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs".
+	// Default is "xfs".
+	// +optional
+	FSType string
+	// Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool
+}
+
+// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume that can be defined
+// by an admin via a storage class, for instance.
+type ScaleIOPersistentVolumeSource struct {
+	// The host address of the ScaleIO API Gateway.
+	Gateway string
+	// The name of the storage system as configured in ScaleIO.
+	System string
+	// SecretRef references the secret for ScaleIO user and other
+	// sensitive information. If this is not provided, the Login operation will fail.
+	SecretRef *SecretReference
+	// Flag to enable/disable SSL communication with Gateway, default false
+	// +optional
+	SSLEnabled bool
+	// The name of the ScaleIO Protection Domain for the configured storage.
+	// +optional
+	ProtectionDomain string
+	// The ScaleIO Storage Pool associated with the protection domain.
+	// +optional
+	StoragePool string
+	// Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+	// Default is ThinProvisioned.
+	// +optional
+	StorageMode string
+	// The name of a volume created in the ScaleIO system
+	// that is associated with this volume source.
+	VolumeName string
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs".
+	// Default is "xfs".
+	// +optional
+	FSType string
+	// Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool
+}
+
+// Represents a StorageOS persistent volume resource.
+type StorageOSVolumeSource struct {
+	// VolumeName is the human-readable name of the StorageOS volume. Volume
+	// names are only unique within a namespace.
+	VolumeName string
+	// VolumeNamespace specifies the scope of the volume within StorageOS. If no
+	// namespace is specified then the Pod's namespace will be used. This allows the
+	// Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+	// Set VolumeName to any name to override the default behaviour.
+	// Set to "default" if you are not using namespaces within StorageOS.
+ // Namespaces that do not pre-exist within StorageOS will be created. + // +optional + VolumeNamespace string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // SecretRef specifies the secret to use for obtaining the StorageOS API + // credentials. If not specified, default values will be attempted. + // +optional + SecretRef *LocalObjectReference +} + +// Represents a StorageOS persistent volume resource. +type StorageOSPersistentVolumeSource struct { + // VolumeName is the human-readable name of the StorageOS volume. Volume + // names are only unique within a namespace. + VolumeName string + // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // namespace is specified then the Pod's namespace will be used. This allows the + // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + // Set VolumeName to any name to override the default behaviour. + // Set to "default" if you are not using namespaces within StorageOS. + // Namespaces that do not pre-exist within StorageOS will be created. + // +optional + VolumeNamespace string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // SecretRef specifies the secret to use for obtaining the StorageOS API + // credentials. If not specified, default values will be attempted. + // +optional + SecretRef *ObjectReference +} + +// Adapts a ConfigMap into a volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// volume as files using the keys in the Data field as the file names, unless +// the items element is populated with specific mappings of keys to paths. +// ConfigMap volumes support ownership management and SELinux relabeling. +type ConfigMapVolumeSource struct { + LocalObjectReference + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 + // Specify whether the ConfigMap or its keys must be defined + // +optional + Optional *bool +} + +// Adapts a ConfigMap into a projected volume. 
+//
+// The contents of the target ConfigMap's Data field will be presented in a
+// projected volume as files using the keys in the Data field as the file names,
+// unless the items element is populated with specific mappings of keys to paths.
+// Note that this is identical to a configmap volume source without the default
+// mode.
+type ConfigMapProjection struct {
+	LocalObjectReference
+	// If unspecified, each key-value pair in the Data field of the referenced
+	// ConfigMap will be projected into the volume as a file whose name is the
+	// key and content is the value. If specified, the listed keys will be
+	// projected into the specified paths, and unlisted keys will not be
+	// present. If a key is specified which is not present in the ConfigMap,
+	// the volume setup will error unless it is marked optional. Paths must be
+	// relative and may not contain the '..' path or start with '..'.
+	// +optional
+	Items []KeyToPath
+	// Specify whether the ConfigMap or its keys must be defined
+	// +optional
+	Optional *bool
+}
+
+// ServiceAccountTokenProjection represents a projected service account token
+// volume. This projection can be used to insert a service account token into
+// the pod's runtime filesystem for use against APIs (Kubernetes API Server or
+// otherwise).
+type ServiceAccountTokenProjection struct {
+	// Audience is the intended audience of the token. A recipient of a token
+	// must identify itself with an identifier specified in the audience of the
+	// token, and otherwise should reject the token. The audience defaults to the
+	// identifier of the apiserver.
+	Audience string
+	// ExpirationSeconds is the requested duration of validity of the service
+	// account token. As the token approaches expiration, the kubelet volume
+	// plugin will proactively rotate the service account token. The kubelet will
+	// start trying to rotate the token if the token is older than 80 percent of
+	// its time to live or if the token is older than 24 hours. Defaults to 1 hour
+	// and must be at least 10 minutes.
+	ExpirationSeconds int64
+	// Path is the path relative to the mount point of the file to project the
+	// token into.
+	Path string
+}
+
+// Represents a projected volume source
+type ProjectedVolumeSource struct {
+	// list of volume projections
+	Sources []VolumeProjection
+	// Mode bits to use on created files by default. Must be a value between
+	// 0 and 0777.
+	// Directories within the path are not affected by this setting.
+	// This might be in conflict with other options that affect the file
+	// mode, like fsGroup, and the result can be other mode bits set.
+	// +optional
+	DefaultMode *int32
+}
+
+// Projection that may be projected along with other supported volume types
+type VolumeProjection struct {
+	// all types below are the supported types for projection into the same volume
+
+	// information about the secret data to project
+	Secret *SecretProjection
+	// information about the downwardAPI data to project
+	DownwardAPI *DownwardAPIProjection
+	// information about the configMap data to project
+	ConfigMap *ConfigMapProjection
+	// information about the serviceAccountToken data to project
+	ServiceAccountToken *ServiceAccountTokenProjection
+}
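To show how the projection types above fit together, here is a sketch of a ProjectedVolumeSource combining a ConfigMap and downward API info into one volume, assuming the k8s.io/api/core/v1 mirror of these types; the ConfigMap name is hypothetical.

package main

import v1 "k8s.io/api/core/v1"

// Hypothetical projected volume source merging two projections into a
// single mount; each Sources entry sets exactly one projection.
var projected = v1.ProjectedVolumeSource{
	Sources: []v1.VolumeProjection{
		{ConfigMap: &v1.ConfigMapProjection{
			LocalObjectReference: v1.LocalObjectReference{Name: "app-config"}, // hypothetical name
		}},
		{DownwardAPI: &v1.DownwardAPIProjection{
			Items: []v1.DownwardAPIVolumeFile{{
				Path:     "namespace",
				FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.namespace"},
			}},
		}},
	},
}

+// Maps a string key to a path within a volume.
+type KeyToPath struct {
+	// The key to project.
+	Key string
+
+	// The relative path of the file to map the key to.
+	// May not be an absolute path.
+	// May not contain the path element '..'.
+	// May not start with the string '..'.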
+	Path string
+	// Optional: mode bits to use on this file, should be a value between 0
+	// and 0777. If not specified, the volume defaultMode will be used.
+	// This might be in conflict with other options that affect the file
+	// mode, like fsGroup, and the result can be other mode bits set.
+	// +optional
+	Mode *int32
+}
+
+// Local represents directly-attached storage with node affinity (Beta feature)
+type LocalVolumeSource struct {
+	// The full path to the volume on the node.
+	// It can be either a directory or block device (disk, partition, ...).
+	Path string
+
+	// Filesystem type to mount.
+	// It applies only when the Path is a block device.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a filesystem if unspecified.
+	// +optional
+	FSType *string
+}
+
+// Represents storage that is managed by an external CSI volume driver.
+type CSIPersistentVolumeSource struct {
+	// Driver is the name of the driver to use for this volume.
+	// Required.
+	Driver string
+
+	// VolumeHandle is the unique volume name returned by the CSI volume
+	// plugin's CreateVolume to refer to the volume on all subsequent calls.
+	// Required.
+	VolumeHandle string
+
+	// Optional: The value to pass to ControllerPublishVolumeRequest.
+	// Defaults to false (read/write).
+	// +optional
+	ReadOnly bool
+
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs".
+	// +optional
+	FSType string
+
+	// Attributes of the volume to publish.
+	// +optional
+	VolumeAttributes map[string]string
+
+	// ControllerPublishSecretRef is a reference to the secret object containing
+	// sensitive information to pass to the CSI driver to complete the CSI
+	// ControllerPublishVolume and ControllerUnpublishVolume calls.
+	// This field is optional, and may be empty if no secret is required. If the
+	// secret object contains more than one secret, all secrets are passed.
+	// +optional
+	ControllerPublishSecretRef *SecretReference
+
+	// NodeStageSecretRef is a reference to the secret object containing sensitive
+	// information to pass to the CSI driver to complete the CSI NodeStageVolume
+	// and NodeUnstageVolume calls.
+	// This field is optional, and may be empty if no secret is required. If the
+	// secret object contains more than one secret, all secrets are passed.
+	// +optional
+	NodeStageSecretRef *SecretReference
+
+	// NodePublishSecretRef is a reference to the secret object containing
+	// sensitive information to pass to the CSI driver to complete the CSI
+	// NodePublishVolume and NodeUnpublishVolume calls.
+	// This field is optional, and may be empty if no secret is required. If the
+	// secret object contains more than one secret, all secrets are passed.
+	// +optional
+	NodePublishSecretRef *SecretReference
+
+	// ControllerExpandSecretRef is a reference to the secret object containing
+	// sensitive information to pass to the CSI driver to complete the CSI
+	// ControllerExpandVolume call.
+	// This is an alpha field and requires enabling ExpandCSIVolumes feature gate.
+	// This field is optional, and may be empty if no secret is required. If the
+	// secret object contains more than one secret, all secrets are passed.
+	// +optional
+	ControllerExpandSecretRef *SecretReference
+}
+
+// Represents a source location of a volume to mount, managed by an external CSI driver
+type CSIVolumeSource struct {
+	// Driver is the name of the CSI driver that handles this volume.
+	// Consult with your admin for the correct name as registered in the cluster.
+	// Required.
+	Driver string
+
+	// Specifies a read-only configuration for the volume.
+	// Defaults to false (read/write).
+	// +optional
+	ReadOnly *bool
+
+	// Filesystem type to mount. Ex. "ext4", "xfs", "ntfs".
+	// If not provided, the empty value is passed to the associated CSI driver
+	// which will determine the default filesystem to apply.
+	// +optional
+	FSType *string
+
+	// VolumeAttributes stores driver-specific properties that are passed to the CSI
+	// driver. Consult your driver's documentation for supported values.
+	// +optional
+	VolumeAttributes map[string]string
+
+	// NodePublishSecretRef is a reference to the secret object containing
+	// sensitive information to pass to the CSI driver to complete the CSI
+	// NodePublishVolume and NodeUnpublishVolume calls.
+	// This field is optional, and may be empty if no secret is required. If the
+	// secret object contains more than one secret, all secret references are passed.
+	// +optional
+	NodePublishSecretRef *LocalObjectReference
+}
+
+// ContainerPort represents a network port in a single container
+type ContainerPort struct {
+	// Optional: If specified, this must be an IANA_SVC_NAME. Each named port
+	// in a pod must have a unique name.
+	// +optional
+	Name string
+	// Optional: If specified, this must be a valid port number, 0 < x < 65536.
+	// If HostNetwork is specified, this must match ContainerPort.
+	// +optional
+	HostPort int32
+	// Required: This must be a valid port number, 0 < x < 65536.
+	ContainerPort int32
+	// Required: Supports "TCP", "UDP" and "SCTP"
+	// +optional
+	Protocol Protocol
+	// Optional: What host IP to bind the external port to.
+	// +optional
+	HostIP string
+}
+
+// VolumeMount describes a mounting of a Volume within a container.
+type VolumeMount struct {
+	// Required: This must match the Name of a Volume [above].
+	Name string
+	// Optional: Defaults to false (read-write).
+	// +optional
+	ReadOnly bool
+	// Required. If the path is not an absolute path (e.g. some/path) it
+	// will be prepended with the appropriate root prefix for the operating
+	// system. On Linux this is '/', on Windows this is 'C:\'.
+	MountPath string
+	// Path within the volume from which the container's volume should be mounted.
+	// Defaults to "" (volume's root).
+	// +optional
+	SubPath string
+	// mountPropagation determines how mounts are propagated from the host
+	// to container and the other way around.
+	// When not set, MountPropagationNone is used.
+	// This field is beta in 1.10.
+	// +optional
+	MountPropagation *MountPropagationMode
+	// Expanded path within the volume from which the container's volume should be mounted.
+	// Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+	// Defaults to "" (volume's root).
+	// SubPathExpr and SubPath are mutually exclusive.
+	// This field is beta in 1.15.
+	// +optional
+	SubPathExpr string
+}
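A brief sketch of the VolumeMount type just defined, assuming the k8s.io/api/core/v1 mirror of these types; the volume name and mount path are placeholders and would need to match a Volume declared in the same pod.

package main

import v1 "k8s.io/api/core/v1"

// Hypothetical read-only mount of a volume named "podinfo" into a
// container at /etc/podinfo.
var mount = v1.VolumeMount{
	Name:      "podinfo", // must match a Volume name in the pod
	MountPath: "/etc/podinfo",
	ReadOnly:  true,
}

+// MountPropagationMode describes mount propagation.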
+type MountPropagationMode string
+
+const (
+	// MountPropagationNone means that the volume in a container will
+	// not receive new mounts from the host or other containers, and filesystems
+	// mounted inside the container won't be propagated to the host or other
+	// containers.
+	// Note that this mode corresponds to "private" in Linux terminology.
+	MountPropagationNone MountPropagationMode = "None"
+	// MountPropagationHostToContainer means that the volume in a container will
+	// receive new mounts from the host or other containers, but filesystems
+	// mounted inside the container won't be propagated to the host or other
+	// containers.
+	// Note that this mode is recursively applied to all mounts in the volume
+	// ("rslave" in Linux terminology).
+	MountPropagationHostToContainer MountPropagationMode = "HostToContainer"
+	// MountPropagationBidirectional means that the volume in a container will
+	// receive new mounts from the host or other containers, and its own mounts
+	// will be propagated from the container to the host or other containers.
+	// Note that this mode is recursively applied to all mounts in the volume
+	// ("rshared" in Linux terminology).
+	MountPropagationBidirectional MountPropagationMode = "Bidirectional"
+)
+
+// VolumeDevice describes a mapping of a raw block device within a container.
+type VolumeDevice struct {
+	// name must match the name of a persistentVolumeClaim in the pod
+	Name string
+	// devicePath is the path inside of the container that the device will be mapped to.
+	DevicePath string
+}
+
+// EnvVar represents an environment variable present in a Container.
+type EnvVar struct {
+	// Required: This must be a C_IDENTIFIER.
+	Name string
+	// Optional: no more than one of the following may be specified.
+	// Optional: Defaults to ""; variable references $(VAR_NAME) are expanded
+	// using the previously defined environment variables in the container and
+	// any service environment variables. If a variable cannot be resolved,
+	// the reference in the input string will be unchanged. The $(VAR_NAME)
+	// syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped
+	// references will never be expanded, regardless of whether the variable
+	// exists or not.
+	// +optional
+	Value string
+	// Optional: Specifies a source the value of this var should come from.
+	// +optional
+	ValueFrom *EnvVarSource
+}
+
+// EnvVarSource represents a source for the value of an EnvVar.
+// Only one of its fields may be set.
+type EnvVarSource struct {
+	// Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
+	// metadata.uid, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
+	// +optional
+	FieldRef *ObjectFieldSelector
+	// Selects a resource of the container: only resources limits and requests
+	// (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+	// +optional
+	ResourceFieldRef *ResourceFieldSelector
+	// Selects a key of a ConfigMap.
+	// +optional
+	ConfigMapKeyRef *ConfigMapKeySelector
+	// Selects a key of a secret in the pod's namespace.
+	// +optional
+	SecretKeyRef *SecretKeySelector
+}
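The EnvVar/EnvVarSource pair above is easiest to see side by side: one literal value and one resolved from pod metadata via the downward API. The sketch assumes the k8s.io/api/core/v1 mirror of these types; the variable names and values are illustrative.

package main

import v1 "k8s.io/api/core/v1"

// Hypothetical container environment: a literal value plus a value
// injected from the pod's own name at runtime.
var env = []v1.EnvVar{
	{Name: "LOG_LEVEL", Value: "debug"},
	{Name: "POD_NAME", ValueFrom: &v1.EnvVarSource{
		FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.name"},
	}},
}

+// ObjectFieldSelector selects an APIVersioned field of an object.
+type ObjectFieldSelector struct {
+	// Required: Version of the schema the FieldPath is written in terms of.
+	// If no value is specified, it will be defaulted to the APIVersion of the
+	// enclosing object.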
+ APIVersion string + // Required: Path of the field to select in the specified API version + FieldPath string +} + +// ResourceFieldSelector represents container resources (cpu, memory) and their output format +type ResourceFieldSelector struct { + // Container name: required for volumes, optional for env vars + // +optional + ContainerName string + // Required: resource to select + Resource string + // Specifies the output format of the exposed resources, defaults to "1" + // +optional + Divisor resource.Quantity +} + +// Selects a key from a ConfigMap. +type ConfigMapKeySelector struct { + // The ConfigMap to select from. + LocalObjectReference + // The key to select. + Key string + // Specify whether the ConfigMap or its key must be defined + // +optional + Optional *bool +} + +// SecretKeySelector selects a key of a Secret. +type SecretKeySelector struct { + // The name of the secret in the pod's namespace to select from. + LocalObjectReference + // The key of the secret to select from. Must be a valid secret key. + Key string + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool +} + +// EnvFromSource represents the source of a set of ConfigMaps +type EnvFromSource struct { + // An optional identifier to prepend to each key in the ConfigMap. + // +optional + Prefix string + // The ConfigMap to select from. + //+optional + ConfigMapRef *ConfigMapEnvSource + // The Secret to select from. + //+optional + SecretRef *SecretEnvSource +} + +// ConfigMapEnvSource selects a ConfigMap to populate the environment +// variables with. +// +// The contents of the target ConfigMap's Data field will represent the +// key-value pairs as environment variables. +type ConfigMapEnvSource struct { + // The ConfigMap to select from. + LocalObjectReference + // Specify whether the ConfigMap must be defined + // +optional + Optional *bool +} + +// SecretEnvSource selects a Secret to populate the environment +// variables with. +// +// The contents of the target Secret's Data field will represent the +// key-value pairs as environment variables. +type SecretEnvSource struct { + // The Secret to select from. + LocalObjectReference + // Specify whether the Secret must be defined + // +optional + Optional *bool +} + +// HTTPHeader describes a custom header to be used in HTTP probes +type HTTPHeader struct { + // The header field name + Name string + // The header field value + Value string +} + +// HTTPGetAction describes an action based on HTTP Get requests. +type HTTPGetAction struct { + // Optional: Path to access on the HTTP server. + // +optional + Path string + // Required: Name or number of the port to access on the container. + // +optional + Port intstr.IntOrString + // Optional: Host name to connect to, defaults to the pod IP. You + // probably want to set "Host" in httpHeaders instead. + // +optional + Host string + // Optional: Scheme to use for connecting to the host, defaults to HTTP. + // +optional + Scheme URIScheme + // Optional: Custom headers to set in the request. HTTP allows repeated headers. 
+	// +optional
+	HTTPHeaders []HTTPHeader
+}
+
+// URIScheme identifies the scheme used for connection to a host for Get actions
+type URIScheme string
+
+const (
+	// URISchemeHTTP means that the scheme used will be http://
+	URISchemeHTTP URIScheme = "HTTP"
+	// URISchemeHTTPS means that the scheme used will be https://
+	URISchemeHTTPS URIScheme = "HTTPS"
+)
+
+// TCPSocketAction describes an action based on opening a socket
+type TCPSocketAction struct {
+	// Required: Port to connect to.
+	// +optional
+	Port intstr.IntOrString
+	// Optional: Host name to connect to, defaults to the pod IP.
+	// +optional
+	Host string
+}
+
+// ExecAction describes a "run in container" action.
+type ExecAction struct {
+	// Command is the command line to execute inside the container, the working directory for the
+	// command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+	// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+	// a shell, you need to explicitly call out to that shell.
+	// +optional
+	Command []string
+}
+
+// Probe describes a health check to be performed against a container to determine whether it is
+// alive or ready to receive traffic.
+type Probe struct {
+	// The action taken to determine the health of a container
+	Handler
+	// Length of time before health checking is activated. In seconds.
+	// +optional
+	InitialDelaySeconds int32
+	// Length of time before health checking times out. In seconds.
+	// +optional
+	TimeoutSeconds int32
+	// How often (in seconds) to perform the probe.
+	// +optional
+	PeriodSeconds int32
+	// Minimum consecutive successes for the probe to be considered successful after having failed.
+	// Must be 1 for liveness.
+	// +optional
+	SuccessThreshold int32
+	// Minimum consecutive failures for the probe to be considered failed after having succeeded.
+	// +optional
+	FailureThreshold int32
+}
+
+// PullPolicy describes a policy for if/when to pull a container image
+type PullPolicy string
+
+const (
+	// PullAlways means that kubelet always attempts to pull the latest image. Container will fail if the pull fails.
+	PullAlways PullPolicy = "Always"
+	// PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present.
+	PullNever PullPolicy = "Never"
+	// PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails.
+	PullIfNotPresent PullPolicy = "IfNotPresent"
+)
+
+// PreemptionPolicy describes a policy for if/when to preempt a pod.
+type PreemptionPolicy string
+
+const (
+	// PreemptLowerPriority means that a pod can preempt other pods with lower priority.
+	PreemptLowerPriority PreemptionPolicy = "PreemptLowerPriority"
+	// PreemptNever means that a pod never preempts other pods with lower priority.
+	PreemptNever PreemptionPolicy = "Never"
+)
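A sketch of a liveness Probe built from the Handler and HTTPGetAction types above, assuming the k8s.io/api/core/v1 mirror of this era (where Probe embeds Handler; later releases renamed it ProbeHandler); the endpoint, port, and timings are placeholders.

package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// Hypothetical liveness probe: GET /healthz on port 8080, first checked
// after 5s, then every 10s; three consecutive failures mark it failed.
var liveness = v1.Probe{
	Handler: v1.Handler{
		HTTPGet: &v1.HTTPGetAction{
			Path: "/healthz",
			Port: intstr.FromInt(8080),
		},
	},
	InitialDelaySeconds: 5,
	PeriodSeconds:       10,
	FailureThreshold:    3,
}

+// TerminationMessagePolicy describes how termination messages are retrieved from a container.
+type TerminationMessagePolicy string
+
+const (
+	// TerminationMessageReadFile is the default behavior and will set the container status message to
+	// the contents of the container's terminationMessagePath when the container exits.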
+	TerminationMessageReadFile TerminationMessagePolicy = "File"
+	// TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs
+	// for the container status message when the container exits with an error and the
+	// terminationMessagePath has no contents.
+	TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError"
+)
+
+// Capability represents a POSIX capability type
+type Capability string
+
+// Capabilities represent POSIX capabilities that can be added to or removed from a running container.
+type Capabilities struct {
+	// Added capabilities
+	// +optional
+	Add []Capability
+	// Removed capabilities
+	// +optional
+	Drop []Capability
+}
+
+// ResourceRequirements describes the compute resource requirements.
+type ResourceRequirements struct {
+	// Limits describes the maximum amount of compute resources allowed.
+	// +optional
+	Limits ResourceList
+	// Requests describes the minimum amount of compute resources required.
+	// If Request is omitted for a container, it defaults to Limits if that is explicitly specified,
+	// otherwise to an implementation-defined value.
+	// +optional
+	Requests ResourceList
+}
+
+// Container represents a single container that is expected to be run on the host.
+type Container struct {
+	// Required: This must be a DNS_LABEL. Each container in a pod must
+	// have a unique name.
+	Name string
+	// Required.
+	Image string
+	// Optional: The docker image's entrypoint is used if this is not provided; cannot be updated.
+	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+	// cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
+	// can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded,
+	// regardless of whether the variable exists or not.
+	// +optional
+	Command []string
+	// Optional: The docker image's cmd is used if this is not provided; cannot be updated.
+	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+	// cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
+	// can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded,
+	// regardless of whether the variable exists or not.
+	// +optional
+	Args []string
+	// Optional: Defaults to Docker's default.
+	// +optional
+	WorkingDir string
+	// +optional
+	Ports []ContainerPort
+	// List of sources to populate environment variables in the container.
+	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+	// will be reported as an event when the container is starting. When a key exists in multiple
+	// sources, the value associated with the last source will take precedence.
+	// Values defined by an Env with a duplicate key will take precedence.
+	// Cannot be updated.
+	// +optional
+	EnvFrom []EnvFromSource
+	// +optional
+	Env []EnvVar
+	// Compute resource requirements.
+	// +optional
+	Resources ResourceRequirements
+	// +optional
+	VolumeMounts []VolumeMount
+	// volumeDevices is the list of block devices to be used by the container.
+	// This is a beta feature.
+	// +optional
+	VolumeDevices []VolumeDevice
+	// +optional
+	LivenessProbe *Probe
+	// +optional
+	ReadinessProbe *Probe
+	// +optional
+	Lifecycle *Lifecycle
+	// +optional
+	TerminationMessagePath string
+	// +optional
+	TerminationMessagePolicy TerminationMessagePolicy
+	// Required: Policy for pulling images for this container
+	ImagePullPolicy PullPolicy
+	// Optional: SecurityContext defines the security options the container should be run with.
+	// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+	// +optional
+	SecurityContext *SecurityContext
+
+	// Variables for interactive containers; these have very specialized use-cases (e.g. debugging)
+	// and shouldn't be used for general purpose containers.
+	// +optional
+	Stdin bool
+	// +optional
+	StdinOnce bool
+	// +optional
+	TTY bool
+}
+
+// Handler defines a specific action that should be taken
+// TODO: pass structured data to these actions, and document that data here.
+type Handler struct {
+	// One and only one of the following should be specified.
+	// Exec specifies the action to take.
+	// +optional
+	Exec *ExecAction
+	// HTTPGet specifies the http request to perform.
+	// +optional
+	HTTPGet *HTTPGetAction
+	// TCPSocket specifies an action involving a TCP port.
+	// TODO: implement a realistic TCP lifecycle hook
+	// +optional
+	TCPSocket *TCPSocketAction
+}
+
+// Lifecycle describes actions that the management system should take in response to container lifecycle
+// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
+// until the action is complete, unless the container process fails, in which case the handler is aborted.
+type Lifecycle struct {
+	// PostStart is called immediately after a container is created. If the handler fails, the container
+	// is terminated and restarted.
+	// +optional
+	PostStart *Handler
+	// PreStop is called immediately before a container is terminated due to an
+	// API request or management event such as liveness probe failure,
+	// preemption, resource contention, etc. The handler is not called if the
+	// container crashes or exits. The reason for termination is passed to the
+	// handler. The Pod's termination grace period countdown begins before the
+	// PreStop hook is executed. Regardless of the outcome of the handler, the
+	// container will eventually terminate within the Pod's termination grace
+	// period. Other management of the container blocks until the hook completes
+	// or until the termination grace period is reached.
+	// +optional
+	PreStop *Handler
+}
+
+// The below types are used by kube_client and api_server.
+
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition;
+// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+	ConditionTrue    ConditionStatus = "True"
+	ConditionFalse   ConditionStatus = "False"
+	ConditionUnknown ConditionStatus = "Unknown"
+)
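To illustrate the Lifecycle type above, here is a container with a PreStop hook that delays shutdown briefly so in-flight connections can drain, assuming the k8s.io/api/core/v1 mirror of these types; the container name, image, and command are placeholders.

package main

import v1 "k8s.io/api/core/v1"

// Hypothetical container whose PreStop hook sleeps before the TERM
// signal arrives; the sleep must fit within the termination grace period.
var app = v1.Container{
	Name:  "web",
	Image: "example.com/web:1.0", // hypothetical image
	Lifecycle: &v1.Lifecycle{
		PreStop: &v1.Handler{
			Exec: &v1.ExecAction{Command: []string{"/bin/sh", "-c", "sleep 5"}},
		},
	},
}

+type ContainerStateWaiting struct {
+	// A brief CamelCase string indicating details about why the container is in waiting state.
+	// +optional
+	Reason string
+	// A human-readable message indicating details about why the container is in waiting state.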
+ // +optional + Message string +} + +type ContainerStateRunning struct { + // +optional + StartedAt metav1.Time +} + +type ContainerStateTerminated struct { + ExitCode int32 + // +optional + Signal int32 + // +optional + Reason string + // +optional + Message string + // +optional + StartedAt metav1.Time + // +optional + FinishedAt metav1.Time + // +optional + ContainerID string +} + +// ContainerState holds a possible state of container. +// Only one of its members may be specified. +// If none of them is specified, the default one is ContainerStateWaiting. +type ContainerState struct { + // +optional + Waiting *ContainerStateWaiting + // +optional + Running *ContainerStateRunning + // +optional + Terminated *ContainerStateTerminated +} + +type ContainerStatus struct { + // Each container in a pod must have a unique name. + Name string + // +optional + State ContainerState + // +optional + LastTerminationState ContainerState + // Ready specifies whether the container has passed its readiness check. + Ready bool + // Note that this is calculated from dead containers. But those containers are subject to + // garbage collection. This value will get capped at 5 by GC. + RestartCount int32 + Image string + ImageID string + // +optional + ContainerID string +} + +// PodPhase is a label for the condition of a pod at the current time. +type PodPhase string + +// These are the valid statuses of pods. +const ( + // PodPending means the pod has been accepted by the system, but one or more of the containers + // has not been started. This includes time before being bound to a node, as well as time spent + // pulling images onto the host. + PodPending PodPhase = "Pending" + // PodRunning means the pod has been bound to a node and all of the containers have been started. + // At least one container is still running or is in the process of being restarted. + PodRunning PodPhase = "Running" + // PodSucceeded means that all containers in the pod have voluntarily terminated + // with a container exit code of 0, and the system is not going to restart any of these containers. + PodSucceeded PodPhase = "Succeeded" + // PodFailed means that all containers in the pod have terminated, and at least one container has + // terminated in a failure (exited with a non-zero exit code or was stopped by the system). + PodFailed PodPhase = "Failed" + // PodUnknown means that for some reason the state of the pod could not be obtained, typically due + // to an error in communicating with the host of the pod. + PodUnknown PodPhase = "Unknown" +) + +type PodConditionType string + +// These are valid conditions of pod. +const ( + // PodScheduled represents status of the scheduling process for this pod. + PodScheduled PodConditionType = "PodScheduled" + // PodReady means the pod is able to service requests and should be added to the + // load balancing pools of all matching services. + PodReady PodConditionType = "Ready" + // PodInitialized means that all init containers in the pod have started successfully. + PodInitialized PodConditionType = "Initialized" + // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler + // can't schedule the pod right now, for example due to insufficient resources in the cluster. + PodReasonUnschedulable = "Unschedulable" + // ContainersReady indicates whether all containers in the pod are ready. 
+	ContainersReady PodConditionType = "ContainersReady"
+)
+
+type PodCondition struct {
+	Type   PodConditionType
+	Status ConditionStatus
+	// +optional
+	LastProbeTime metav1.Time
+	// +optional
+	LastTransitionTime metav1.Time
+	// +optional
+	Reason string
+	// +optional
+	Message string
+}
+
+// RestartPolicy describes how the container should be restarted.
+// Only one of the following restart policies may be specified.
+// If none of the following policies is specified, the default one
+// is RestartPolicyAlways.
+type RestartPolicy string
+
+const (
+	RestartPolicyAlways    RestartPolicy = "Always"
+	RestartPolicyOnFailure RestartPolicy = "OnFailure"
+	RestartPolicyNever     RestartPolicy = "Never"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodList is a list of Pods.
+type PodList struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ListMeta
+
+	Items []Pod
+}
+
+// DNSPolicy defines how a pod's DNS will be configured.
+type DNSPolicy string
+
+const (
+	// DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS
+	// first, if it is available, then fall back on the default
+	// (as determined by kubelet) DNS settings.
+	DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet"
+
+	// DNSClusterFirst indicates that the pod should use cluster DNS
+	// first unless hostNetwork is true, if it is available, then
+	// fall back on the default (as determined by kubelet) DNS settings.
+	DNSClusterFirst DNSPolicy = "ClusterFirst"
+
+	// DNSDefault indicates that the pod should use the default (as
+	// determined by kubelet) DNS settings.
+	DNSDefault DNSPolicy = "Default"
+
+	// DNSNone indicates that the pod should use empty DNS settings. DNS
+	// parameters such as nameservers and search paths should be defined via
+	// DNSConfig.
+	DNSNone DNSPolicy = "None"
+)
+
+// A node selector represents the union of the results of one or more label queries
+// over a set of nodes; that is, it represents the OR of the selectors represented
+// by the node selector terms.
+type NodeSelector struct {
+	// Required. A list of node selector terms. The terms are ORed.
+	NodeSelectorTerms []NodeSelectorTerm
+}
+
+// A null or empty node selector term matches no objects. Its requirements are ANDed.
+// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+type NodeSelectorTerm struct {
+	// A list of node selector requirements by node's labels.
+	MatchExpressions []NodeSelectorRequirement
+	// A list of node selector requirements by node's fields.
+	MatchFields []NodeSelectorRequirement
+}
+
+// A node selector requirement is a selector that contains values, a key, and an operator
+// that relates the key and values.
+type NodeSelectorRequirement struct {
+	// The label key that the selector applies to.
+	Key string
+	// Represents a key's relationship to a set of values.
+	// Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+	Operator NodeSelectorOperator
+	// An array of string values. If the operator is In or NotIn,
+	// the values array must be non-empty. If the operator is Exists or DoesNotExist,
+	// the values array must be empty. If the operator is Gt or Lt, the values
+	// array must have a single element, which will be interpreted as an integer.
+	// This array is replaced during a strategic merge patch.
+	// +optional
+	Values []string
+}
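A sketch of the node selector types above: match nodes whose disktype label is ssd. It assumes the k8s.io/api/core/v1 mirror of these types; the label key and value are invented. The terms are ORed, while the requirements inside a term are ANDed.

package main

import v1 "k8s.io/api/core/v1"

// Hypothetical node selector with a single term and requirement.
var selector = v1.NodeSelector{
	NodeSelectorTerms: []v1.NodeSelectorTerm{{
		MatchExpressions: []v1.NodeSelectorRequirement{{
			Key:      "disktype", // hypothetical node label
			Operator: v1.NodeSelectorOpIn,
			Values:   []string{"ssd"},
		}},
	}},
}

+// A node selector operator is the set of operators that can be used in
+// a node selector requirement.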
+type NodeSelectorOperator string
+
+const (
+	NodeSelectorOpIn           NodeSelectorOperator = "In"
+	NodeSelectorOpNotIn        NodeSelectorOperator = "NotIn"
+	NodeSelectorOpExists       NodeSelectorOperator = "Exists"
+	NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist"
+	NodeSelectorOpGt           NodeSelectorOperator = "Gt"
+	NodeSelectorOpLt           NodeSelectorOperator = "Lt"
+)
+
+// A topology selector term represents the result of label queries.
+// A null or empty topology selector term matches no objects.
+// Its requirements are ANDed.
+// It provides a subset of the functionality of NodeSelectorTerm.
+// This is an alpha feature and may change in the future.
+type TopologySelectorTerm struct {
+	// A list of topology selector requirements by labels.
+	// +optional
+	MatchLabelExpressions []TopologySelectorLabelRequirement
+}
+
+// A topology selector requirement is a selector that matches a given label.
+// This is an alpha feature and may change in the future.
+type TopologySelectorLabelRequirement struct {
+	// The label key that the selector applies to.
+	Key string
+	// An array of string values. One value must match the label to be selected.
+	// Each entry in Values is ORed.
+	Values []string
+}
+
+// Affinity is a group of affinity scheduling rules.
+type Affinity struct {
+	// Describes node affinity scheduling rules for the pod.
+	// +optional
+	NodeAffinity *NodeAffinity
+	// Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+	// +optional
+	PodAffinity *PodAffinity
+	// Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+	// +optional
+	PodAntiAffinity *PodAntiAffinity
+}
+
+// Pod affinity is a group of inter-pod affinity scheduling rules.
+type PodAffinity struct {
+	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+	// If the affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to a pod label update), the
+	// system will try to eventually evict the pod from its node.
+	// When there are multiple elements, the lists of nodes corresponding to each
+	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
+	// +optional
+	// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm
+
+	// If the affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to a pod label update), the
+	// system may or may not try to eventually evict the pod from its node.
+	// When there are multiple elements, the lists of nodes corresponding to each
+	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
+	// +optional
+	RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm
+	// The scheduler will prefer to schedule pods to nodes that satisfy
+	// the affinity expressions specified by this field, but it may choose
+	// a node that violates one or more of the expressions. The node that is
+	// most preferred is the one with the greatest sum of weights, i.e.
+	// for each node that meets all of the scheduling requirements (resource
+	// request, requiredDuringScheduling affinity expressions, etc.),
+	// compute a sum by iterating through the elements of this field and adding
+	// "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+	// node(s) with the highest sum are the most preferred.
+	// +optional
+	PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm
+}
+
+// Pod anti-affinity is a group of inter-pod anti-affinity scheduling rules.
+type PodAntiAffinity struct {
+	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+	// If the anti-affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the anti-affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to a pod label update), the
+	// system will try to eventually evict the pod from its node.
+	// When there are multiple elements, the lists of nodes corresponding to each
+	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
+	// +optional
+	// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm
+
+	// If the anti-affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the anti-affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to a pod label update), the
+	// system may or may not try to eventually evict the pod from its node.
+	// When there are multiple elements, the lists of nodes corresponding to each
+	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
+	// +optional
+	RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm
+	// The scheduler will prefer to schedule pods to nodes that satisfy
+	// the anti-affinity expressions specified by this field, but it may choose
+	// a node that violates one or more of the expressions. The node that is
+	// most preferred is the one with the greatest sum of weights, i.e.
+	// for each node that meets all of the scheduling requirements (resource
+	// request, requiredDuringScheduling anti-affinity expressions, etc.),
+	// compute a sum by iterating through the elements of this field and adding
+	// "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+	// node(s) with the highest sum are the most preferred.
+	// +optional
+	PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm
+}
+
+// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+type WeightedPodAffinityTerm struct {
+	// weight associated with matching the corresponding podAffinityTerm,
+	// in the range 1-100.
+	Weight int32
+	// Required. A pod affinity term, associated with the corresponding weight.
+	PodAffinityTerm PodAffinityTerm
+}
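To make the weighting scheme above concrete, here is a preferred anti-affinity term that discourages co-scheduling two app=web pods on the same node, assuming the k8s.io/api/core/v1 mirror of these types; the label and weight are illustrative. kubernetes.io/hostname is the conventional per-node topology key.

package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Hypothetical soft anti-affinity: spread app=web pods across nodes,
// but still schedule if no other node is available.
var antiAffinity = v1.PodAntiAffinity{
	PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{{
		Weight: 100,
		PodAffinityTerm: v1.PodAffinityTerm{
			LabelSelector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "web"}, // hypothetical label
			},
			TopologyKey: "kubernetes.io/hostname",
		},
	}},
}

+// Defines a set of pods (namely those matching the labelSelector
+// relative to the given namespace(s)) that this pod should be
+// co-located (affinity) or not co-located (anti-affinity) with,
+// where co-located is defined as running on a node whose value of
+// the label with key <topologyKey> matches that of any node on which
+// a pod of the set of pods is running.
+type PodAffinityTerm struct {
+	// A label query over a set of resources, in this case pods.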
+ // +optional + LabelSelector *metav1.LabelSelector + // namespaces specifies which namespaces the labelSelector applies to (matches against); + // null or empty list means "this pod's namespace" + // +optional + Namespaces []string + // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + // the labelSelector in the specified namespaces, where co-located is defined as running on a node + // whose value of the label with key topologyKey matches that of any node on which any of the + // selected pods is running. + // Empty topologyKey is not allowed. + TopologyKey string +} + +// Node affinity is a group of node affinity scheduling rules. +type NodeAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to an update), the system + // will try to eventually evict the pod from its node. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector + + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to an update), the system + // may or may not try to eventually evict the pod from its node. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node matches the corresponding matchExpressions; the + // node(s) with the highest sum are the most preferred. + // +optional + PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm +} + +// An empty preferred scheduling term matches all objects with implicit weight 0 +// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). +type PreferredSchedulingTerm struct { + // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + Weight int32 + // A node selector term, associated with the corresponding weight. + Preference NodeSelectorTerm +} + +// The node this Taint is attached to has the "effect" on +// any pod that does not tolerate the Taint. +type Taint struct { + // Required. The taint key to be applied to a node. + Key string + // Required. The taint value corresponding to the taint key. + // +optional + Value string + // Required. The effect of the taint on pods + // that do not tolerate the taint. + // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + Effect TaintEffect + // TimeAdded represents the time at which the taint was added. + // It is only written for NoExecute taints. 
+ // +optional
+ TimeAdded *metav1.Time
+}
+
+type TaintEffect string
+
+const (
+ // Do not allow new pods to schedule onto the node unless they tolerate the taint,
+ // but allow all pods submitted to Kubelet without going through the scheduler
+ // to start, and allow all already-running pods to continue running.
+ // Enforced by the scheduler.
+ TaintEffectNoSchedule TaintEffect = "NoSchedule"
+ // Like TaintEffectNoSchedule, but the scheduler tries not to schedule
+ // new pods onto the node, rather than prohibiting new pods from scheduling
+ // onto the node entirely. Enforced by the scheduler.
+ TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
+ // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // Like TaintEffectNoSchedule, but additionally do not allow pods submitted to
+ // Kubelet without going through the scheduler to start.
+ // Enforced by Kubelet and the scheduler.
+ // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"
+
+ // Evict any already-running pods that do not tolerate the taint.
+ // Currently enforced by NodeController.
+ TaintEffectNoExecute TaintEffect = "NoExecute"
+)
+
+// The pod this Toleration is attached to tolerates any taint that matches
+// the triple <key,value,effect> using the matching operator <operator>.
+type Toleration struct {
+ // Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ // If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ // +optional
+ Key string
+ // Operator represents a key's relationship to the value.
+ // Valid operators are Exists and Equal. Defaults to Equal.
+ // Exists is equivalent to wildcard for value, so that a pod can
+ // tolerate all taints of a particular category.
+ // +optional
+ Operator TolerationOperator
+ // Value is the taint value the toleration matches to.
+ // If the operator is Exists, the value should be empty, otherwise just a regular string.
+ // +optional
+ Value string
+ // Effect indicates the taint effect to match. Empty means match all taint effects.
+ // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ // +optional
+ Effect TaintEffect
+ // TolerationSeconds represents the period of time the toleration (which must be
+ // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ // it is not set, which means tolerate the taint forever (do not evict). Zero and
+ // negative values will be treated as 0 (evict immediately) by the system.
+ // +optional
+ TolerationSeconds *int64
+}
+
+// A toleration operator is the set of operators that can be used in a toleration.
+type TolerationOperator string
+
+const (
+ TolerationOpExists TolerationOperator = "Exists"
+ TolerationOpEqual TolerationOperator = "Equal"
+)
+
+// PodReadinessGate contains the reference to a pod condition
+type PodReadinessGate struct {
+ // ConditionType refers to a condition in the pod's condition list with matching type.
+ ConditionType PodConditionType
+}
+
+// PodSpec is a description of a pod
+type PodSpec struct {
+ Volumes []Volume
+ // List of initialization containers belonging to the pod.
+ InitContainers []Container
+ // List of containers belonging to the pod.
+ Containers []Container
+ // +optional
+ RestartPolicy RestartPolicy
+ // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+ // Value must be a non-negative integer. The value zero indicates delete immediately.
+ // If this value is nil, the default grace period will be used instead.
+ // The grace period is the duration in seconds after the processes running in the pod are sent
+ // a termination signal and the time when the processes are forcibly halted with a kill signal.
+ // Set this value longer than the expected cleanup time for your process.
+ // +optional
+ TerminationGracePeriodSeconds *int64
+ // Optional duration in seconds relative to the StartTime that the pod may be active on a node
+ // before the system actively tries to terminate the pod; value must be a positive integer
+ // +optional
+ ActiveDeadlineSeconds *int64
+ // Set DNS policy for the pod.
+ // Defaults to "ClusterFirst".
+ // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
+ // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
+ // To have DNS options set along with hostNetwork, you have to specify DNS policy
+ // explicitly to 'ClusterFirstWithHostNet'.
+ // +optional
+ DNSPolicy DNSPolicy
+ // NodeSelector is a selector which must be true for the pod to fit on a node
+ // +optional
+ NodeSelector map[string]string
+
+ // ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+ // The pod will be allowed to use secrets referenced by the ServiceAccount
+ ServiceAccountName string
+ // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
+ // +optional
+ AutomountServiceAccountToken *bool
+
+ // NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
+ // the scheduler simply schedules this pod onto that node, assuming that it fits resource
+ // requirements.
+ // +optional
+ NodeName string
+ // SecurityContext holds pod-level security attributes and common container settings.
+ // Optional: Defaults to empty. See type description for default values of each field.
+ // +optional
+ SecurityContext *PodSecurityContext
+ // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+ // If specified, these secrets will be passed to individual puller implementations for them to use. For example,
+ // in the case of docker, only DockerConfig type secrets are honored.
+ // +optional
+ ImagePullSecrets []LocalObjectReference
+ // Specifies the hostname of the Pod.
+ // If not specified, the pod's hostname will be set to a system-defined value.
+ // +optional
+ Hostname string
+ // If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+ // If not specified, the pod will not have a domainname at all.
+ // +optional
+ Subdomain string
+ // If specified, the pod's scheduling constraints
+ // +optional
+ Affinity *Affinity
+ // If specified, the pod will be dispatched by the specified scheduler.
+ // If not specified, the pod will be dispatched by the default scheduler.
+ // +optional
+ SchedulerName string
+ // If specified, the pod's tolerations.
+ // +optional
+ Tolerations []Toleration
+ // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
+ // file if specified. This is only valid for non-hostNetwork pods.
+ // +optional
+ HostAliases []HostAlias
+ // If specified, indicates the pod's priority. "system-node-critical" and
+ // "system-cluster-critical" are two special keywords which indicate the
+ // highest priorities with the former being the highest priority.
Any other + // name must be defined by creating a PriorityClass object with that name. + // If not specified, the pod priority will be default or zero if there is no + // default. + // +optional + PriorityClassName string + // The priority value. Various system components use this field to find the + // priority of the pod. When Priority Admission Controller is enabled, it + // prevents users from setting this field. The admission controller populates + // this field from PriorityClassName. + // The higher the value, the higher the priority. + // +optional + Priority *int32 + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + PreemptionPolicy *PreemptionPolicy + // Specifies the DNS parameters of a pod. + // Parameters specified here will be merged to the generated DNS + // configuration based on DNSPolicy. + // +optional + DNSConfig *PodDNSConfig + // If specified, all readiness gates will be evaluated for pod readiness. + // A pod is ready when all its containers are ready AND + // all conditions specified in the readiness gates have status equal to "True" + // More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md + // +optional + ReadinessGates []PodReadinessGate + // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + // empty definition that uses the default runtime handler. + // More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md + // This is a beta feature as of Kubernetes v1.14. + // +optional + RuntimeClassName *string + // EnableServiceLinks indicates whether information about services should be injected into pod's + // environment variables, matching the syntax of Docker links. + // If not specified, the default is true. + // +optional + EnableServiceLinks *bool +} + +// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the +// pod's hosts file. +type HostAlias struct { + IP string + Hostnames []string +} + +// Sysctl defines a kernel parameter to be set +type Sysctl struct { + // Name of a property to set + Name string + // Value of a property to set + Value string +} + +// PodSecurityContext holds pod-level security attributes and common container settings. +// Some fields are also present in container.securityContext. Field values of +// container.securityContext take precedence over field values of PodSecurityContext. +type PodSecurityContext struct { + // Use the host's network namespace. If this option is set, the ports that will be + // used must be specified. + // Optional: Default to false + // +k8s:conversion-gen=false + // +optional + HostNetwork bool + // Use the host's pid namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + HostPID bool + // Use the host's ipc namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + HostIPC bool + // Share a single process namespace between all of the containers in a pod. 
+ // When this is set containers will be able to view and signal processes from other containers + // in the same pod, and the first process in each container will not be assigned PID 1. + // HostPID and ShareProcessNamespace cannot both be set. + // Optional: Default to false. + // This field is beta-level and may be disabled with the PodShareProcessNamespace feature. + // +k8s:conversion-gen=false + // +optional + ShareProcessNamespace *bool + // The SELinux context to be applied to all containers. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in SecurityContext. If set in + // both SecurityContext and PodSecurityContext, the value specified in SecurityContext + // takes precedence for that container. + // +optional + SELinuxOptions *SELinuxOptions + // Windows security options. + // +optional + WindowsOptions *WindowsSecurityContextOptions + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence + // for that container. + // +optional + RunAsUser *int64 + // The GID to run the entrypoint of the container process. + // Uses runtime default if unset. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence + // for that container. + // +optional + RunAsGroup *int64 + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence + // for that container. + // +optional + RunAsNonRoot *bool + // A list of groups applied to the first process run in each container, in addition + // to the container's primary GID. If unspecified, no groups will be added to + // any container. + // +optional + SupplementalGroups []int64 + // A special supplemental group that applies to all containers in a pod. + // Some volume types allow the Kubelet to change the ownership of that volume + // to be owned by the pod: + // + // 1. The owning GID will be the FSGroup + // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + // 3. The permission bits are OR'd with rw-rw---- + // + // If unset, the Kubelet will not modify the ownership and permissions of any volume. + // +optional + FSGroup *int64 + // Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + // sysctls (by the container runtime) might fail to launch. + // +optional + Sysctls []Sysctl +} + +// PodQOSClass defines the supported qos classes of Pods. +type PodQOSClass string + +const ( + // PodQOSGuaranteed is the Guaranteed qos class. + PodQOSGuaranteed PodQOSClass = "Guaranteed" + // PodQOSBurstable is the Burstable qos class. + PodQOSBurstable PodQOSClass = "Burstable" + // PodQOSBestEffort is the BestEffort qos class. + PodQOSBestEffort PodQOSClass = "BestEffort" +) + +// PodDNSConfig defines the DNS parameters of a pod in addition to +// those generated from DNSPolicy. 
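+// For example (an illustrative sketch, not part of this patch), adding a
+// custom resolver and an ndots option on top of the policy-generated
+// settings:
+//
+//   ndots := "2"
+//   cfg := PodDNSConfig{
+//       Nameservers: []string{"1.2.3.4"},
+//       Searches:    []string{"my-ns.svc.cluster.local"},
+//       Options:     []PodDNSConfigOption{{Name: "ndots", Value: &ndots}},
+//   }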
+type PodDNSConfig struct {
+ // A list of DNS name server IP addresses.
+ // This will be appended to the base nameservers generated from DNSPolicy.
+ // Duplicated nameservers will be removed.
+ // +optional
+ Nameservers []string
+ // A list of DNS search domains for host-name lookup.
+ // This will be appended to the base search paths generated from DNSPolicy.
+ // Duplicated search paths will be removed.
+ // +optional
+ Searches []string
+ // A list of DNS resolver options.
+ // This will be merged with the base options generated from DNSPolicy.
+ // Duplicated entries will be removed. Resolution options given in Options
+ // will override those that appear in the base DNSPolicy.
+ // +optional
+ Options []PodDNSConfigOption
+}
+
+// PodDNSConfigOption defines DNS resolver options of a pod.
+type PodDNSConfigOption struct {
+ // Required.
+ Name string
+ // +optional
+ Value *string
+}
+
+// PodStatus represents information about the status of a pod. Status may trail the actual
+// state of a system.
+type PodStatus struct {
+ // +optional
+ Phase PodPhase
+ // +optional
+ Conditions []PodCondition
+ // A human-readable message indicating details about why the pod is in this state.
+ // +optional
+ Message string
+ // A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'
+ // +optional
+ Reason string
+ // nominatedNodeName is set when this pod preempts other pods on the node, but it cannot be
+ // scheduled right away as preemption victims receive their graceful termination periods.
+ // This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide
+ // to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to
+ // give the resources on this node to a higher priority pod that is created after preemption.
+ // +optional
+ NominatedNodeName string
+
+ // +optional
+ HostIP string
+ // +optional
+ PodIP string
+
+ // Date and time at which the object was acknowledged by the Kubelet.
+ // This is before the Kubelet pulled the container image(s) for the pod.
+ // +optional
+ StartTime *metav1.Time
+ // +optional
+ QOSClass PodQOSClass
+
+ // The list has one entry per init container in the manifest. The most recent successful
+ // init container will have ready = true, the most recently started container will have
+ // startTime set.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status
+ InitContainerStatuses []ContainerStatus
+ // The list has one entry per container in the manifest. Each entry is
+ // currently the output of `docker inspect`. This output format is *not*
+ // final and should not be relied upon.
+ // TODO: Make real decisions about what our info should look like. Re-enable fuzz test
+ // when we have done this.
+ // +optional
+ ContainerStatuses []ContainerStatus
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encoded/decoded
+type PodStatusResult struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ObjectMeta
+ // Status represents the current information about a pod. This data may not be up
+ // to date.
+ // +optional
+ Status PodStatus
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Pod is a collection of containers, used as either input (create, update) or as output (list, get).
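+// A minimal Pod (an illustrative sketch, not part of this patch; Container
+// and RestartPolicyAlways are defined elsewhere in this package):
+//
+//   pod := Pod{
+//       ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
+//       Spec: PodSpec{
+//           Containers:    []Container{{Name: "web", Image: "nginx"}},
+//           RestartPolicy: RestartPolicyAlways,
+//       },
+//   }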
+type Pod struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a pod. + // +optional + Spec PodSpec + + // Status represents the current information about a pod. This data may not be up + // to date. + // +optional + Status PodStatus +} + +// PodTemplateSpec describes the data a pod should have when created from a template +type PodTemplateSpec struct { + // Metadata of the pods created from this template. + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a pod. + // +optional + Spec PodSpec +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodTemplate describes a template for creating copies of a predefined pod. +type PodTemplate struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Template defines the pods that will be created from this pod template + // +optional + Template PodTemplateSpec +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodTemplateList is a list of PodTemplates. +type PodTemplateList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []PodTemplate +} + +// ReplicationControllerSpec is the specification of a replication controller. +// As the internal representation of a replication controller, it may have either +// a TemplateRef or a Template set. +type ReplicationControllerSpec struct { + // Replicas is the number of desired replicas. + Replicas int32 + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 + + // Selector is a label query over pods that should match the Replicas count. + Selector map[string]string + + // TemplateRef is a reference to an object that describes the pod that will be created if + // insufficient replicas are detected. This reference is ignored if a Template is set. + // Must be set before converting to a versioned API object + // +optional + //TemplateRef *ObjectReference + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. Internally, this takes precedence over a + // TemplateRef. + // +optional + Template *PodTemplateSpec +} + +// ReplicationControllerStatus represents the current status of a replication +// controller. +type ReplicationControllerStatus struct { + // Replicas is the number of actual replicas. + Replicas int32 + + // The number of pods that have labels matching the labels of the pod template of the replication controller. + // +optional + FullyLabeledReplicas int32 + + // The number of ready replicas for this replication controller. + // +optional + ReadyReplicas int32 + + // The number of available replicas (ready for at least minReadySeconds) for this replication controller. + // +optional + AvailableReplicas int32 + + // ObservedGeneration is the most recent generation observed by the controller. + // +optional + ObservedGeneration int64 + + // Represents the latest available observations of a replication controller's current state. + // +optional + Conditions []ReplicationControllerCondition +} + +type ReplicationControllerConditionType string + +// These are valid conditions of a replication controller. 
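+// For example (illustrative only), a client watching for quota or admission
+// problems might scan the conditions list:
+//
+//   for _, c := range rc.Status.Conditions {
+//       if c.Type == ReplicationControllerReplicaFailure && c.Status == ConditionTrue {
+//           log.Printf("replica failure: %s: %s", c.Reason, c.Message)
+//       }
+//   }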
+const (
+ // ReplicationControllerReplicaFailure is added in a replication controller when one of its pods
+ // fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors,
+ // etc., or is deleted due to the kubelet being down or finalizers failing.
+ ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure"
+)
+
+// ReplicationControllerCondition describes the state of a replication controller at a certain point.
+type ReplicationControllerCondition struct {
+ // Type of replication controller condition.
+ Type ReplicationControllerConditionType
+ // Status of the condition, one of True, False, Unknown.
+ Status ConditionStatus
+ // The last time the condition transitioned from one status to another.
+ // +optional
+ LastTransitionTime metav1.Time
+ // The reason for the condition's last transition.
+ // +optional
+ Reason string
+ // A human-readable message indicating details about the transition.
+ // +optional
+ Message string
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ReplicationController represents the configuration of a replication controller.
+type ReplicationController struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ObjectMeta
+
+ // Spec defines the desired behavior of this replication controller.
+ // +optional
+ Spec ReplicationControllerSpec
+
+ // Status is the current status of this replication controller. This data may be
+ // out of date by some window of time.
+ // +optional
+ Status ReplicationControllerStatus
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ReplicationControllerList is a collection of replication controllers.
+type ReplicationControllerList struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ListMeta
+
+ Items []ReplicationController
+}
+
+const (
+ // ClusterIPNone - do not assign a cluster IP
+ // no proxying required and no environment variables should be created for pods
+ ClusterIPNone = "None"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceList holds a list of services.
+type ServiceList struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ListMeta
+
+ Items []Service
+}
+
+// Session Affinity Type string
+type ServiceAffinity string
+
+const (
+ // ServiceAffinityClientIP is the Client IP based session affinity.
+ ServiceAffinityClientIP ServiceAffinity = "ClientIP"
+
+ // ServiceAffinityNone - no session affinity.
+ ServiceAffinityNone ServiceAffinity = "None"
+)
+
+const (
+ // DefaultClientIPServiceAffinitySeconds is the default timeout seconds
+ // of Client IP based session affinity - 3 hours.
+ DefaultClientIPServiceAffinitySeconds int32 = 10800
+ // MaxClientIPServiceAffinitySeconds is the max timeout seconds
+ // of Client IP based session affinity - 1 day.
+ MaxClientIPServiceAffinitySeconds int32 = 86400
+)
+
+// SessionAffinityConfig represents the configurations of session affinity.
+type SessionAffinityConfig struct {
+ // clientIP contains the configurations of Client IP based session affinity.
+ // +optional
+ ClientIP *ClientIPConfig
+}
+
+// ClientIPConfig represents the configurations of Client IP based session affinity.
+type ClientIPConfig struct {
+ // timeoutSeconds specifies the seconds of ClientIP type session sticky time.
+ // The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP".
+ // Default value is 10800(for 3 hours).
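+ // For example (a sketch, not part of this patch), a one-hour sticky window:
+ //
+ //   timeout := int32(3600)
+ //   svc.Spec.SessionAffinity = ServiceAffinityClientIP
+ //   svc.Spec.SessionAffinityConfig = &SessionAffinityConfig{
+ //       ClientIP: &ClientIPConfig{TimeoutSeconds: &timeout},
+ //   }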
+ // +optional + TimeoutSeconds *int32 +} + +// Service Type string describes ingress methods for a service +type ServiceType string + +const ( + // ServiceTypeClusterIP means a service will only be accessible inside the + // cluster, via the ClusterIP. + ServiceTypeClusterIP ServiceType = "ClusterIP" + + // ServiceTypeNodePort means a service will be exposed on one port of + // every node, in addition to 'ClusterIP' type. + ServiceTypeNodePort ServiceType = "NodePort" + + // ServiceTypeLoadBalancer means a service will be exposed via an + // external load balancer (if the cloud provider supports it), in addition + // to 'NodePort' type. + ServiceTypeLoadBalancer ServiceType = "LoadBalancer" + + // ServiceTypeExternalName means a service consists of only a reference to + // an external name that kubedns or equivalent will return as a CNAME + // record, with no exposing or proxying of any pods involved. + ServiceTypeExternalName ServiceType = "ExternalName" +) + +// Service External Traffic Policy Type string +type ServiceExternalTrafficPolicyType string + +const ( + // ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior. + ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local" + // ServiceExternalTrafficPolicyTypeCluster specifies cluster-wide (legacy) behavior. + ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" +) + +// ServiceStatus represents the current status of a service +type ServiceStatus struct { + // LoadBalancer contains the current status of the load-balancer, + // if one is present. + // +optional + LoadBalancer LoadBalancerStatus +} + +// LoadBalancerStatus represents the status of a load-balancer +type LoadBalancerStatus struct { + // Ingress is a list containing ingress points for the load-balancer; + // traffic intended for the service should be sent to these ingress points. + // +optional + Ingress []LoadBalancerIngress +} + +// LoadBalancerIngress represents the status of a load-balancer ingress point: +// traffic intended for the service should be sent to an ingress point. +type LoadBalancerIngress struct { + // IP is set for load-balancer ingress points that are IP based + // (typically GCE or OpenStack load-balancers) + // +optional + IP string + + // Hostname is set for load-balancer ingress points that are DNS based + // (typically AWS load-balancers) + // +optional + Hostname string +} + +// ServiceSpec describes the attributes that a user creates on a service +type ServiceSpec struct { + // Type determines how the Service is exposed. Defaults to ClusterIP. Valid + // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + // "ExternalName" maps to the specified externalName. + // "ClusterIP" allocates a cluster-internal IP address for load-balancing to + // endpoints. Endpoints are determined by the selector or if that is not + // specified, by manual construction of an Endpoints object. If clusterIP is + // "None", no virtual IP is allocated and the endpoints are published as a + // set of endpoints rather than a stable IP. + // "NodePort" builds on ClusterIP and allocates a port on every node which + // routes to the clusterIP. + // "LoadBalancer" builds on NodePort and creates an + // external load-balancer (if supported in the current cloud) which routes + // to the clusterIP. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/ + // +optional + Type ServiceType + + // Required: The list of ports that are exposed by this service. 
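+ // For example (an illustrative sketch, not part of this patch), a single
+ // HTTP port forwarding to container port 8080:
+ //
+ //   []ServicePort{{
+ //       Name:       "http",
+ //       Protocol:   ProtocolTCP, // defined elsewhere in this package
+ //       Port:       80,
+ //       TargetPort: intstr.FromInt(8080),
+ //   }}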
+ Ports []ServicePort
+
+ // Route service traffic to pods with label keys and values matching this
+ // selector. If empty or not present, the service is assumed to have an
+ // external process managing its endpoints, which Kubernetes will not
+ // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
+ // Ignored if type is ExternalName.
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ Selector map[string]string
+
+ // ClusterIP is the IP address of the service and is usually assigned
+ // randomly by the master. If an address is specified manually and is not in
+ // use by others, it will be allocated to the service; otherwise, creation
+ // of the service will fail. This field cannot be changed through updates.
+ // Valid values are "None", empty string (""), or a valid IP address. "None"
+ // can be specified for headless services when proxying is not required.
+ // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if
+ // type is ExternalName.
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ // +optional
+ ClusterIP string
+
+ // ExternalName is the external reference that kubedns or equivalent will
+ // return as a CNAME record for this service. No proxying will be involved.
+ // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123)
+ // and requires Type to be ExternalName.
+ ExternalName string
+
+ // ExternalIPs are used by external load balancers, or can be set by
+ // users to handle external traffic that arrives at a node.
+ // +optional
+ ExternalIPs []string
+
+ // Only applies to Service Type: LoadBalancer
+ // LoadBalancer will get created with the IP specified in this field.
+ // This feature depends on whether the underlying cloud-provider supports specifying
+ // the loadBalancerIP when a load balancer is created.
+ // This field will be ignored if the cloud-provider does not support the feature.
+ // +optional
+ LoadBalancerIP string
+
+ // Optional: Supports "ClientIP" and "None". Used to maintain session affinity.
+ // +optional
+ SessionAffinity ServiceAffinity
+
+ // sessionAffinityConfig contains the configurations of session affinity.
+ // +optional
+ SessionAffinityConfig *SessionAffinityConfig
+
+ // Optional: If specified and supported by the platform, traffic through the
+ // cloud-provider load-balancer will be restricted to the specified client IPs.
+ // This field will be ignored if the cloud-provider does not support the feature.
+ // +optional
+ LoadBalancerSourceRanges []string
+
+ // externalTrafficPolicy denotes if this Service desires to route external
+ // traffic to node-local or cluster-wide endpoints. "Local" preserves the
+ // client source IP and avoids a second hop for LoadBalancer and NodePort
+ // type services, but risks potentially imbalanced traffic spreading.
+ // "Cluster" obscures the client source IP and may cause a second hop to
+ // another node, but should have good overall load-spreading.
+ // +optional
+ ExternalTrafficPolicy ServiceExternalTrafficPolicyType
+
+ // healthCheckNodePort specifies the healthcheck nodePort for the service.
+ // If not specified, HealthCheckNodePort is created by the service api
+ // backend with the allocated nodePort. Will use user-specified nodePort value
+ // if specified by the client. Only takes effect when Type is set to LoadBalancer
+ // and ExternalTrafficPolicy is set to Local.
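+ // For example (a sketch, not part of this patch), preserving client source
+ // IPs on a cloud load balancer:
+ //
+ //   svc.Spec.Type = ServiceTypeLoadBalancer
+ //   svc.Spec.ExternalTrafficPolicy = ServiceExternalTrafficPolicyTypeLocal
+ //   // healthCheckNodePort is then allocated by the API server unless the
+ //   // client sets it explicitly; the cloud LB health-checks each node on
+ //   // this port and routes only to nodes with local endpoints.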
+ // +optional
+ HealthCheckNodePort int32
+
+ // publishNotReadyAddresses, when set to true, indicates that DNS implementations
+ // must publish the notReadyAddresses of subsets for the Endpoints associated with
+ // the Service. The default value is false.
+ // The primary use case for setting this field is to use a StatefulSet's Headless Service
+ // to propagate SRV records for its Pods without respect to their readiness for the purpose
+ // of peer discovery.
+ // +optional
+ PublishNotReadyAddresses bool
+}
+
+type ServicePort struct {
+ // Optional if only one ServicePort is defined on this service: The
+ // name of this port within the service. This must be a DNS_LABEL.
+ // All ports within a ServiceSpec must have unique names. This maps to
+ // the 'Name' field in EndpointPort objects.
+ Name string
+
+ // The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
+ Protocol Protocol
+
+ // The port that will be exposed on the service.
+ Port int32
+
+ // Optional: The target port on pods selected by this service. If this
+ // is a string, it will be looked up as a named port in the target
+ // Pod's container ports. If this is not specified, the value
+ // of the 'port' field is used (an identity map).
+ // This field is ignored for services with clusterIP=None, and should be
+ // omitted or set equal to the 'port' field.
+ TargetPort intstr.IntOrString
+
+ // The port on each node on which this service is exposed.
+ // Default is to auto-allocate a port if the ServiceType of this Service requires one.
+ NodePort int32
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Service is a named abstraction of software service (for example, mysql) consisting of local port
+// (for example 3306) that the proxy listens on, and the selector that determines which pods
+// will answer requests sent through the proxy.
+type Service struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ObjectMeta
+
+ // Spec defines the behavior of a service.
+ // +optional
+ Spec ServiceSpec
+
+ // Status represents the current status of a service.
+ // +optional
+ Status ServiceStatus
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceAccount binds together:
+// * a name, understood by users, and perhaps by peripheral systems, for an identity
+// * a principal that can be authenticated and authorized
+// * a set of secrets
+type ServiceAccount struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ObjectMeta
+
+ // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount
+ Secrets []ObjectReference
+
+ // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
+ // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets
+ // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.
+ // +optional
+ ImagePullSecrets []LocalObjectReference
+
+ // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.
+ // Can be overridden at the pod level.
+ // +optional
+ AutomountServiceAccountToken *bool
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceAccountList is a list of ServiceAccount objects
+type ServiceAccountList struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ListMeta
+
+ Items []ServiceAccount
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Endpoints is a collection of endpoints that implement the actual service. Example:
+//   Name: "mysvc",
+//   Subsets: [
+//     {
+//       Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+//       Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+//     },
+//     {
+//       Addresses: [{"ip": "10.10.3.3"}],
+//       Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
+//     },
+//  ]
+type Endpoints struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ObjectMeta
+
+ // The set of all endpoints is the union of all subsets.
+ Subsets []EndpointSubset
+}
+
+// EndpointSubset is a group of addresses with a common set of ports. The
+// expanded set of endpoints is the Cartesian product of Addresses x Ports.
+// For example, given:
+//   {
+//     Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+//     Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+//   }
+// The resulting set of endpoints can be viewed as:
+//   a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
+//   b: [ 10.10.1.1:309, 10.10.2.2:309 ]
+type EndpointSubset struct {
+ Addresses []EndpointAddress
+ NotReadyAddresses []EndpointAddress
+ Ports []EndpointPort
+}
+
+// EndpointAddress is a tuple that describes a single IP address.
+type EndpointAddress struct {
+ // The IP of this endpoint.
+ // IPv6 is also accepted but not fully supported on all platforms. Also, certain
+ // kubernetes components, like kube-proxy, are not IPv6 ready.
+ // TODO: This should allow hostname or IP, see #4447.
+ IP string
+ // Optional: Hostname of this endpoint
+ // Meant to be used by DNS servers etc.
+ // +optional
+ Hostname string
+ // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.
+ // +optional
+ NodeName *string
+ // Optional: The kubernetes object related to the entry point.
+ TargetRef *ObjectReference
+}
+
+// EndpointPort is a tuple that describes a single port.
+type EndpointPort struct {
+ // The name of this port (corresponds to ServicePort.Name). Optional
+ // if only one port is defined. Must be a DNS_LABEL.
+ Name string
+
+ // The port number.
+ Port int32
+
+ // The IP protocol for this port.
+ Protocol Protocol
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EndpointsList is a list of endpoints.
+type EndpointsList struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ListMeta
+
+ Items []Endpoints
+}
+
+// NodeSpec describes the attributes that a node is created with.
+type NodeSpec struct {
+ // PodCIDR represents the pod IP range assigned to the node
+ // Note: assigning IP ranges to nodes might need to be revisited when we support migratable IPs.
+ // +optional
+ PodCIDR string
+
+ // ID of the node assigned by the cloud provider
+ // Note: format is "<ProviderName>://<ProviderSpecificNodeID>"
+ // +optional
+ ProviderID string
+
+ // Unschedulable controls node schedulability of new pods. By default node is schedulable.
+ // +optional
+ Unschedulable bool
+
+ // If specified, the node's taints.
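+ // For example (illustrative only), the equivalent of
+ // "kubectl taint nodes node1 dedicated=db:NoSchedule" is:
+ //
+ //   node.Spec.Taints = append(node.Spec.Taints, Taint{
+ //       Key:    "dedicated",
+ //       Value:  "db",
+ //       Effect: TaintEffectNoSchedule,
+ //   })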
+ // +optional + Taints []Taint + + // If specified, the source to get node configuration from + // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field + // +optional + ConfigSource *NodeConfigSource + + // Deprecated. Not all kubelets will set this field. Remove field after 1.13. + // see: https://issues.k8s.io/61966 + // +optional + DoNotUse_ExternalID string +} + +// NodeConfigSource specifies a source of node configuration. Exactly one subfield must be non-nil. +type NodeConfigSource struct { + ConfigMap *ConfigMapNodeConfigSource +} + +type ConfigMapNodeConfigSource struct { + // Namespace is the metadata.namespace of the referenced ConfigMap. + // This field is required in all cases. + Namespace string + + // Name is the metadata.name of the referenced ConfigMap. + // This field is required in all cases. + Name string + + // UID is the metadata.UID of the referenced ConfigMap. + // This field is forbidden in Node.Spec, and required in Node.Status. + // +optional + UID types.UID + + // ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. + // This field is forbidden in Node.Spec, and required in Node.Status. + // +optional + ResourceVersion string + + // KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure + // This field is required in all cases. + KubeletConfigKey string +} + +// DaemonEndpoint contains information about a single Daemon endpoint. +type DaemonEndpoint struct { + /* + The port tag was not properly in quotes in earlier releases, so it must be + uppercase for backwards compatibility (since it was falling back to var name of + 'Port'). + */ + + // Port number of the given endpoint. + Port int32 +} + +// NodeDaemonEndpoints lists ports opened by daemons running on the Node. +type NodeDaemonEndpoints struct { + // Endpoint on which Kubelet is listening. + // +optional + KubeletEndpoint DaemonEndpoint +} + +// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. +type NodeSystemInfo struct { + // MachineID reported by the node. For unique machine identification + // in the cluster this field is preferred. Learn more from man(5) + // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html + MachineID string + // SystemUUID reported by the node. For unique machine identification + // MachineID is preferred. This field is specific to Red Hat hosts + // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html + SystemUUID string + // Boot ID reported by the node. + BootID string + // Kernel Version reported by the node. + KernelVersion string + // OS Image reported by the node. + OSImage string + // ContainerRuntime Version reported by the node. + ContainerRuntimeVersion string + // Kubelet Version reported by the node. + KubeletVersion string + // KubeProxy Version reported by the node. + KubeProxyVersion string + // The Operating System reported by the node + OperatingSystem string + // The Architecture reported by the node + Architecture string +} + +// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource. +type NodeConfigStatus struct { + // Assigned reports the checkpointed config the node will try to use. + // When Node.Spec.ConfigSource is updated, the node checkpoints the associated + // config payload to local disk, along with a record indicating intended + // config. 
The node refers to this record to choose its config checkpoint, and + // reports this record in Assigned. Assigned only updates in the status after + // the record has been checkpointed to disk. When the Kubelet is restarted, + // it tries to make the Assigned config the Active config by loading and + // validating the checkpointed payload identified by Assigned. + // +optional + Assigned *NodeConfigSource + // Active reports the checkpointed config the node is actively using. + // Active will represent either the current version of the Assigned config, + // or the current LastKnownGood config, depending on whether attempting to use the + // Assigned config results in an error. + // +optional + Active *NodeConfigSource + // LastKnownGood reports the checkpointed config the node will fall back to + // when it encounters an error attempting to use the Assigned config. + // The Assigned config becomes the LastKnownGood config when the node determines + // that the Assigned config is stable and correct. + // This is currently implemented as a 10-minute soak period starting when the local + // record of Assigned config is updated. If the Assigned config is Active at the end + // of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is + // reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, + // because the local default config is always assumed good. + // You should not make assumptions about the node's method of determining config stability + // and correctness, as this may change or become configurable in the future. + // +optional + LastKnownGood *NodeConfigSource + // Error describes any problems reconciling the Spec.ConfigSource to the Active config. + // Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned + // record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting + // to load or validate the Assigned config, etc. + // Errors may occur at different points while syncing config. Earlier errors (e.g. download or + // checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across + // Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in + // a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error + // by fixing the config assigned in Spec.ConfigSource. + // You can find additional information for debugging by searching the error message in the Kubelet log. + // Error is a human-readable description of the error state; machines can check whether or not Error + // is empty, but should not rely on the stability of the Error text across Kubelet versions. + // +optional + Error string +} + +// NodeStatus is information about the current status of a node. +type NodeStatus struct { + // Capacity represents the total resources of a node. + // +optional + Capacity ResourceList + // Allocatable represents the resources of a node that are available for scheduling. + // +optional + Allocatable ResourceList + // NodePhase is the current lifecycle phase of the node. + // +optional + Phase NodePhase + // Conditions is an array of current node conditions. + // +optional + Conditions []NodeCondition + // Queried from cloud provider, if available. + // +optional + Addresses []NodeAddress + // Endpoints of daemons running on the Node. + // +optional + DaemonEndpoints NodeDaemonEndpoints + // Set of ids/uuids to uniquely identify the node. 
+ // +optional + NodeInfo NodeSystemInfo + // List of container images on this node + // +optional + Images []ContainerImage + // List of attachable volumes in use (mounted) by the node. + // +optional + VolumesInUse []UniqueVolumeName + // List of volumes that are attached to the node. + // +optional + VolumesAttached []AttachedVolume + // Status of the config assigned to the node via the dynamic Kubelet config feature. + // +optional + Config *NodeConfigStatus +} + +type UniqueVolumeName string + +// AttachedVolume describes a volume attached to a node +type AttachedVolume struct { + // Name of the attached volume + Name UniqueVolumeName + + // DevicePath represents the device path where the volume should be available + DevicePath string +} + +// AvoidPods describes pods that should avoid this node. This is the value for a +// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and +// will eventually become a field of NodeStatus. +type AvoidPods struct { + // Bounded-sized list of signatures of pods that should avoid this node, sorted + // in timestamp order from oldest to newest. Size of the slice is unspecified. + // +optional + PreferAvoidPods []PreferAvoidPodsEntry +} + +// Describes a class of pods that should avoid this node. +type PreferAvoidPodsEntry struct { + // The class of pods. + PodSignature PodSignature + // Time at which this entry was added to the list. + // +optional + EvictionTime metav1.Time + // (brief) reason why this entry was added to the list. + // +optional + Reason string + // Human readable message indicating why this entry was added to the list. + // +optional + Message string +} + +// Describes the class of pods that should avoid this node. +// Exactly one field should be set. +type PodSignature struct { + // Reference to controller whose pods should avoid this node. + // +optional + PodController *metav1.OwnerReference +} + +// Describe a container image +type ContainerImage struct { + // Names by which this image is known. + Names []string + // The size of the image in bytes. + // +optional + SizeBytes int64 +} + +type NodePhase string + +// These are the valid phases of node. +const ( + // NodePending means the node has been created/added by the system, but not configured. + NodePending NodePhase = "Pending" + // NodeRunning means the node has been configured and has Kubernetes components running. + NodeRunning NodePhase = "Running" + // NodeTerminated means the node has been removed from the cluster. + NodeTerminated NodePhase = "Terminated" +) + +type NodeConditionType string + +// These are valid conditions of node. Currently, we don't have enough information to decide +// node condition. In the future, we will add more. The proposed set of conditions are: +// NodeReady, NodeReachable +const ( + // NodeReady means kubelet is healthy and ready to accept pods. + NodeReady NodeConditionType = "Ready" + // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk + // space on the node. + NodeOutOfDisk NodeConditionType = "OutOfDisk" + // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. + NodeMemoryPressure NodeConditionType = "MemoryPressure" + // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. + NodeDiskPressure NodeConditionType = "DiskPressure" + // NodeNetworkUnavailable means that network for the node is not correctly configured. 
+ NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
+)
+
+type NodeCondition struct {
+ Type NodeConditionType
+ Status ConditionStatus
+ // +optional
+ LastHeartbeatTime metav1.Time
+ // +optional
+ LastTransitionTime metav1.Time
+ // +optional
+ Reason string
+ // +optional
+ Message string
+}
+
+type NodeAddressType string
+
+const (
+ NodeHostName NodeAddressType = "Hostname"
+ NodeExternalIP NodeAddressType = "ExternalIP"
+ NodeInternalIP NodeAddressType = "InternalIP"
+ NodeExternalDNS NodeAddressType = "ExternalDNS"
+ NodeInternalDNS NodeAddressType = "InternalDNS"
+)
+
+type NodeAddress struct {
+ Type NodeAddressType
+ Address string
+}
+
+// NodeResources is an object for conveying resource information about a node.
+// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.
+type NodeResources struct {
+ // Capacity represents the available resources of a node
+ // +optional
+ Capacity ResourceList
+}
+
+// ResourceName is the name identifying various resources in a ResourceList.
+type ResourceName string
+
+// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters,
+// with the -, _, and . characters allowed anywhere, except the first or last character.
+// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than
+// camel case, separating compound words.
+// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name.
+const (
+ // CPU, in cores. (500m = .5 cores)
+ ResourceCPU ResourceName = "cpu"
+ // Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+ ResourceMemory ResourceName = "memory"
+ // Volume size, in bytes (e.g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024)
+ ResourceStorage ResourceName = "storage"
+ // Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+ // The resource name for ResourceEphemeralStorage is alpha and it can change across releases.
+ ResourceEphemeralStorage ResourceName = "ephemeral-storage"
+)
+
+const (
+ // Default namespace prefix.
+ ResourceDefaultNamespacePrefix = "kubernetes.io/"
+ // Name prefix for huge page resources (alpha).
+ ResourceHugePagesPrefix = "hugepages-"
+ // Name prefix for storage resource limits
+ ResourceAttachableVolumesPrefix = "attachable-volumes-"
+)
+
+// ResourceList is a set of (resource name, quantity) pairs.
+type ResourceList map[ResourceName]resource.Quantity
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Node is a worker node in Kubernetes
+// The name of the node according to etcd is in ObjectMeta.Name.
+type Node struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ObjectMeta
+
+ // Spec defines the behavior of a node.
+ // +optional
+ Spec NodeSpec
+
+ // Status describes the current status of a Node
+ // +optional
+ Status NodeStatus
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NodeList is a list of nodes.
+type NodeList struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ListMeta
+
+ Items []Node
+}
+
+// NamespaceSpec describes the attributes on a Namespace
+type NamespaceSpec struct {
+ // Finalizers is an opaque list of values that must be empty to permanently remove object from storage
+ Finalizers []FinalizerName
+}
+
+// FinalizerName is the name identifying a finalizer during namespace lifecycle.
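+// For example (illustrative only), a namespace awaiting cleanup still carries
+// the built-in finalizer:
+//
+//   ns.Spec.Finalizers = []FinalizerName{FinalizerKubernetes}
+//
+// and is not removed from storage until this list is emptied.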
+type FinalizerName string + +// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or +// in metav1. +const ( + FinalizerKubernetes FinalizerName = "kubernetes" +) + +// NamespaceStatus is information about the current status of a Namespace. +type NamespaceStatus struct { + // Phase is the current lifecycle phase of the namespace. + // +optional + Phase NamespacePhase +} + +type NamespacePhase string + +// These are the valid phases of a namespace. +const ( + // NamespaceActive means the namespace is available for use in the system + NamespaceActive NamespacePhase = "Active" + // NamespaceTerminating means the namespace is undergoing graceful termination + NamespaceTerminating NamespacePhase = "Terminating" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// A namespace provides a scope for Names. +// Use of multiple namespaces is optional +type Namespace struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of the Namespace. + // +optional + Spec NamespaceSpec + + // Status describes the current status of a Namespace + // +optional + Status NamespaceStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NamespaceList is a list of Namespaces. +type NamespaceList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Namespace +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Binding ties one object to another; for example, a pod is bound to a node by a scheduler. +// Deprecated in 1.7, please use the bindings subresource of pods instead. +type Binding struct { + metav1.TypeMeta + // ObjectMeta describes the object that is being bound. + // +optional + metav1.ObjectMeta + + // Target is the object to bind to. + Target ObjectReference +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +type Preconditions struct { + // Specifies the target UID. + // +optional + UID *types.UID +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodLogOptions is the query options for a Pod's logs REST call +type PodLogOptions struct { + metav1.TypeMeta + + // Container for which to return logs + Container string + // If true, follow the logs for the pod + Follow bool + // If true, return previous terminated container logs + Previous bool + // A relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + SinceSeconds *int64 + // An RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + SinceTime *metav1.Time + // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. + Timestamps bool + // If set, the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + TailLines *int64 + // If set, the number of bytes to read from the server before terminating the + // log output. 
This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + LimitBytes *int64 +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodAttachOptions is the query options to a Pod's remote attach call +// TODO: merge w/ PodExecOptions below for stdin, stdout, etc +type PodAttachOptions struct { + metav1.TypeMeta + + // Stdin if true indicates that stdin is to be redirected for the attach call + // +optional + Stdin bool + + // Stdout if true indicates that stdout is to be redirected for the attach call + // +optional + Stdout bool + + // Stderr if true indicates that stderr is to be redirected for the attach call + // +optional + Stderr bool + + // TTY if true indicates that a tty will be allocated for the attach call + // +optional + TTY bool + + // Container to attach to. + // +optional + Container string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodExecOptions is the query options to a Pod's remote exec call +type PodExecOptions struct { + metav1.TypeMeta + + // Stdin if true indicates that stdin is to be redirected for the exec call + Stdin bool + + // Stdout if true indicates that stdout is to be redirected for the exec call + Stdout bool + + // Stderr if true indicates that stderr is to be redirected for the exec call + Stderr bool + + // TTY if true indicates that a tty will be allocated for the exec call + TTY bool + + // Container in which to execute the command. + Container string + + // Command is the remote command to execute; argv array; not executed within a shell. + Command []string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodPortForwardOptions is the query options to a Pod's port forward call +type PodPortForwardOptions struct { + metav1.TypeMeta + + // The list of ports to forward + // +optional + Ports []int32 +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodProxyOptions is the query options to a Pod's proxy call +type PodProxyOptions struct { + metav1.TypeMeta + + // Path is the URL path to use for the current proxy request + Path string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeProxyOptions is the query options to a Node's proxy call +type NodeProxyOptions struct { + metav1.TypeMeta + + // Path is the URL path to use for the current proxy request + Path string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceProxyOptions is the query options to a Service's proxy call. +type ServiceProxyOptions struct { + metav1.TypeMeta + + // Path is the part of URLs that include service endpoints, suffixes, + // and parameters to use for the current proxy request to service. + // For example, the whole request URL is + // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. + // Path is _search?q=user:kimchy. + Path string +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ObjectReference struct { + // +optional + Kind string + // +optional + Namespace string + // +optional + Name string + // +optional + UID types.UID + // +optional + APIVersion string + // +optional + ResourceVersion string + + // Optional. 
If referring to a piece of an object instead of an entire object, this string + // should contain information to identify the sub-object. For example, if the object + // reference is to a container within a pod, this would take on a value like: + // "spec.containers{name}" (where "name" refers to the name of the container that triggered + // the event) or if no container name is specified "spec.containers[2]" (container with + // index 2 in this pod). This syntax is chosen only to have some well-defined way of + // referencing a part of an object. + // TODO: this design is not final and this field is subject to change in the future. + // +optional + FieldPath string +} + +// LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. +type LocalObjectReference struct { + //TODO: Add other useful fields. apiVersion, kind, uid? + Name string +} + +// TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace. +type TypedLocalObjectReference struct { + // APIGroup is the group for the resource being referenced. + // If APIGroup is not specified, the specified Kind must be in the core API group. + // For any other third-party types, APIGroup is required. + // +optional + APIGroup *string + // Kind is the type of resource being referenced + Kind string + // Name is the name of resource being referenced + Name string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type SerializedReference struct { + metav1.TypeMeta + // +optional + Reference ObjectReference +} + +type EventSource struct { + // Component from which the event is generated. + // +optional + Component string + // Node name on which the event is generated. + // +optional + Host string +} + +// Valid values for event types (new types could be added in future) +const ( + // Information only and will not cause any problems + EventTypeNormal string = "Normal" + // These events are to warn that something might go wrong + EventTypeWarning string = "Warning" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Event is a report of an event somewhere in the cluster. +// TODO: Decide whether to store these separately or with the object they apply to. +type Event struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Required. The object that this event is about. Mapped to events.Event.regarding + // +optional + InvolvedObject ObjectReference + + // Optional; this should be a short, machine understandable string that gives the reason + // for this event being generated. For example, if the event is reporting that a container + // can't start, the Reason might be "ImageNotFound". + // TODO: provide exact specification for format. + // +optional + Reason string + + // Optional. A human-readable description of the status of this operation. + // TODO: decide on maximum length. Mapped to events.Event.note + // +optional + Message string + + // Optional. The component reporting this event. Should be a short machine understandable string. + // +optional + Source EventSource + + // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) + // +optional + FirstTimestamp metav1.Time + + // The time at which the most recent occurrence of this event was recorded. + // +optional + LastTimestamp metav1.Time + + // The number of times this event has occurred. 
+ // +optional + Count int32 + + // Type of this event (Normal, Warning), new types could be added in the future. + // +optional + Type string + + // Time when this Event was first observed. + // +optional + EventTime metav1.MicroTime + + // Data about the Event series this event represents or nil if it's a singleton Event. + // +optional + Series *EventSeries + + // What action was taken/failed regarding to the Regarding object. + // +optional + Action string + + // Optional secondary object for more complex actions. + // +optional + Related *ObjectReference + + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + ReportingController string + + // ID of the controller instance, e.g. `kubelet-xyzf`. + // +optional + ReportingInstance string +} + +type EventSeries struct { + // Number of occurrences in this series up to the last heartbeat time + Count int32 + // Time of the last occurrence observed + LastObservedTime metav1.MicroTime + // State of this Series: Ongoing or Finished + // Deprecated. Planned removal for 1.18 + State EventSeriesState +} + +type EventSeriesState string + +const ( + EventSeriesStateOngoing EventSeriesState = "Ongoing" + EventSeriesStateFinished EventSeriesState = "Finished" + EventSeriesStateUnknown EventSeriesState = "Unknown" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EventList is a list of events. +type EventList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Event +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// List holds a list of objects, which may not be known by the server. +type List metainternalversion.List + +// A type of object that is limited +type LimitType string + +const ( + // Limit that applies to all pods in a namespace + LimitTypePod LimitType = "Pod" + // Limit that applies to all containers in a namespace + LimitTypeContainer LimitType = "Container" + // Limit that applies to all persistent volume claims in a namespace + LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim" +) + +// LimitRangeItem defines a min/max usage limit for any resource that matches on kind +type LimitRangeItem struct { + // Type of resource that this limit applies to + // +optional + Type LimitType + // Max usage constraints on this kind by resource name + // +optional + Max ResourceList + // Min usage constraints on this kind by resource name + // +optional + Min ResourceList + // Default resource requirement limit value by resource name. + // +optional + Default ResourceList + // DefaultRequest resource requirement request value by resource name. + // +optional + DefaultRequest ResourceList + // MaxLimitRequestRatio represents the max burst value for the named resource + // +optional + MaxLimitRequestRatio ResourceList +} + +// LimitRangeSpec defines a min/max usage limit for resources that match on kind +type LimitRangeSpec struct { + // Limits is the list of LimitRangeItem objects that are enforced + Limits []LimitRangeItem +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LimitRange sets resource usage limits for each kind of resource in a Namespace +type LimitRange struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the limits enforced + // +optional + Spec LimitRangeSpec +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LimitRangeList is a list of LimitRange items. 
+type LimitRangeList struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ListMeta
+
+	// Items is a list of LimitRange objects
+	Items []LimitRange
+}
+
+// The following identify resource constants for Kubernetes object types
+const (
+	// Pods, number
+	ResourcePods ResourceName = "pods"
+	// Services, number
+	ResourceServices ResourceName = "services"
+	// ReplicationControllers, number
+	ResourceReplicationControllers ResourceName = "replicationcontrollers"
+	// ResourceQuotas, number
+	ResourceQuotas ResourceName = "resourcequotas"
+	// ResourceSecrets, number
+	ResourceSecrets ResourceName = "secrets"
+	// ResourceConfigMaps, number
+	ResourceConfigMaps ResourceName = "configmaps"
+	// ResourcePersistentVolumeClaims, number
+	ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims"
+	// ResourceServicesNodePorts, number
+	ResourceServicesNodePorts ResourceName = "services.nodeports"
+	// ResourceServicesLoadBalancers, number
+	ResourceServicesLoadBalancers ResourceName = "services.loadbalancers"
+	// CPU request, in cores. (500m = .5 cores)
+	ResourceRequestsCPU ResourceName = "requests.cpu"
+	// Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	ResourceRequestsMemory ResourceName = "requests.memory"
+	// Storage request, in bytes
+	ResourceRequestsStorage ResourceName = "requests.storage"
+	// Local ephemeral storage request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	ResourceRequestsEphemeralStorage ResourceName = "requests.ephemeral-storage"
+	// CPU limit, in cores. (500m = .5 cores)
+	ResourceLimitsCPU ResourceName = "limits.cpu"
+	// Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	ResourceLimitsMemory ResourceName = "limits.memory"
+	// Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
+)
+
+// The following identify resource prefix for Kubernetes object types
+const (
+	// HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	// As burst is not supported for HugePages, we would only quota its request, and ignore the limit.
+	ResourceRequestsHugePagesPrefix = "requests.hugepages-"
+	// Default resource requests prefix
+	DefaultResourceRequestsPrefix = "requests."
+)
+
+// A ResourceQuotaScope defines a filter that must match each object tracked by a quota
+type ResourceQuotaScope string
+
+const (
+	// Match all pod objects where spec.activeDeadlineSeconds is set
+	ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating"
+	// Match all pod objects where spec.activeDeadlineSeconds is not set
+	ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating"
+	// Match all pod objects that have best effort quality of service
+	ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort"
+	// Match all pod objects that do not have best effort quality of service
+	ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort"
+	// Match all pod objects that have a priority class specified
+	ResourceQuotaScopePriorityClass ResourceQuotaScope = "PriorityClass"
+)
+
+// ResourceQuotaSpec defines the desired hard limits to enforce for Quota
+type ResourceQuotaSpec struct {
+	// Hard is the set of desired hard limits for each named resource
+	// +optional
+	Hard ResourceList
+	// A collection of filters that must match each object tracked by a quota.
+	// If not specified, the quota matches all objects.
+ // +optional + Scopes []ResourceQuotaScope + // ScopeSelector is also a collection of filters like Scopes that must match each object tracked by a quota + // but expressed using ScopeSelectorOperator in combination with possible values. + // +optional + ScopeSelector *ScopeSelector +} + +// A scope selector represents the AND of the selectors represented +// by the scoped-resource selector terms. +type ScopeSelector struct { + // A list of scope selector requirements by scope of the resources. + // +optional + MatchExpressions []ScopedResourceSelectorRequirement +} + +// A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator +// that relates the scope name and values. +type ScopedResourceSelectorRequirement struct { + // The name of the scope that the selector applies to. + ScopeName ResourceQuotaScope + // Represents a scope's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. + Operator ScopeSelectorOperator + // An array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. + // This array is replaced during a strategic merge patch. + // +optional + Values []string +} + +// A scope selector operator is the set of operators that can be used in +// a scope selector requirement. +type ScopeSelectorOperator string + +const ( + ScopeSelectorOpIn ScopeSelectorOperator = "In" + ScopeSelectorOpNotIn ScopeSelectorOperator = "NotIn" + ScopeSelectorOpExists ScopeSelectorOperator = "Exists" + ScopeSelectorOpDoesNotExist ScopeSelectorOperator = "DoesNotExist" +) + +// ResourceQuotaStatus defines the enforced hard limits and observed use +type ResourceQuotaStatus struct { + // Hard is the set of enforced hard limits for each named resource + // +optional + Hard ResourceList + // Used is the current observed total usage of the resource in the namespace + // +optional + Used ResourceList +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceQuota sets aggregate quota restrictions enforced per namespace +type ResourceQuota struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the desired quota + // +optional + Spec ResourceQuotaSpec + + // Status defines the actual enforced quota and its current usage + // +optional + Status ResourceQuotaStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceQuotaList is a list of ResourceQuota items +type ResourceQuotaList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is a list of ResourceQuota objects + Items []ResourceQuota +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Secret holds secret data of a certain type. The total bytes of the values in +// the Data field must be less than MaxSecretSize bytes. +type Secret struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Data contains the secret data. Each key must consist of alphanumeric + // characters, '-', '_' or '.'. The serialized form of the secret data is a + // base64 encoded string, representing the arbitrary (possibly non-string) + // data value here. + // +optional + Data map[string][]byte + + // Used to facilitate programmatic handling of secret data. 
+	// +optional
+	Type SecretType
+}
+
+const MaxSecretSize = 1 * 1024 * 1024
+
+type SecretType string
+
+const (
+	// SecretTypeOpaque is the default; arbitrary user-defined data
+	SecretTypeOpaque SecretType = "Opaque"
+
+	// SecretTypeServiceAccountToken contains a token that identifies a service account to the API
+	//
+	// Required fields:
+	// - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies
+	// - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies
+	// - Secret.Data["token"] - a token that identifies the service account to the API
+	SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token"
+
+	// ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
+	ServiceAccountNameKey = "kubernetes.io/service-account.name"
+	// ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
+	ServiceAccountUIDKey = "kubernetes.io/service-account.uid"
+	// ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets
+	ServiceAccountTokenKey = "token"
+	// ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets
+	ServiceAccountKubeconfigKey = "kubernetes.kubeconfig"
+	// ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets
+	ServiceAccountRootCAKey = "ca.crt"
+	// ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls
+	ServiceAccountNamespaceKey = "namespace"
+
+	// SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg
+	//
+	// Required fields:
+	// - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file
+	SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg"
+
+	// DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets
+	DockerConfigKey = ".dockercfg"
+
+	// SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json
+	//
+	// Required fields:
+	// - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file
+	SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson"
+
+	// DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets
+	DockerConfigJsonKey = ".dockerconfigjson"
+
+	// SecretTypeBasicAuth contains data needed for basic authentication.
+	//
+	// At least one of the following fields is required:
+	// - Secret.Data["username"] - username used for authentication
+	// - Secret.Data["password"] - password or token needed for authentication
+	SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth"
+
+	// BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets
+	BasicAuthUsernameKey = "username"
+	// BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets
+	BasicAuthPasswordKey = "password"
+
+	// SecretTypeSSHAuth contains data needed for SSH authentication.
+ // + // Required field: + // - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication + SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth" + + // SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets + SSHAuthPrivateKey = "ssh-privatekey" + + // SecretTypeTLS contains information about a TLS client or server secret. It + // is primarily used with TLS termination of the Ingress resource, but may be + // used in other types. + // + // Required fields: + // - Secret.Data["tls.key"] - TLS private key. + // Secret.Data["tls.crt"] - TLS certificate. + // TODO: Consider supporting different formats, specifying CA/destinationCA. + SecretTypeTLS SecretType = "kubernetes.io/tls" + + // TLSCertKey is the key for tls certificates in a TLS secret. + TLSCertKey = "tls.crt" + // TLSPrivateKeyKey is the key for the private key field in a TLS secret. + TLSPrivateKeyKey = "tls.key" + // SecretTypeBootstrapToken is used during the automated bootstrap process (first + // implemented by kubeadm). It stores tokens that are used to sign well known + // ConfigMaps. They are used for authn. + SecretTypeBootstrapToken SecretType = "bootstrap.kubernetes.io/token" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type SecretList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Secret +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConfigMap holds configuration data for components or applications to consume. +type ConfigMap struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Data contains the configuration data. + // Each key must consist of alphanumeric characters, '-', '_' or '.'. + // Values with non-UTF-8 byte sequences must use the BinaryData field. + // The keys stored in Data must not overlap with the keys in + // the BinaryData field, this is enforced during validation process. + // +optional + Data map[string]string + + // BinaryData contains the binary data. + // Each key must consist of alphanumeric characters, '-', '_' or '.'. + // BinaryData can contain byte sequences that are not in the UTF-8 range. + // The keys stored in BinaryData must not overlap with the ones in + // the Data field, this is enforced during validation process. + // Using this field will require 1.10+ apiserver and + // kubelet. + // +optional + BinaryData map[string][]byte +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConfigMapList is a resource containing a list of ConfigMap objects. +type ConfigMapList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is the list of ConfigMaps. + Items []ConfigMap +} + +// These constants are for remote command execution and port forwarding and are +// used by both the client side and server side components. +// +// This is probably not the ideal place for them, but it didn't seem worth it +// to create pkg/exec and pkg/portforward just to contain a single file with +// constants in it. Suggestions for more appropriate alternatives are +// definitely welcome! 
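As a minimal sketch (not part of the vendored file), these parameters are typically used as URL query values when initiating a remote exec call; the constant names referenced here are defined in the block that follows:

package main

import (
	"fmt"
	"net/url"

	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	// Build the query portion of an exec request: enable stdout and run "ls -l".
	q := url.Values{}
	q.Set(core.ExecStdoutParam, "1")
	q.Add(core.ExecCommandParam, "ls")
	q.Add(core.ExecCommandParam, "-l")
	fmt.Println(q.Encode()) // command=ls&command=-l&output=1
}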
+const ( + // Enable stdin for remote command execution + ExecStdinParam = "input" + // Enable stdout for remote command execution + ExecStdoutParam = "output" + // Enable stderr for remote command execution + ExecStderrParam = "error" + // Enable TTY for remote command execution + ExecTTYParam = "tty" + // Command to run for remote command execution + ExecCommandParam = "command" + + // Name of header that specifies stream type + StreamType = "streamType" + // Value for streamType header for stdin stream + StreamTypeStdin = "stdin" + // Value for streamType header for stdout stream + StreamTypeStdout = "stdout" + // Value for streamType header for stderr stream + StreamTypeStderr = "stderr" + // Value for streamType header for data stream + StreamTypeData = "data" + // Value for streamType header for error stream + StreamTypeError = "error" + // Value for streamType header for terminal resize stream + StreamTypeResize = "resize" + + // Name of header that specifies the port being forwarded + PortHeader = "port" + // Name of header that specifies a request ID used to associate the error + // and data streams for a single forwarded connection + PortForwardRequestIDHeader = "requestID" +) + +// Type and constants for component health validation. +type ComponentConditionType string + +// These are the valid conditions for the component. +const ( + ComponentHealthy ComponentConditionType = "Healthy" +) + +type ComponentCondition struct { + Type ComponentConditionType + Status ConditionStatus + // +optional + Message string + // +optional + Error string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ComponentStatus (and ComponentStatusList) holds the cluster validation info. +type ComponentStatus struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // +optional + Conditions []ComponentCondition +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type ComponentStatusList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ComponentStatus +} + +// SecurityContext holds security configuration that will be applied to a container. +// Some fields are present in both SecurityContext and PodSecurityContext. When both +// are set, the values in SecurityContext take precedence. +type SecurityContext struct { + // The capabilities to add/drop when running containers. + // Defaults to the default set of capabilities granted by the container runtime. + // +optional + Capabilities *Capabilities + // Run container in privileged mode. + // Processes in privileged containers are essentially equivalent to root on the host. + // Defaults to false. + // +optional + Privileged *bool + // The SELinux context to be applied to the container. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + SELinuxOptions *SELinuxOptions + // Windows security options. + // +optional + WindowsOptions *WindowsSecurityContextOptions + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsUser *int64 + // The GID to run the entrypoint of the container process. 
+	// Uses runtime default if unset.
+	// May also be set in PodSecurityContext. If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// +optional
+	RunAsGroup *int64
+	// Indicates that the container must run as a non-root user.
+	// If true, the Kubelet will validate the image at runtime to ensure that it
+	// does not run as UID 0 (root) and fail to start the container if it does.
+	// If unset or false, no such validation will be performed.
+	// May also be set in PodSecurityContext. If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// +optional
+	RunAsNonRoot *bool
+	// The read-only root filesystem allows you to restrict the locations that an application can write
+	// files to, ensuring the persistent data can only be written to mounts.
+	// +optional
+	ReadOnlyRootFilesystem *bool
+	// AllowPrivilegeEscalation controls whether a process can gain more
+	// privileges than its parent process. This bool directly controls if
+	// the no_new_privs flag will be set on the container process.
+	// +optional
+	AllowPrivilegeEscalation *bool
+	// ProcMount denotes the type of proc mount to use for the containers.
+	// The default is DefaultProcMount which uses the container runtime defaults for
+	// readonly paths and masked paths.
+	// +optional
+	ProcMount *ProcMountType
+}
+
+type ProcMountType string
+
+const (
+	// DefaultProcMount uses the container runtime defaults for readonly and masked
+	// paths for /proc. Most container runtimes mask certain paths in /proc to avoid
+	// accidental security exposure of special devices or information.
+	DefaultProcMount ProcMountType = "Default"
+
+	// UnmaskedProcMount bypasses the default masking behavior of the container
+	// runtime and ensures the newly created /proc for the container stays intact
+	// with no modifications.
+	UnmaskedProcMount ProcMountType = "Unmasked"
+)
+
+// SELinuxOptions are the labels to be applied to the container.
+type SELinuxOptions struct {
+	// SELinux user label
+	// +optional
+	User string
+	// SELinux role label
+	// +optional
+	Role string
+	// SELinux type label
+	// +optional
+	Type string
+	// SELinux level label.
+	// +optional
+	Level string
+}
+
+// WindowsSecurityContextOptions contain Windows-specific options and credentials.
+type WindowsSecurityContextOptions struct {
+	// GMSACredentialSpecName is the name of the GMSA credential spec to use.
+	// This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.
+	// +optional
+	GMSACredentialSpecName *string
+
+	// GMSACredentialSpec is where the GMSA admission webhook
+	// (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+	// GMSA credential spec named by the GMSACredentialSpecName field.
+	// This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.
+	// +optional
+	GMSACredentialSpec *string
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RangeAllocation is an opaque API object (not exposed to end users) that can be persisted to record
+// the global allocation state of the cluster. The schema of Range and Data is generic, in that Range
+// should be a string representation of the inputs to a range (for instance, for IP allocation it
+// might be a CIDR) and Data is an opaque blob understood by an allocator which is typically a
+// binary range. Consumers should use annotations to record additional information (schema version,
+// data encoding hints). A range allocation should *ALWAYS* be recreatable at any time by observation
+// of the cluster, thus the object is less strongly typed than most.
+type RangeAllocation struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ObjectMeta
+	// A string representing a unique label for a range of resources, such as a CIDR "10.0.0.0/8" or
+	// port range "10000-30000". Range is not strongly schema'd here. The Range is expected to define
+	// a start and end unless there is an implicit end.
+	Range string
+	// A byte array representing the serialized state of a range allocation. Additional clarifiers on
+	// the type or format of data should be represented with annotations. For IP allocations, this is
+	// represented as a bit array starting at the base IP of the CIDR in Range, with each bit representing
+	// a single allocated address (the fifth bit on CIDR 10.0.0.0/8 is 10.0.0.4).
+	Data []byte
+}
+
+const (
+	// "default-scheduler" is the name of the default scheduler.
+	DefaultSchedulerName = "default-scheduler"
+
+	// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
+	// corresponding to every RequiredDuringScheduling affinity rule.
+	// When the --hard-pod-affinity-weight scheduler flag is not specified,
+	// DefaultHardPodAffinitySymmetricWeight defines the weight of the implicit PreferredDuringScheduling affinity rule.
+	DefaultHardPodAffinitySymmetricWeight int32 = 1
+)
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/BUILD
new file mode 100644
index 0000000000..79483ac3d2
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/BUILD
@@ -0,0 +1,52 @@
+package(default_visibility = ["//visibility:public"])
+
+load(
+    "@io_bazel_rules_go//go:def.bzl",
+    "go_library",
+    "go_test",
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = ["helpers_test.go"],
+    embed = [":go_default_library"],
+    deps = [
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
+    ],
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = ["helpers.go"],
+    importpath = "k8s.io/kubernetes/pkg/apis/core/v1/helper",
+    deps = [
+        "//pkg/apis/core/helper:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/selection:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library",
+    ],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [
+        ":package-srcs",
+        "//pkg/apis/core/v1/helper/qos:all-srcs",
+    ],
+    tags = ["automanaged"],
+)
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go
new file mode 100644
index 0000000000..e1dff4fdcf
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go
@@ -0,0 +1,510 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package helper
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/selection"
+	"k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/kubernetes/pkg/apis/core/helper"
+)
+
+// IsExtendedResourceName returns true if:
+// 1. the resource name is not in the default namespace;
+// 2. the resource name does not have the "requests." prefix,
+// to avoid confusion with the convention in quota;
+// 3. it satisfies the rules in IsQualifiedName() after being converted into a quota resource name.
+func IsExtendedResourceName(name v1.ResourceName) bool {
+	if IsNativeResource(name) || strings.HasPrefix(string(name), v1.DefaultResourceRequestsPrefix) {
+		return false
+	}
+	// Ensure it satisfies the rules in IsQualifiedName() after being converted into a quota resource name
+	nameForQuota := fmt.Sprintf("%s%s", v1.DefaultResourceRequestsPrefix, string(name))
+	if errs := validation.IsQualifiedName(string(nameForQuota)); len(errs) != 0 {
+		return false
+	}
+	return true
+}
+
+// IsPrefixedNativeResource returns true if the resource name is in the
+// *kubernetes.io/ namespace.
+func IsPrefixedNativeResource(name v1.ResourceName) bool {
+	return strings.Contains(string(name), v1.ResourceDefaultNamespacePrefix)
+}
+
+// IsNativeResource returns true if the resource name is in the
+// *kubernetes.io/ namespace. Partially-qualified (unprefixed) names are
+// implicitly in the kubernetes.io/ namespace.
+func IsNativeResource(name v1.ResourceName) bool {
+	return !strings.Contains(string(name), "/") ||
+		IsPrefixedNativeResource(name)
+}
+
+// IsHugePageResourceName returns true if the resource name has the huge page
+// resource prefix.
+func IsHugePageResourceName(name v1.ResourceName) bool {
+	return strings.HasPrefix(string(name), v1.ResourceHugePagesPrefix)
+}
+
+// HugePageResourceName returns a ResourceName with the canonical hugepage
+// prefix prepended for the specified page size. The page size is converted
+// to its canonical representation.
+func HugePageResourceName(pageSize resource.Quantity) v1.ResourceName {
+	return v1.ResourceName(fmt.Sprintf("%s%s", v1.ResourceHugePagesPrefix, pageSize.String()))
+}
+
+// HugePageSizeFromResourceName returns the page size for the specified huge page
+// resource name. If the specified input is not a valid huge page resource name,
+// an error is returned.
+func HugePageSizeFromResourceName(name v1.ResourceName) (resource.Quantity, error) {
+	if !IsHugePageResourceName(name) {
+		return resource.Quantity{}, fmt.Errorf("resource name: %s is an invalid hugepage name", name)
+	}
+	pageSize := strings.TrimPrefix(string(name), v1.ResourceHugePagesPrefix)
+	return resource.ParseQuantity(pageSize)
+}
+
+// IsOvercommitAllowed returns true if the resource is in the default
+// namespace and is not hugepages.
+func IsOvercommitAllowed(name v1.ResourceName) bool {
+	return IsNativeResource(name) &&
+		!IsHugePageResourceName(name)
+}
+
+// IsAttachableVolumeResourceName returns true if the resource name has the
+// attachable volume prefix.
+func IsAttachableVolumeResourceName(name v1.ResourceName) bool {
+	return strings.HasPrefix(string(name), v1.ResourceAttachableVolumesPrefix)
+}
+
+// IsScalarResourceName returns true if the resource name is an extended,
+// hugepage, prefixed-native, or attachable-volume resource.
+func IsScalarResourceName(name v1.ResourceName) bool {
+	return IsExtendedResourceName(name) || IsHugePageResourceName(name) ||
+		IsPrefixedNativeResource(name) || IsAttachableVolumeResourceName(name)
+}
+
+// IsServiceIPSet checks whether the service's ClusterIP is set;
+// it does not validate the value.
+func IsServiceIPSet(service *v1.Service) bool {
+	return service.Spec.ClusterIP != v1.ClusterIPNone && service.Spec.ClusterIP != ""
+}
+
+// LoadBalancerStatusEqual reports whether two LoadBalancerStatus values contain the same ingress points.
+// TODO: make method on LoadBalancerStatus?
+func LoadBalancerStatusEqual(l, r *v1.LoadBalancerStatus) bool {
+	return ingressSliceEqual(l.Ingress, r.Ingress)
+}
+
+func ingressSliceEqual(lhs, rhs []v1.LoadBalancerIngress) bool {
+	if len(lhs) != len(rhs) {
+		return false
+	}
+	for i := range lhs {
+		if !ingressEqual(&lhs[i], &rhs[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+func ingressEqual(lhs, rhs *v1.LoadBalancerIngress) bool {
+	if lhs.IP != rhs.IP {
+		return false
+	}
+	if lhs.Hostname != rhs.Hostname {
+		return false
+	}
+	return true
+}
+
+// LoadBalancerStatusDeepCopy returns a deep copy of the given LoadBalancerStatus.
+// TODO: make method on LoadBalancerStatus?
+func LoadBalancerStatusDeepCopy(lb *v1.LoadBalancerStatus) *v1.LoadBalancerStatus {
+	c := &v1.LoadBalancerStatus{}
+	c.Ingress = make([]v1.LoadBalancerIngress, len(lb.Ingress))
+	for i := range lb.Ingress {
+		c.Ingress[i] = lb.Ingress[i]
+	}
+	return c
+}
+
+// GetAccessModesAsString returns a string representation of an array of access modes.
+// Modes, when present, are always in the same order: RWO,ROX,RWX.
+func GetAccessModesAsString(modes []v1.PersistentVolumeAccessMode) string {
+	modes = removeDuplicateAccessModes(modes)
+	modesStr := []string{}
+	if containsAccessMode(modes, v1.ReadWriteOnce) {
+		modesStr = append(modesStr, "RWO")
+	}
+	if containsAccessMode(modes, v1.ReadOnlyMany) {
+		modesStr = append(modesStr, "ROX")
+	}
+	if containsAccessMode(modes, v1.ReadWriteMany) {
+		modesStr = append(modesStr, "RWX")
+	}
+	return strings.Join(modesStr, ",")
+}
+
+// GetAccessModesFromString returns an array of AccessModes from a string created by GetAccessModesAsString.
+func GetAccessModesFromString(modes string) []v1.PersistentVolumeAccessMode {
+	strmodes := strings.Split(modes, ",")
+	accessModes := []v1.PersistentVolumeAccessMode{}
+	for _, s := range strmodes {
+		s = strings.Trim(s, " ")
+		switch {
+		case s == "RWO":
+			accessModes = append(accessModes, v1.ReadWriteOnce)
+		case s == "ROX":
+			accessModes = append(accessModes, v1.ReadOnlyMany)
+		case s == "RWX":
+			accessModes = append(accessModes, v1.ReadWriteMany)
+		}
+	}
+	return accessModes
+}
+
+// removeDuplicateAccessModes returns an array of access modes without any duplicates
+func removeDuplicateAccessModes(modes []v1.PersistentVolumeAccessMode) []v1.PersistentVolumeAccessMode {
+	accessModes := []v1.PersistentVolumeAccessMode{}
+	for _, m := range modes {
+		if !containsAccessMode(accessModes, m) {
+			accessModes = append(accessModes, m)
+		}
+	}
+	return accessModes
+}
+
+func containsAccessMode(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
+	for _, m := range modes {
+		if m == mode {
+			return true
+		}
+	}
+	return false
+}
+
+// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements
+// labels.Selector.
+func NodeSelectorRequirementsAsSelector(nsm []v1.NodeSelectorRequirement) (labels.Selector, error) {
+	if len(nsm) == 0 {
+		return labels.Nothing(), nil
+	}
+	selector := labels.NewSelector()
+	for _, expr := range nsm {
+		var op selection.Operator
+		switch expr.Operator {
+		case v1.NodeSelectorOpIn:
+			op = selection.In
+		case v1.NodeSelectorOpNotIn:
+			op = selection.NotIn
+		case v1.NodeSelectorOpExists:
+			op = selection.Exists
+		case v1.NodeSelectorOpDoesNotExist:
+			op = selection.DoesNotExist
+		case v1.NodeSelectorOpGt:
+			op = selection.GreaterThan
+		case v1.NodeSelectorOpLt:
+			op = selection.LessThan
+		default:
+			return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator)
+		}
+		r, err := labels.NewRequirement(expr.Key, op, expr.Values)
+		if err != nil {
+			return nil, err
+		}
+		selector = selector.Add(*r)
+	}
+	return selector, nil
+}
+
+// NodeSelectorRequirementsAsFieldSelector converts the []NodeSelectorRequirement core type into a struct that implements
+// fields.Selector.
+func NodeSelectorRequirementsAsFieldSelector(nsm []v1.NodeSelectorRequirement) (fields.Selector, error) {
+	if len(nsm) == 0 {
+		return fields.Nothing(), nil
+	}
+
+	selectors := []fields.Selector{}
+	for _, expr := range nsm {
+		switch expr.Operator {
+		case v1.NodeSelectorOpIn:
+			if len(expr.Values) != 1 {
+				return nil, fmt.Errorf("unexpected number of values (%d) for node field selector operator %q",
+					len(expr.Values), expr.Operator)
+			}
+			selectors = append(selectors, fields.OneTermEqualSelector(expr.Key, expr.Values[0]))
+
+		case v1.NodeSelectorOpNotIn:
+			if len(expr.Values) != 1 {
+				return nil, fmt.Errorf("unexpected number of values (%d) for node field selector operator %q",
+					len(expr.Values), expr.Operator)
+			}
+			selectors = append(selectors, fields.OneTermNotEqualSelector(expr.Key, expr.Values[0]))
+
+		default:
+			return nil, fmt.Errorf("%q is not a valid node field selector operator", expr.Operator)
+		}
+	}
+
+	return fields.AndSelectors(selectors...), nil
+}
+
+// NodeSelectorRequirementKeysExistInNodeSelectorTerms checks if a NodeSelectorTerm with key is already specified in terms
+func NodeSelectorRequirementKeysExistInNodeSelectorTerms(reqs []v1.NodeSelectorRequirement, terms []v1.NodeSelectorTerm) bool {
+	for _, req := range reqs {
+		for _, term := range terms {
+			for _, r := range term.MatchExpressions {
+				if r.Key == req.Key {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
+
+// MatchNodeSelectorTerms checks whether the node labels and fields match any of the
+// ORed node selector terms; a nil or empty term matches no objects.
+func MatchNodeSelectorTerms(
+	nodeSelectorTerms []v1.NodeSelectorTerm,
+	nodeLabels labels.Set,
+	nodeFields fields.Set,
+) bool {
+	for _, req := range nodeSelectorTerms {
+		// nil or empty term selects no objects
+		if len(req.MatchExpressions) == 0 && len(req.MatchFields) == 0 {
+			continue
+		}
+
+		if len(req.MatchExpressions) != 0 {
+			labelSelector, err := NodeSelectorRequirementsAsSelector(req.MatchExpressions)
+			if err != nil || !labelSelector.Matches(nodeLabels) {
+				continue
+			}
+		}
+
+		if len(req.MatchFields) != 0 {
+			fieldSelector, err := NodeSelectorRequirementsAsFieldSelector(req.MatchFields)
+			if err != nil || !fieldSelector.Matches(nodeFields) {
+				continue
+			}
+		}
+
+		return true
+	}
+
+	return false
+}
+
+// TopologySelectorRequirementsAsSelector converts the []TopologySelectorLabelRequirement api type into a struct
+// that implements labels.Selector.
+func TopologySelectorRequirementsAsSelector(tsm []v1.TopologySelectorLabelRequirement) (labels.Selector, error) {
+	if len(tsm) == 0 {
+		return labels.Nothing(), nil
+	}
+
+	selector := labels.NewSelector()
+	for _, expr := range tsm {
+		r, err := labels.NewRequirement(expr.Key, selection.In, expr.Values)
+		if err != nil {
+			return nil, err
+		}
+		selector = selector.Add(*r)
+	}
+
+	return selector, nil
+}
+
+// MatchTopologySelectorTerms checks whether the given labels match any of the ORed topology
+// selector terms; a nil or empty term matches no objects, while an empty term list matches all objects.
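A minimal sketch (not part of the vendored file) of MatchNodeSelectorTerms above; the "disktype" label key and value are illustrative:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
)

func main() {
	terms := []v1.NodeSelectorTerm{{
		MatchExpressions: []v1.NodeSelectorRequirement{{
			Key:      "disktype",
			Operator: v1.NodeSelectorOpIn,
			Values:   []string{"ssd"},
		}},
	}}
	// A node whose labels satisfy any one term matches.
	nodeLabels := labels.Set{"disktype": "ssd"}
	fmt.Println(helper.MatchNodeSelectorTerms(terms, nodeLabels, fields.Set{})) // true
}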
+func MatchTopologySelectorTerms(topologySelectorTerms []v1.TopologySelectorTerm, lbls labels.Set) bool {
+	if len(topologySelectorTerms) == 0 {
+		// empty term list matches all objects
+		return true
+	}
+
+	for _, req := range topologySelectorTerms {
+		// nil or empty term selects no objects
+		if len(req.MatchLabelExpressions) == 0 {
+			continue
+		}
+
+		labelSelector, err := TopologySelectorRequirementsAsSelector(req.MatchLabelExpressions)
+		if err != nil || !labelSelector.Matches(lbls) {
+			continue
+		}
+
+		return true
+	}
+
+	return false
+}
+
+// AddOrUpdateTolerationInPodSpec tries to add a toleration to the toleration list in PodSpec.
+// Returns true if something was updated, false otherwise.
+func AddOrUpdateTolerationInPodSpec(spec *v1.PodSpec, toleration *v1.Toleration) bool {
+	podTolerations := spec.Tolerations
+
+	var newTolerations []v1.Toleration
+	updated := false
+	for i := range podTolerations {
+		if toleration.MatchToleration(&podTolerations[i]) {
+			if helper.Semantic.DeepEqual(toleration, podTolerations[i]) {
+				return false
+			}
+			newTolerations = append(newTolerations, *toleration)
+			updated = true
+			continue
+		}
+
+		newTolerations = append(newTolerations, podTolerations[i])
+	}
+
+	if !updated {
+		newTolerations = append(newTolerations, *toleration)
+	}
+
+	spec.Tolerations = newTolerations
+	return true
+}
+
+// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list.
+// Returns true if something was updated, false otherwise.
+func AddOrUpdateTolerationInPod(pod *v1.Pod, toleration *v1.Toleration) bool {
+	return AddOrUpdateTolerationInPodSpec(&pod.Spec, toleration)
+}
+
+// TolerationsTolerateTaint checks if taint is tolerated by any of the tolerations.
+func TolerationsTolerateTaint(tolerations []v1.Toleration, taint *v1.Taint) bool {
+	for i := range tolerations {
+		if tolerations[i].ToleratesTaint(taint) {
+			return true
+		}
+	}
+	return false
+}
+
+// taintsFilterFunc reports whether a taint should be considered when matching tolerations.
+type taintsFilterFunc func(*v1.Taint) bool
+
+// TolerationsTolerateTaintsWithFilter checks whether the given tolerations tolerate
+// all taints in the given taint list that pass the filter.
+func TolerationsTolerateTaintsWithFilter(tolerations []v1.Toleration, taints []v1.Taint, applyFilter taintsFilterFunc) bool {
+	if len(taints) == 0 {
+		return true
+	}
+
+	for i := range taints {
+		if applyFilter != nil && !applyFilter(&taints[i]) {
+			continue
+		}
+
+		if !TolerationsTolerateTaint(tolerations, &taints[i]) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// GetMatchingTolerations returns true and a list of Tolerations matching all Taints if all are tolerated, or false otherwise.
+func GetMatchingTolerations(taints []v1.Taint, tolerations []v1.Toleration) (bool, []v1.Toleration) { + if len(taints) == 0 { + return true, []v1.Toleration{} + } + if len(tolerations) == 0 && len(taints) > 0 { + return false, []v1.Toleration{} + } + result := []v1.Toleration{} + for i := range taints { + tolerated := false + for j := range tolerations { + if tolerations[j].ToleratesTaint(&taints[i]) { + result = append(result, tolerations[j]) + tolerated = true + break + } + } + if !tolerated { + return false, []v1.Toleration{} + } + } + return true, result +} + +func GetAvoidPodsFromNodeAnnotations(annotations map[string]string) (v1.AvoidPods, error) { + var avoidPods v1.AvoidPods + if len(annotations) > 0 && annotations[v1.PreferAvoidPodsAnnotationKey] != "" { + err := json.Unmarshal([]byte(annotations[v1.PreferAvoidPodsAnnotationKey]), &avoidPods) + if err != nil { + return avoidPods, err + } + } + return avoidPods, nil +} + +// GetPersistentVolumeClass returns StorageClassName. +func GetPersistentVolumeClass(volume *v1.PersistentVolume) string { + // Use beta annotation first + if class, found := volume.Annotations[v1.BetaStorageClassAnnotation]; found { + return class + } + + return volume.Spec.StorageClassName +} + +// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was +// requested, it returns "". +func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string { + // Use beta annotation first + if class, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found { + return class + } + + if claim.Spec.StorageClassName != nil { + return *claim.Spec.StorageClassName + } + + return "" +} + +// ScopedResourceSelectorRequirementsAsSelector converts the ScopedResourceSelectorRequirement api type into a struct that implements +// labels.Selector. +func ScopedResourceSelectorRequirementsAsSelector(ssr v1.ScopedResourceSelectorRequirement) (labels.Selector, error) { + selector := labels.NewSelector() + var op selection.Operator + switch ssr.Operator { + case v1.ScopeSelectorOpIn: + op = selection.In + case v1.ScopeSelectorOpNotIn: + op = selection.NotIn + case v1.ScopeSelectorOpExists: + op = selection.Exists + case v1.ScopeSelectorOpDoesNotExist: + op = selection.DoesNotExist + default: + return nil, fmt.Errorf("%q is not a valid scope selector operator", ssr.Operator) + } + r, err := labels.NewRequirement(string(ssr.ScopeName), op, ssr.Values) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + return selector, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go new file mode 100644 index 0000000000..ae3343d0ab --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go @@ -0,0 +1,5504 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package core + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSElasticBlockStoreVolumeSource) DeepCopyInto(out *AWSElasticBlockStoreVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSElasticBlockStoreVolumeSource. +func (in *AWSElasticBlockStoreVolumeSource) DeepCopy() *AWSElasticBlockStoreVolumeSource { + if in == nil { + return nil + } + out := new(AWSElasticBlockStoreVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Affinity) DeepCopyInto(out *Affinity) { + *out = *in + if in.NodeAffinity != nil { + in, out := &in.NodeAffinity, &out.NodeAffinity + *out = new(NodeAffinity) + (*in).DeepCopyInto(*out) + } + if in.PodAffinity != nil { + in, out := &in.PodAffinity, &out.PodAffinity + *out = new(PodAffinity) + (*in).DeepCopyInto(*out) + } + if in.PodAntiAffinity != nil { + in, out := &in.PodAntiAffinity, &out.PodAntiAffinity + *out = new(PodAntiAffinity) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Affinity. +func (in *Affinity) DeepCopy() *Affinity { + if in == nil { + return nil + } + out := new(Affinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AttachedVolume) DeepCopyInto(out *AttachedVolume) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedVolume. +func (in *AttachedVolume) DeepCopy() *AttachedVolume { + if in == nil { + return nil + } + out := new(AttachedVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AvoidPods) DeepCopyInto(out *AvoidPods) { + *out = *in + if in.PreferAvoidPods != nil { + in, out := &in.PreferAvoidPods, &out.PreferAvoidPods + *out = make([]PreferAvoidPodsEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvoidPods. +func (in *AvoidPods) DeepCopy() *AvoidPods { + if in == nil { + return nil + } + out := new(AvoidPods) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureDiskVolumeSource) DeepCopyInto(out *AzureDiskVolumeSource) { + *out = *in + if in.CachingMode != nil { + in, out := &in.CachingMode, &out.CachingMode + *out = new(AzureDataDiskCachingMode) + **out = **in + } + if in.FSType != nil { + in, out := &in.FSType, &out.FSType + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(AzureDataDiskKind) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDiskVolumeSource. 
+func (in *AzureDiskVolumeSource) DeepCopy() *AzureDiskVolumeSource { + if in == nil { + return nil + } + out := new(AzureDiskVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureFilePersistentVolumeSource) DeepCopyInto(out *AzureFilePersistentVolumeSource) { + *out = *in + if in.SecretNamespace != nil { + in, out := &in.SecretNamespace, &out.SecretNamespace + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFilePersistentVolumeSource. +func (in *AzureFilePersistentVolumeSource) DeepCopy() *AzureFilePersistentVolumeSource { + if in == nil { + return nil + } + out := new(AzureFilePersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureFileVolumeSource) DeepCopyInto(out *AzureFileVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFileVolumeSource. +func (in *AzureFileVolumeSource) DeepCopy() *AzureFileVolumeSource { + if in == nil { + return nil + } + out := new(AzureFileVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Binding) DeepCopyInto(out *Binding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Target = in.Target + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Binding. +func (in *Binding) DeepCopy() *Binding { + if in == nil { + return nil + } + out := new(Binding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Binding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource) { + *out = *in + if in.VolumeAttributes != nil { + in, out := &in.VolumeAttributes, &out.VolumeAttributes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ControllerPublishSecretRef != nil { + in, out := &in.ControllerPublishSecretRef, &out.ControllerPublishSecretRef + *out = new(SecretReference) + **out = **in + } + if in.NodeStageSecretRef != nil { + in, out := &in.NodeStageSecretRef, &out.NodeStageSecretRef + *out = new(SecretReference) + **out = **in + } + if in.NodePublishSecretRef != nil { + in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef + *out = new(SecretReference) + **out = **in + } + if in.ControllerExpandSecretRef != nil { + in, out := &in.ControllerExpandSecretRef, &out.ControllerExpandSecretRef + *out = new(SecretReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIPersistentVolumeSource. 
+func (in *CSIPersistentVolumeSource) DeepCopy() *CSIPersistentVolumeSource { + if in == nil { + return nil + } + out := new(CSIPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIVolumeSource) DeepCopyInto(out *CSIVolumeSource) { + *out = *in + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + if in.FSType != nil { + in, out := &in.FSType, &out.FSType + *out = new(string) + **out = **in + } + if in.VolumeAttributes != nil { + in, out := &in.VolumeAttributes, &out.VolumeAttributes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NodePublishSecretRef != nil { + in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef + *out = new(LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIVolumeSource. +func (in *CSIVolumeSource) DeepCopy() *CSIVolumeSource { + if in == nil { + return nil + } + out := new(CSIVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Capabilities) DeepCopyInto(out *Capabilities) { + *out = *in + if in.Add != nil { + in, out := &in.Add, &out.Add + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + if in.Drop != nil { + in, out := &in.Drop, &out.Drop + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Capabilities. +func (in *Capabilities) DeepCopy() *Capabilities { + if in == nil { + return nil + } + out := new(Capabilities) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephFSPersistentVolumeSource) DeepCopyInto(out *CephFSPersistentVolumeSource) { + *out = *in + if in.Monitors != nil { + in, out := &in.Monitors, &out.Monitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSPersistentVolumeSource. +func (in *CephFSPersistentVolumeSource) DeepCopy() *CephFSPersistentVolumeSource { + if in == nil { + return nil + } + out := new(CephFSPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephFSVolumeSource) DeepCopyInto(out *CephFSVolumeSource) { + *out = *in + if in.Monitors != nil { + in, out := &in.Monitors, &out.Monitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSVolumeSource. +func (in *CephFSVolumeSource) DeepCopy() *CephFSVolumeSource { + if in == nil { + return nil + } + out := new(CephFSVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
+func (in *CinderPersistentVolumeSource) DeepCopyInto(out *CinderPersistentVolumeSource) {
+	*out = *in
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderPersistentVolumeSource.
+func (in *CinderPersistentVolumeSource) DeepCopy() *CinderPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(CinderPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CinderVolumeSource) DeepCopyInto(out *CinderVolumeSource) {
+	*out = *in
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(LocalObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderVolumeSource.
+func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(CinderVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) {
+	*out = *in
+	if in.TimeoutSeconds != nil {
+		in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientIPConfig.
+func (in *ClientIPConfig) DeepCopy() *ClientIPConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(ClientIPConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ComponentCondition) DeepCopyInto(out *ComponentCondition) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentCondition.
+func (in *ComponentCondition) DeepCopy() *ComponentCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(ComponentCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ComponentStatus) DeepCopyInto(out *ComponentStatus) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]ComponentCondition, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatus.
+func (in *ComponentStatus) DeepCopy() *ComponentStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ComponentStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ComponentStatus) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ComponentStatusList) DeepCopyInto(out *ComponentStatusList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ComponentStatus, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatusList.
+func (in *ComponentStatusList) DeepCopy() *ComponentStatusList {
+	if in == nil {
+		return nil
+	}
+	out := new(ComponentStatusList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ComponentStatusList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMap) DeepCopyInto(out *ConfigMap) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Data != nil {
+		in, out := &in.Data, &out.Data
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.BinaryData != nil {
+		in, out := &in.BinaryData, &out.BinaryData
+		*out = make(map[string][]byte, len(*in))
+		for key, val := range *in {
+			var outVal []byte
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = make([]byte, len(*in))
+				copy(*out, *in)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMap.
+func (in *ConfigMap) DeepCopy() *ConfigMap {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigMap)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ConfigMap) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapEnvSource) DeepCopyInto(out *ConfigMapEnvSource) {
+	*out = *in
+	out.LocalObjectReference = in.LocalObjectReference
+	if in.Optional != nil {
+		in, out := &in.Optional, &out.Optional
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapEnvSource.
+func (in *ConfigMapEnvSource) DeepCopy() *ConfigMapEnvSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigMapEnvSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapKeySelector) DeepCopyInto(out *ConfigMapKeySelector) {
+	*out = *in
+	out.LocalObjectReference = in.LocalObjectReference
+	if in.Optional != nil {
+		in, out := &in.Optional, &out.Optional
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapKeySelector.
+func (in *ConfigMapKeySelector) DeepCopy() *ConfigMapKeySelector {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigMapKeySelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapList) DeepCopyInto(out *ConfigMapList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ConfigMap, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapList.
+func (in *ConfigMapList) DeepCopy() *ConfigMapList {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigMapList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ConfigMapList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapNodeConfigSource) DeepCopyInto(out *ConfigMapNodeConfigSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapNodeConfigSource.
+func (in *ConfigMapNodeConfigSource) DeepCopy() *ConfigMapNodeConfigSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigMapNodeConfigSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapProjection) DeepCopyInto(out *ConfigMapProjection) {
+	*out = *in
+	out.LocalObjectReference = in.LocalObjectReference
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]KeyToPath, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Optional != nil {
+		in, out := &in.Optional, &out.Optional
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapProjection.
+func (in *ConfigMapProjection) DeepCopy() *ConfigMapProjection {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigMapProjection)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapVolumeSource) DeepCopyInto(out *ConfigMapVolumeSource) {
+	*out = *in
+	out.LocalObjectReference = in.LocalObjectReference
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]KeyToPath, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DefaultMode != nil {
+		in, out := &in.DefaultMode, &out.DefaultMode
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Optional != nil {
+		in, out := &in.Optional, &out.Optional
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapVolumeSource.
+func (in *ConfigMapVolumeSource) DeepCopy() *ConfigMapVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigMapVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Container) DeepCopyInto(out *Container) {
+	*out = *in
+	if in.Command != nil {
+		in, out := &in.Command, &out.Command
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Args != nil {
+		in, out := &in.Args, &out.Args
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Ports != nil {
+		in, out := &in.Ports, &out.Ports
+		*out = make([]ContainerPort, len(*in))
+		copy(*out, *in)
+	}
+	if in.EnvFrom != nil {
+		in, out := &in.EnvFrom, &out.EnvFrom
+		*out = make([]EnvFromSource, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Env != nil {
+		in, out := &in.Env, &out.Env
+		*out = make([]EnvVar, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	in.Resources.DeepCopyInto(&out.Resources)
+	if in.VolumeMounts != nil {
+		in, out := &in.VolumeMounts, &out.VolumeMounts
+		*out = make([]VolumeMount, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.VolumeDevices != nil {
+		in, out := &in.VolumeDevices, &out.VolumeDevices
+		*out = make([]VolumeDevice, len(*in))
+		copy(*out, *in)
+	}
+	if in.LivenessProbe != nil {
+		in, out := &in.LivenessProbe, &out.LivenessProbe
+		*out = new(Probe)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ReadinessProbe != nil {
+		in, out := &in.ReadinessProbe, &out.ReadinessProbe
+		*out = new(Probe)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Lifecycle != nil {
+		in, out := &in.Lifecycle, &out.Lifecycle
+		*out = new(Lifecycle)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SecurityContext != nil {
+		in, out := &in.SecurityContext, &out.SecurityContext
+		*out = new(SecurityContext)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Container.
+func (in *Container) DeepCopy() *Container {
+	if in == nil {
+		return nil
+	}
+	out := new(Container)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerImage) DeepCopyInto(out *ContainerImage) {
+	*out = *in
+	if in.Names != nil {
+		in, out := &in.Names, &out.Names
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerImage.
+func (in *ContainerImage) DeepCopy() *ContainerImage {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerImage)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerPort) DeepCopyInto(out *ContainerPort) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerPort.
+func (in *ContainerPort) DeepCopy() *ContainerPort {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerPort)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerState) DeepCopyInto(out *ContainerState) {
+	*out = *in
+	if in.Waiting != nil {
+		in, out := &in.Waiting, &out.Waiting
+		*out = new(ContainerStateWaiting)
+		**out = **in
+	}
+	if in.Running != nil {
+		in, out := &in.Running, &out.Running
+		*out = new(ContainerStateRunning)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Terminated != nil {
+		in, out := &in.Terminated, &out.Terminated
+		*out = new(ContainerStateTerminated)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerState.
+func (in *ContainerState) DeepCopy() *ContainerState {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerState)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerStateRunning) DeepCopyInto(out *ContainerStateRunning) {
+	*out = *in
+	in.StartedAt.DeepCopyInto(&out.StartedAt)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateRunning.
+func (in *ContainerStateRunning) DeepCopy() *ContainerStateRunning {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerStateRunning)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerStateTerminated) DeepCopyInto(out *ContainerStateTerminated) {
+	*out = *in
+	in.StartedAt.DeepCopyInto(&out.StartedAt)
+	in.FinishedAt.DeepCopyInto(&out.FinishedAt)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateTerminated.
+func (in *ContainerStateTerminated) DeepCopy() *ContainerStateTerminated {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerStateTerminated)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerStateWaiting) DeepCopyInto(out *ContainerStateWaiting) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateWaiting.
+func (in *ContainerStateWaiting) DeepCopy() *ContainerStateWaiting {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerStateWaiting)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) {
+	*out = *in
+	in.State.DeepCopyInto(&out.State)
+	in.LastTerminationState.DeepCopyInto(&out.LastTerminationState)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStatus.
+func (in *ContainerStatus) DeepCopy() *ContainerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonEndpoint) DeepCopyInto(out *DaemonEndpoint) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonEndpoint.
+func (in *DaemonEndpoint) DeepCopy() *DaemonEndpoint {
+	if in == nil {
+		return nil
+	}
+	out := new(DaemonEndpoint)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DownwardAPIProjection) DeepCopyInto(out *DownwardAPIProjection) {
+	*out = *in
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]DownwardAPIVolumeFile, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIProjection.
+func (in *DownwardAPIProjection) DeepCopy() *DownwardAPIProjection {
+	if in == nil {
+		return nil
+	}
+	out := new(DownwardAPIProjection)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DownwardAPIVolumeFile) DeepCopyInto(out *DownwardAPIVolumeFile) {
+	*out = *in
+	if in.FieldRef != nil {
+		in, out := &in.FieldRef, &out.FieldRef
+		*out = new(ObjectFieldSelector)
+		**out = **in
+	}
+	if in.ResourceFieldRef != nil {
+		in, out := &in.ResourceFieldRef, &out.ResourceFieldRef
+		*out = new(ResourceFieldSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeFile.
+func (in *DownwardAPIVolumeFile) DeepCopy() *DownwardAPIVolumeFile {
+	if in == nil {
+		return nil
+	}
+	out := new(DownwardAPIVolumeFile)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DownwardAPIVolumeSource) DeepCopyInto(out *DownwardAPIVolumeSource) {
+	*out = *in
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]DownwardAPIVolumeFile, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DefaultMode != nil {
+		in, out := &in.DefaultMode, &out.DefaultMode
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeSource.
+func (in *DownwardAPIVolumeSource) DeepCopy() *DownwardAPIVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(DownwardAPIVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EmptyDirVolumeSource) DeepCopyInto(out *EmptyDirVolumeSource) {
+	*out = *in
+	if in.SizeLimit != nil {
+		in, out := &in.SizeLimit, &out.SizeLimit
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyDirVolumeSource.
+func (in *EmptyDirVolumeSource) DeepCopy() *EmptyDirVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(EmptyDirVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointAddress) DeepCopyInto(out *EndpointAddress) {
+	*out = *in
+	if in.NodeName != nil {
+		in, out := &in.NodeName, &out.NodeName
+		*out = new(string)
+		**out = **in
+	}
+	if in.TargetRef != nil {
+		in, out := &in.TargetRef, &out.TargetRef
+		*out = new(ObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointAddress.
+func (in *EndpointAddress) DeepCopy() *EndpointAddress {
+	if in == nil {
+		return nil
+	}
+	out := new(EndpointAddress)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointPort) DeepCopyInto(out *EndpointPort) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort.
+func (in *EndpointPort) DeepCopy() *EndpointPort {
+	if in == nil {
+		return nil
+	}
+	out := new(EndpointPort)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointSubset) DeepCopyInto(out *EndpointSubset) {
+	*out = *in
+	if in.Addresses != nil {
+		in, out := &in.Addresses, &out.Addresses
+		*out = make([]EndpointAddress, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NotReadyAddresses != nil {
+		in, out := &in.NotReadyAddresses, &out.NotReadyAddresses
+		*out = make([]EndpointAddress, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Ports != nil {
+		in, out := &in.Ports, &out.Ports
+		*out = make([]EndpointPort, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSubset.
+func (in *EndpointSubset) DeepCopy() *EndpointSubset {
+	if in == nil {
+		return nil
+	}
+	out := new(EndpointSubset)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Endpoints) DeepCopyInto(out *Endpoints) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Subsets != nil {
+		in, out := &in.Subsets, &out.Subsets
+		*out = make([]EndpointSubset, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoints.
+func (in *Endpoints) DeepCopy() *Endpoints {
+	if in == nil {
+		return nil
+	}
+	out := new(Endpoints)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Endpoints) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointsList) DeepCopyInto(out *EndpointsList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Endpoints, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsList.
+func (in *EndpointsList) DeepCopy() *EndpointsList {
+	if in == nil {
+		return nil
+	}
+	out := new(EndpointsList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EndpointsList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EnvFromSource) DeepCopyInto(out *EnvFromSource) {
+	*out = *in
+	if in.ConfigMapRef != nil {
+		in, out := &in.ConfigMapRef, &out.ConfigMapRef
+		*out = new(ConfigMapEnvSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(SecretEnvSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvFromSource.
+func (in *EnvFromSource) DeepCopy() *EnvFromSource {
+	if in == nil {
+		return nil
+	}
+	out := new(EnvFromSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EnvVar) DeepCopyInto(out *EnvVar) {
+	*out = *in
+	if in.ValueFrom != nil {
+		in, out := &in.ValueFrom, &out.ValueFrom
+		*out = new(EnvVarSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVar.
+func (in *EnvVar) DeepCopy() *EnvVar {
+	if in == nil {
+		return nil
+	}
+	out := new(EnvVar)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EnvVarSource) DeepCopyInto(out *EnvVarSource) {
+	*out = *in
+	if in.FieldRef != nil {
+		in, out := &in.FieldRef, &out.FieldRef
+		*out = new(ObjectFieldSelector)
+		**out = **in
+	}
+	if in.ResourceFieldRef != nil {
+		in, out := &in.ResourceFieldRef, &out.ResourceFieldRef
+		*out = new(ResourceFieldSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ConfigMapKeyRef != nil {
+		in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef
+		*out = new(ConfigMapKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SecretKeyRef != nil {
+		in, out := &in.SecretKeyRef, &out.SecretKeyRef
+		*out = new(SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarSource.
+func (in *EnvVarSource) DeepCopy() *EnvVarSource {
+	if in == nil {
+		return nil
+	}
+	out := new(EnvVarSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Event) DeepCopyInto(out *Event) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	out.InvolvedObject = in.InvolvedObject
+	out.Source = in.Source
+	in.FirstTimestamp.DeepCopyInto(&out.FirstTimestamp)
+	in.LastTimestamp.DeepCopyInto(&out.LastTimestamp)
+	in.EventTime.DeepCopyInto(&out.EventTime)
+	if in.Series != nil {
+		in, out := &in.Series, &out.Series
+		*out = new(EventSeries)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Related != nil {
+		in, out := &in.Related, &out.Related
+		*out = new(ObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event.
+func (in *Event) DeepCopy() *Event {
+	if in == nil {
+		return nil
+	}
+	out := new(Event)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Event) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventList) DeepCopyInto(out *EventList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Event, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList.
+func (in *EventList) DeepCopy() *EventList {
+	if in == nil {
+		return nil
+	}
+	out := new(EventList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EventList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventSeries) DeepCopyInto(out *EventSeries) {
+	*out = *in
+	in.LastObservedTime.DeepCopyInto(&out.LastObservedTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSeries.
+func (in *EventSeries) DeepCopy() *EventSeries {
+	if in == nil {
+		return nil
+	}
+	out := new(EventSeries)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventSource) DeepCopyInto(out *EventSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSource.
+func (in *EventSource) DeepCopy() *EventSource {
+	if in == nil {
+		return nil
+	}
+	out := new(EventSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecAction) DeepCopyInto(out *ExecAction) {
+	*out = *in
+	if in.Command != nil {
+		in, out := &in.Command, &out.Command
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecAction.
+func (in *ExecAction) DeepCopy() *ExecAction {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecAction)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FCVolumeSource) DeepCopyInto(out *FCVolumeSource) {
+	*out = *in
+	if in.TargetWWNs != nil {
+		in, out := &in.TargetWWNs, &out.TargetWWNs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Lun != nil {
+		in, out := &in.Lun, &out.Lun
+		*out = new(int32)
+		**out = **in
+	}
+	if in.WWIDs != nil {
+		in, out := &in.WWIDs, &out.WWIDs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FCVolumeSource.
+func (in *FCVolumeSource) DeepCopy() *FCVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(FCVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FlexPersistentVolumeSource) DeepCopyInto(out *FlexPersistentVolumeSource) {
+	*out = *in
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	if in.Options != nil {
+		in, out := &in.Options, &out.Options
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexPersistentVolumeSource.
+func (in *FlexPersistentVolumeSource) DeepCopy() *FlexPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(FlexPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FlexVolumeSource) DeepCopyInto(out *FlexVolumeSource) {
+	*out = *in
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(LocalObjectReference)
+		**out = **in
+	}
+	if in.Options != nil {
+		in, out := &in.Options, &out.Options
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexVolumeSource.
+func (in *FlexVolumeSource) DeepCopy() *FlexVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(FlexVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FlockerVolumeSource) DeepCopyInto(out *FlockerVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlockerVolumeSource.
+func (in *FlockerVolumeSource) DeepCopy() *FlockerVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(FlockerVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCEPersistentDiskVolumeSource) DeepCopyInto(out *GCEPersistentDiskVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCEPersistentDiskVolumeSource.
+func (in *GCEPersistentDiskVolumeSource) DeepCopy() *GCEPersistentDiskVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(GCEPersistentDiskVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitRepoVolumeSource) DeepCopyInto(out *GitRepoVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepoVolumeSource.
+func (in *GitRepoVolumeSource) DeepCopy() *GitRepoVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(GitRepoVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GlusterfsPersistentVolumeSource) DeepCopyInto(out *GlusterfsPersistentVolumeSource) {
+	*out = *in
+	if in.EndpointsNamespace != nil {
+		in, out := &in.EndpointsNamespace, &out.EndpointsNamespace
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsPersistentVolumeSource.
+func (in *GlusterfsPersistentVolumeSource) DeepCopy() *GlusterfsPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(GlusterfsPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GlusterfsVolumeSource) DeepCopyInto(out *GlusterfsVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsVolumeSource.
+func (in *GlusterfsVolumeSource) DeepCopy() *GlusterfsVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(GlusterfsVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPGetAction) DeepCopyInto(out *HTTPGetAction) {
+	*out = *in
+	out.Port = in.Port
+	if in.HTTPHeaders != nil {
+		in, out := &in.HTTPHeaders, &out.HTTPHeaders
+		*out = make([]HTTPHeader, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPGetAction.
+func (in *HTTPGetAction) DeepCopy() *HTTPGetAction {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPGetAction)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPHeader) DeepCopyInto(out *HTTPHeader) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeader.
+func (in *HTTPHeader) DeepCopy() *HTTPHeader {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPHeader)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Handler) DeepCopyInto(out *Handler) {
+	*out = *in
+	if in.Exec != nil {
+		in, out := &in.Exec, &out.Exec
+		*out = new(ExecAction)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.HTTPGet != nil {
+		in, out := &in.HTTPGet, &out.HTTPGet
+		*out = new(HTTPGetAction)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TCPSocket != nil {
+		in, out := &in.TCPSocket, &out.TCPSocket
+		*out = new(TCPSocketAction)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Handler.
+func (in *Handler) DeepCopy() *Handler {
+	if in == nil {
+		return nil
+	}
+	out := new(Handler)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostAlias) DeepCopyInto(out *HostAlias) {
+	*out = *in
+	if in.Hostnames != nil {
+		in, out := &in.Hostnames, &out.Hostnames
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAlias.
+func (in *HostAlias) DeepCopy() *HostAlias {
+	if in == nil {
+		return nil
+	}
+	out := new(HostAlias)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostPathVolumeSource) DeepCopyInto(out *HostPathVolumeSource) {
+	*out = *in
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(HostPathType)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPathVolumeSource.
+func (in *HostPathVolumeSource) DeepCopy() *HostPathVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(HostPathVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ISCSIPersistentVolumeSource) DeepCopyInto(out *ISCSIPersistentVolumeSource) {
+	*out = *in
+	if in.Portals != nil {
+		in, out := &in.Portals, &out.Portals
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	if in.InitiatorName != nil {
+		in, out := &in.InitiatorName, &out.InitiatorName
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIPersistentVolumeSource.
+func (in *ISCSIPersistentVolumeSource) DeepCopy() *ISCSIPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ISCSIPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ISCSIVolumeSource) DeepCopyInto(out *ISCSIVolumeSource) {
+	*out = *in
+	if in.Portals != nil {
+		in, out := &in.Portals, &out.Portals
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(LocalObjectReference)
+		**out = **in
+	}
+	if in.InitiatorName != nil {
+		in, out := &in.InitiatorName, &out.InitiatorName
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIVolumeSource.
+func (in *ISCSIVolumeSource) DeepCopy() *ISCSIVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ISCSIVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KeyToPath) DeepCopyInto(out *KeyToPath) {
+	*out = *in
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyToPath.
+func (in *KeyToPath) DeepCopy() *KeyToPath {
+	if in == nil {
+		return nil
+	}
+	out := new(KeyToPath)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Lifecycle) DeepCopyInto(out *Lifecycle) {
+	*out = *in
+	if in.PostStart != nil {
+		in, out := &in.PostStart, &out.PostStart
+		*out = new(Handler)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PreStop != nil {
+		in, out := &in.PreStop, &out.PreStop
+		*out = new(Handler)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lifecycle.
+func (in *Lifecycle) DeepCopy() *Lifecycle {
+	if in == nil {
+		return nil
+	}
+	out := new(Lifecycle)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LimitRange) DeepCopyInto(out *LimitRange) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRange.
+func (in *LimitRange) DeepCopy() *LimitRange {
+	if in == nil {
+		return nil
+	}
+	out := new(LimitRange)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *LimitRange) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LimitRangeItem) DeepCopyInto(out *LimitRangeItem) {
+	*out = *in
+	if in.Max != nil {
+		in, out := &in.Max, &out.Max
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Min != nil {
+		in, out := &in.Min, &out.Min
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Default != nil {
+		in, out := &in.Default, &out.Default
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.DefaultRequest != nil {
+		in, out := &in.DefaultRequest, &out.DefaultRequest
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.MaxLimitRequestRatio != nil {
+		in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeItem.
+func (in *LimitRangeItem) DeepCopy() *LimitRangeItem {
+	if in == nil {
+		return nil
+	}
+	out := new(LimitRangeItem)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LimitRangeList) DeepCopyInto(out *LimitRangeList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]LimitRange, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeList.
+func (in *LimitRangeList) DeepCopy() *LimitRangeList {
+	if in == nil {
+		return nil
+	}
+	out := new(LimitRangeList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *LimitRangeList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LimitRangeSpec) DeepCopyInto(out *LimitRangeSpec) {
+	*out = *in
+	if in.Limits != nil {
+		in, out := &in.Limits, &out.Limits
+		*out = make([]LimitRangeItem, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeSpec.
+func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(LimitRangeSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *List) DeepCopyInto(out *List) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]runtime.Object, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				(*out)[i] = (*in)[i].DeepCopyObject()
+			}
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List.
+func (in *List) DeepCopy() *List {
+	if in == nil {
+		return nil
+	}
+	out := new(List)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *List) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerIngress.
+func (in *LoadBalancerIngress) DeepCopy() *LoadBalancerIngress {
+	if in == nil {
+		return nil
+	}
+	out := new(LoadBalancerIngress)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancerStatus) DeepCopyInto(out *LoadBalancerStatus) {
+	*out = *in
+	if in.Ingress != nil {
+		in, out := &in.Ingress, &out.Ingress
+		*out = make([]LoadBalancerIngress, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerStatus.
+func (in *LoadBalancerStatus) DeepCopy() *LoadBalancerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(LoadBalancerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference.
+func (in *LocalObjectReference) DeepCopy() *LocalObjectReference {
+	if in == nil {
+		return nil
+	}
+	out := new(LocalObjectReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LocalVolumeSource) DeepCopyInto(out *LocalVolumeSource) {
+	*out = *in
+	if in.FSType != nil {
+		in, out := &in.FSType, &out.FSType
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalVolumeSource.
+func (in *LocalVolumeSource) DeepCopy() *LocalVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(LocalVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NFSVolumeSource) DeepCopyInto(out *NFSVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSVolumeSource.
+func (in *NFSVolumeSource) DeepCopy() *NFSVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(NFSVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Namespace) DeepCopyInto(out *Namespace) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	out.Status = in.Status
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespace.
+func (in *Namespace) DeepCopy() *Namespace {
+	if in == nil {
+		return nil
+	}
+	out := new(Namespace)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Namespace) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamespaceList) DeepCopyInto(out *NamespaceList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Namespace, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceList.
+func (in *NamespaceList) DeepCopy() *NamespaceList {
+	if in == nil {
+		return nil
+	}
+	out := new(NamespaceList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NamespaceList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamespaceSpec) DeepCopyInto(out *NamespaceSpec) {
+	*out = *in
+	if in.Finalizers != nil {
+		in, out := &in.Finalizers, &out.Finalizers
+		*out = make([]FinalizerName, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceSpec.
+func (in *NamespaceSpec) DeepCopy() *NamespaceSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(NamespaceSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamespaceStatus) DeepCopyInto(out *NamespaceStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceStatus.
+func (in *NamespaceStatus) DeepCopy() *NamespaceStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(NamespaceStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Node) DeepCopyInto(out *Node) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node.
+func (in *Node) DeepCopy() *Node {
+	if in == nil {
+		return nil
+	}
+	out := new(Node)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Node) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeAddress) DeepCopyInto(out *NodeAddress) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAddress.
+func (in *NodeAddress) DeepCopy() *NodeAddress {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeAddress)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeAffinity) DeepCopyInto(out *NodeAffinity) {
+	*out = *in
+	if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+		*out = new(NodeSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+		*out = make([]PreferredSchedulingTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAffinity.
+func (in *NodeAffinity) DeepCopy() *NodeAffinity {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeAffinity)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeCondition) DeepCopyInto(out *NodeCondition) {
+	*out = *in
+	in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime)
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeCondition.
+func (in *NodeCondition) DeepCopy() *NodeCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeConfigSource) DeepCopyInto(out *NodeConfigSource) {
+	*out = *in
+	if in.ConfigMap != nil {
+		in, out := &in.ConfigMap, &out.ConfigMap
+		*out = new(ConfigMapNodeConfigSource)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigSource.
+func (in *NodeConfigSource) DeepCopy() *NodeConfigSource {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeConfigSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeConfigStatus) DeepCopyInto(out *NodeConfigStatus) {
+	*out = *in
+	if in.Assigned != nil {
+		in, out := &in.Assigned, &out.Assigned
+		*out = new(NodeConfigSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Active != nil {
+		in, out := &in.Active, &out.Active
+		*out = new(NodeConfigSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.LastKnownGood != nil {
+		in, out := &in.LastKnownGood, &out.LastKnownGood
+		*out = new(NodeConfigSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigStatus.
+func (in *NodeConfigStatus) DeepCopy() *NodeConfigStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeConfigStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeDaemonEndpoints) DeepCopyInto(out *NodeDaemonEndpoints) {
+	*out = *in
+	out.KubeletEndpoint = in.KubeletEndpoint
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDaemonEndpoints.
+func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeDaemonEndpoints)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeList) DeepCopyInto(out *NodeList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Node, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeList.
+func (in *NodeList) DeepCopy() *NodeList {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NodeList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeProxyOptions) DeepCopyInto(out *NodeProxyOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeProxyOptions.
+func (in *NodeProxyOptions) DeepCopy() *NodeProxyOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeProxyOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NodeProxyOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeResources) DeepCopyInto(out *NodeResources) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResources. +func (in *NodeResources) DeepCopy() *NodeResources { + if in == nil { + return nil + } + out := new(NodeResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSelector) DeepCopyInto(out *NodeSelector) { + *out = *in + if in.NodeSelectorTerms != nil { + in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms + *out = make([]NodeSelectorTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelector. +func (in *NodeSelector) DeepCopy() *NodeSelector { + if in == nil { + return nil + } + out := new(NodeSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSelectorRequirement) DeepCopyInto(out *NodeSelectorRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorRequirement. +func (in *NodeSelectorRequirement) DeepCopy() *NodeSelectorRequirement { + if in == nil { + return nil + } + out := new(NodeSelectorRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSelectorTerm) DeepCopyInto(out *NodeSelectorTerm) { + *out = *in + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]NodeSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MatchFields != nil { + in, out := &in.MatchFields, &out.MatchFields + *out = make([]NodeSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorTerm. +func (in *NodeSelectorTerm) DeepCopy() *NodeSelectorTerm { + if in == nil { + return nil + } + out := new(NodeSelectorTerm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSpec) DeepCopyInto(out *NodeSpec) {
+	*out = *in
+	if in.Taints != nil {
+		in, out := &in.Taints, &out.Taints
+		*out = make([]Taint, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ConfigSource != nil {
+		in, out := &in.ConfigSource, &out.ConfigSource
+		*out = new(NodeConfigSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec.
+func (in *NodeSpec) DeepCopy() *NodeSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
+	*out = *in
+	if in.Capacity != nil {
+		in, out := &in.Capacity, &out.Capacity
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Allocatable != nil {
+		in, out := &in.Allocatable, &out.Allocatable
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]NodeCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Addresses != nil {
+		in, out := &in.Addresses, &out.Addresses
+		*out = make([]NodeAddress, len(*in))
+		copy(*out, *in)
+	}
+	out.DaemonEndpoints = in.DaemonEndpoints
+	out.NodeInfo = in.NodeInfo
+	if in.Images != nil {
+		in, out := &in.Images, &out.Images
+		*out = make([]ContainerImage, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.VolumesInUse != nil {
+		in, out := &in.VolumesInUse, &out.VolumesInUse
+		*out = make([]UniqueVolumeName, len(*in))
+		copy(*out, *in)
+	}
+	if in.VolumesAttached != nil {
+		in, out := &in.VolumesAttached, &out.VolumesAttached
+		*out = make([]AttachedVolume, len(*in))
+		copy(*out, *in)
+	}
+	if in.Config != nil {
+		in, out := &in.Config, &out.Config
+		*out = new(NodeConfigStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus.
+func (in *NodeStatus) DeepCopy() *NodeStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSystemInfo) DeepCopyInto(out *NodeSystemInfo) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSystemInfo.
+func (in *NodeSystemInfo) DeepCopy() *NodeSystemInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSystemInfo)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectFieldSelector) DeepCopyInto(out *ObjectFieldSelector) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectFieldSelector.
+func (in *ObjectFieldSelector) DeepCopy() *ObjectFieldSelector {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectFieldSelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectReference) DeepCopyInto(out *ObjectReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference.
+func (in *ObjectReference) DeepCopy() *ObjectReference {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ObjectReference) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolume) DeepCopyInto(out *PersistentVolume) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	out.Status = in.Status
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolume.
+func (in *PersistentVolume) DeepCopy() *PersistentVolume {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolume)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PersistentVolume) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaim) DeepCopyInto(out *PersistentVolumeClaim) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaim.
+func (in *PersistentVolumeClaim) DeepCopy() *PersistentVolumeClaim {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaim)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PersistentVolumeClaim) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimCondition) DeepCopyInto(out *PersistentVolumeClaimCondition) {
+	*out = *in
+	in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimCondition.
+func (in *PersistentVolumeClaimCondition) DeepCopy() *PersistentVolumeClaimCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimList) DeepCopyInto(out *PersistentVolumeClaimList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]PersistentVolumeClaim, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimList.
+func (in *PersistentVolumeClaimList) DeepCopy() *PersistentVolumeClaimList {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PersistentVolumeClaimList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec) {
+	*out = *in
+	if in.AccessModes != nil {
+		in, out := &in.AccessModes, &out.AccessModes
+		*out = make([]PersistentVolumeAccessMode, len(*in))
+		copy(*out, *in)
+	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	in.Resources.DeepCopyInto(&out.Resources)
+	if in.StorageClassName != nil {
+		in, out := &in.StorageClassName, &out.StorageClassName
+		*out = new(string)
+		**out = **in
+	}
+	if in.VolumeMode != nil {
+		in, out := &in.VolumeMode, &out.VolumeMode
+		*out = new(PersistentVolumeMode)
+		**out = **in
+	}
+	if in.DataSource != nil {
+		in, out := &in.DataSource, &out.DataSource
+		*out = new(TypedLocalObjectReference)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimSpec.
+func (in *PersistentVolumeClaimSpec) DeepCopy() *PersistentVolumeClaimSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimStatus) {
+	*out = *in
+	if in.AccessModes != nil {
+		in, out := &in.AccessModes, &out.AccessModes
+		*out = make([]PersistentVolumeAccessMode, len(*in))
+		copy(*out, *in)
+	}
+	if in.Capacity != nil {
+		in, out := &in.Capacity, &out.Capacity
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]PersistentVolumeClaimCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimStatus.
+func (in *PersistentVolumeClaimStatus) DeepCopy() *PersistentVolumeClaimStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimVolumeSource) DeepCopyInto(out *PersistentVolumeClaimVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimVolumeSource.
+func (in *PersistentVolumeClaimVolumeSource) DeepCopy() *PersistentVolumeClaimVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeList) DeepCopyInto(out *PersistentVolumeList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]PersistentVolume, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeList.
+func (in *PersistentVolumeList) DeepCopy() *PersistentVolumeList {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PersistentVolumeList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) {
+	*out = *in
+	if in.GCEPersistentDisk != nil {
+		in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk
+		*out = new(GCEPersistentDiskVolumeSource)
+		**out = **in
+	}
+	if in.AWSElasticBlockStore != nil {
+		in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore
+		*out = new(AWSElasticBlockStoreVolumeSource)
+		**out = **in
+	}
+	if in.HostPath != nil {
+		in, out := &in.HostPath, &out.HostPath
+		*out = new(HostPathVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Glusterfs != nil {
+		in, out := &in.Glusterfs, &out.Glusterfs
+		*out = new(GlusterfsPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NFS != nil {
+		in, out := &in.NFS, &out.NFS
+		*out = new(NFSVolumeSource)
+		**out = **in
+	}
+	if in.RBD != nil {
+		in, out := &in.RBD, &out.RBD
+		*out = new(RBDPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Quobyte != nil {
+		in, out := &in.Quobyte, &out.Quobyte
+		*out = new(QuobyteVolumeSource)
+		**out = **in
+	}
+	if in.ISCSI != nil {
+		in, out := &in.ISCSI, &out.ISCSI
+		*out = new(ISCSIPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FlexVolume != nil {
+		in, out := &in.FlexVolume, &out.FlexVolume
+		*out = new(FlexPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Cinder != nil {
+		in, out := &in.Cinder, &out.Cinder
+		*out = new(CinderPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.CephFS != nil {
+		in, out := &in.CephFS, &out.CephFS
+		*out = new(CephFSPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FC != nil {
+		in, out := &in.FC, &out.FC
+		*out = new(FCVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Flocker != nil {
+		in, out := &in.Flocker, &out.Flocker
+		*out = new(FlockerVolumeSource)
+		**out = **in
+	}
+	if in.AzureFile != nil {
+		in, out := &in.AzureFile, &out.AzureFile
+		*out = new(AzureFilePersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.VsphereVolume != nil {
+		in, out := &in.VsphereVolume, &out.VsphereVolume
+		*out = new(VsphereVirtualDiskVolumeSource)
+		**out = **in
+	}
+	if in.AzureDisk != nil {
+		in, out := &in.AzureDisk, &out.AzureDisk
+		*out = new(AzureDiskVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PhotonPersistentDisk != nil {
+		in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk
+		*out = new(PhotonPersistentDiskVolumeSource)
+		**out = **in
+	}
+	if in.PortworxVolume != nil {
+		in, out := &in.PortworxVolume, &out.PortworxVolume
+		*out = new(PortworxVolumeSource)
+		**out = **in
+	}
+	if in.ScaleIO != nil {
+		in, out := &in.ScaleIO, &out.ScaleIO
+		*out = new(ScaleIOPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Local != nil {
+		in, out := &in.Local, &out.Local
+		*out = new(LocalVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.StorageOS != nil {
+		in, out := &in.StorageOS, &out.StorageOS
+		*out = new(StorageOSPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.CSI != nil {
+		in, out := &in.CSI, &out.CSI
+		*out = new(CSIPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSource.
+func (in *PersistentVolumeSource) DeepCopy() *PersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) {
+	*out = *in
+	if in.Capacity != nil {
+		in, out := &in.Capacity, &out.Capacity
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	in.PersistentVolumeSource.DeepCopyInto(&out.PersistentVolumeSource)
+	if in.AccessModes != nil {
+		in, out := &in.AccessModes, &out.AccessModes
+		*out = make([]PersistentVolumeAccessMode, len(*in))
+		copy(*out, *in)
+	}
+	if in.ClaimRef != nil {
+		in, out := &in.ClaimRef, &out.ClaimRef
+		*out = new(ObjectReference)
+		**out = **in
+	}
+	if in.MountOptions != nil {
+		in, out := &in.MountOptions, &out.MountOptions
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.VolumeMode != nil {
+		in, out := &in.VolumeMode, &out.VolumeMode
+		*out = new(PersistentVolumeMode)
+		**out = **in
+	}
+	if in.NodeAffinity != nil {
+		in, out := &in.NodeAffinity, &out.NodeAffinity
+		*out = new(VolumeNodeAffinity)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSpec.
+func (in *PersistentVolumeSpec) DeepCopy() *PersistentVolumeSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeStatus) DeepCopyInto(out *PersistentVolumeStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeStatus.
+func (in *PersistentVolumeStatus) DeepCopy() *PersistentVolumeStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PhotonPersistentDiskVolumeSource) DeepCopyInto(out *PhotonPersistentDiskVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhotonPersistentDiskVolumeSource.
+func (in *PhotonPersistentDiskVolumeSource) DeepCopy() *PhotonPersistentDiskVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PhotonPersistentDiskVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Pod) DeepCopyInto(out *Pod) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pod.
+func (in *Pod) DeepCopy() *Pod {
+	if in == nil {
+		return nil
+	}
+	out := new(Pod)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Pod) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodAffinity) DeepCopyInto(out *PodAffinity) {
+	*out = *in
+	if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+		*out = make([]PodAffinityTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+		*out = make([]WeightedPodAffinityTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinity.
+func (in *PodAffinity) DeepCopy() *PodAffinity {
+	if in == nil {
+		return nil
+	}
+	out := new(PodAffinity)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) {
+	*out = *in
+	if in.LabelSelector != nil {
+		in, out := &in.LabelSelector, &out.LabelSelector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Namespaces != nil {
+		in, out := &in.Namespaces, &out.Namespaces
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinityTerm.
+func (in *PodAffinityTerm) DeepCopy() *PodAffinityTerm {
+	if in == nil {
+		return nil
+	}
+	out := new(PodAffinityTerm)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodAntiAffinity) DeepCopyInto(out *PodAntiAffinity) {
+	*out = *in
+	if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+		*out = make([]PodAffinityTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+		*out = make([]WeightedPodAffinityTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAntiAffinity.
+func (in *PodAntiAffinity) DeepCopy() *PodAntiAffinity {
+	if in == nil {
+		return nil
+	}
+	out := new(PodAntiAffinity)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodAttachOptions) DeepCopyInto(out *PodAttachOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAttachOptions.
+func (in *PodAttachOptions) DeepCopy() *PodAttachOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodAttachOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodAttachOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodCondition) DeepCopyInto(out *PodCondition) {
+	*out = *in
+	in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCondition.
+func (in *PodCondition) DeepCopy() *PodCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(PodCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodDNSConfig) DeepCopyInto(out *PodDNSConfig) {
+	*out = *in
+	if in.Nameservers != nil {
+		in, out := &in.Nameservers, &out.Nameservers
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Searches != nil {
+		in, out := &in.Searches, &out.Searches
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Options != nil {
+		in, out := &in.Options, &out.Options
+		*out = make([]PodDNSConfigOption, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfig.
+func (in *PodDNSConfig) DeepCopy() *PodDNSConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(PodDNSConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodDNSConfigOption) DeepCopyInto(out *PodDNSConfigOption) {
+	*out = *in
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfigOption.
+func (in *PodDNSConfigOption) DeepCopy() *PodDNSConfigOption {
+	if in == nil {
+		return nil
+	}
+	out := new(PodDNSConfigOption)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodExecOptions) DeepCopyInto(out *PodExecOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Command != nil {
+		in, out := &in.Command, &out.Command
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodExecOptions.
+func (in *PodExecOptions) DeepCopy() *PodExecOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodExecOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodExecOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodList) DeepCopyInto(out *PodList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Pod, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodList.
+func (in *PodList) DeepCopy() *PodList {
+	if in == nil {
+		return nil
+	}
+	out := new(PodList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.SinceSeconds != nil {
+		in, out := &in.SinceSeconds, &out.SinceSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.SinceTime != nil {
+		in, out := &in.SinceTime, &out.SinceTime
+		*out = (*in).DeepCopy()
+	}
+	if in.TailLines != nil {
+		in, out := &in.TailLines, &out.TailLines
+		*out = new(int64)
+		**out = **in
+	}
+	if in.LimitBytes != nil {
+		in, out := &in.LimitBytes, &out.LimitBytes
+		*out = new(int64)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLogOptions.
+func (in *PodLogOptions) DeepCopy() *PodLogOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodLogOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodLogOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodPortForwardOptions) DeepCopyInto(out *PodPortForwardOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Ports != nil {
+		in, out := &in.Ports, &out.Ports
+		*out = make([]int32, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPortForwardOptions.
+func (in *PodPortForwardOptions) DeepCopy() *PodPortForwardOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodPortForwardOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodPortForwardOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodProxyOptions) DeepCopyInto(out *PodProxyOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodProxyOptions.
+func (in *PodProxyOptions) DeepCopy() *PodProxyOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodProxyOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodProxyOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodReadinessGate) DeepCopyInto(out *PodReadinessGate) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodReadinessGate.
+func (in *PodReadinessGate) DeepCopy() *PodReadinessGate {
+	if in == nil {
+		return nil
+	}
+	out := new(PodReadinessGate)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) {
+	*out = *in
+	if in.ShareProcessNamespace != nil {
+		in, out := &in.ShareProcessNamespace, &out.ShareProcessNamespace
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SELinuxOptions != nil {
+		in, out := &in.SELinuxOptions, &out.SELinuxOptions
+		*out = new(SELinuxOptions)
+		**out = **in
+	}
+	if in.WindowsOptions != nil {
+		in, out := &in.WindowsOptions, &out.WindowsOptions
+		*out = new(WindowsSecurityContextOptions)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.RunAsUser != nil {
+		in, out := &in.RunAsUser, &out.RunAsUser
+		*out = new(int64)
+		**out = **in
+	}
+	if in.RunAsGroup != nil {
+		in, out := &in.RunAsGroup, &out.RunAsGroup
+		*out = new(int64)
+		**out = **in
+	}
+	if in.RunAsNonRoot != nil {
+		in, out := &in.RunAsNonRoot, &out.RunAsNonRoot
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SupplementalGroups != nil {
+		in, out := &in.SupplementalGroups, &out.SupplementalGroups
+		*out = make([]int64, len(*in))
+		copy(*out, *in)
+	}
+	if in.FSGroup != nil {
+		in, out := &in.FSGroup, &out.FSGroup
+		*out = new(int64)
+		**out = **in
+	}
+	if in.Sysctls != nil {
+		in, out := &in.Sysctls, &out.Sysctls
+		*out = make([]Sysctl, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityContext.
+func (in *PodSecurityContext) DeepCopy() *PodSecurityContext {
+	if in == nil {
+		return nil
+	}
+	out := new(PodSecurityContext)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodSignature) DeepCopyInto(out *PodSignature) {
+	*out = *in
+	if in.PodController != nil {
+		in, out := &in.PodController, &out.PodController
+		*out = new(v1.OwnerReference)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSignature.
+func (in *PodSignature) DeepCopy() *PodSignature {
+	if in == nil {
+		return nil
+	}
+	out := new(PodSignature)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodSpec) DeepCopyInto(out *PodSpec) {
+	*out = *in
+	if in.Volumes != nil {
+		in, out := &in.Volumes, &out.Volumes
+		*out = make([]Volume, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.InitContainers != nil {
+		in, out := &in.InitContainers, &out.InitContainers
+		*out = make([]Container, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Containers != nil {
+		in, out := &in.Containers, &out.Containers
+		*out = make([]Container, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TerminationGracePeriodSeconds != nil {
+		in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.ActiveDeadlineSeconds != nil {
+		in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.AutomountServiceAccountToken != nil {
+		in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SecurityContext != nil {
+		in, out := &in.SecurityContext, &out.SecurityContext
+		*out = new(PodSecurityContext)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ImagePullSecrets != nil {
+		in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
+		*out = make([]LocalObjectReference, len(*in))
+		copy(*out, *in)
+	}
+	if in.Affinity != nil {
+		in, out := &in.Affinity, &out.Affinity
+		*out = new(Affinity)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Tolerations != nil {
+		in, out := &in.Tolerations, &out.Tolerations
+		*out = make([]Toleration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.HostAliases != nil {
+		in, out := &in.HostAliases, &out.HostAliases
+		*out = make([]HostAlias, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Priority != nil {
+		in, out := &in.Priority, &out.Priority
+		*out = new(int32)
+		**out = **in
+	}
+	if in.PreemptionPolicy != nil {
+		in, out := &in.PreemptionPolicy, &out.PreemptionPolicy
+		*out = new(PreemptionPolicy)
+		**out = **in
+	}
+	if in.DNSConfig != nil {
+		in, out := &in.DNSConfig, &out.DNSConfig
+		*out = new(PodDNSConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ReadinessGates != nil {
+		in, out := &in.ReadinessGates, &out.ReadinessGates
+		*out = make([]PodReadinessGate, len(*in))
+		copy(*out, *in)
+	}
+	if in.RuntimeClassName != nil {
+		in, out := &in.RuntimeClassName, &out.RuntimeClassName
+		*out = new(string)
+		**out = **in
+	}
+	if in.EnableServiceLinks != nil {
+		in, out := &in.EnableServiceLinks, &out.EnableServiceLinks
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSpec.
+func (in *PodSpec) DeepCopy() *PodSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PodSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodStatus) DeepCopyInto(out *PodStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]PodCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.StartTime != nil {
+		in, out := &in.StartTime, &out.StartTime
+		*out = (*in).DeepCopy()
+	}
+	if in.InitContainerStatuses != nil {
+		in, out := &in.InitContainerStatuses, &out.InitContainerStatuses
+		*out = make([]ContainerStatus, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ContainerStatuses != nil {
+		in, out := &in.ContainerStatuses, &out.ContainerStatuses
+		*out = make([]ContainerStatus, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatus.
+func (in *PodStatus) DeepCopy() *PodStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PodStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodStatusResult) DeepCopyInto(out *PodStatusResult) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatusResult.
+func (in *PodStatusResult) DeepCopy() *PodStatusResult {
+	if in == nil {
+		return nil
+	}
+	out := new(PodStatusResult)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodStatusResult) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodTemplate) DeepCopyInto(out *PodTemplate) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Template.DeepCopyInto(&out.Template)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplate.
+func (in *PodTemplate) DeepCopy() *PodTemplate {
+	if in == nil {
+		return nil
+	}
+	out := new(PodTemplate)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodTemplate) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
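The practical reason the Pod helpers above exist: objects handed out by client-go listers and informers are shared cache entries and must never be mutated in place. A short sketch of the usual copy-before-mutate pattern, assuming the real k8s.io/api/core/v1 package (the markReady function is made up for illustration):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// markReady mutates a private deep copy; the cached Pod stays untouched.
func markReady(cached *v1.Pod) *v1.Pod {
	p := cached.DeepCopy() // the generated Pod.DeepCopy shown above
	if p.Labels == nil {
		p.Labels = map[string]string{}
	}
	p.Labels["example.com/ready"] = "true"
	return p
}

func main() {
	cached := &v1.Pod{}
	updated := markReady(cached)
	fmt.Println(len(cached.Labels), len(updated.Labels)) // 0 1
}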
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodTemplateList) DeepCopyInto(out *PodTemplateList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]PodTemplate, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateList.
+func (in *PodTemplateList) DeepCopy() *PodTemplateList {
+	if in == nil {
+		return nil
+	}
+	out := new(PodTemplateList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodTemplateList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodTemplateSpec) DeepCopyInto(out *PodTemplateSpec) {
+	*out = *in
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateSpec.
+func (in *PodTemplateSpec) DeepCopy() *PodTemplateSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PodTemplateSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PortworxVolumeSource) DeepCopyInto(out *PortworxVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortworxVolumeSource.
+func (in *PortworxVolumeSource) DeepCopy() *PortworxVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PortworxVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Preconditions) DeepCopyInto(out *Preconditions) {
+	*out = *in
+	if in.UID != nil {
+		in, out := &in.UID, &out.UID
+		*out = new(types.UID)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preconditions.
+func (in *Preconditions) DeepCopy() *Preconditions {
+	if in == nil {
+		return nil
+	}
+	out := new(Preconditions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PreferAvoidPodsEntry) DeepCopyInto(out *PreferAvoidPodsEntry) {
+	*out = *in
+	in.PodSignature.DeepCopyInto(&out.PodSignature)
+	in.EvictionTime.DeepCopyInto(&out.EvictionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferAvoidPodsEntry.
+func (in *PreferAvoidPodsEntry) DeepCopy() *PreferAvoidPodsEntry {
+	if in == nil {
+		return nil
+	}
+	out := new(PreferAvoidPodsEntry)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PreferredSchedulingTerm) DeepCopyInto(out *PreferredSchedulingTerm) {
+	*out = *in
+	in.Preference.DeepCopyInto(&out.Preference)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferredSchedulingTerm.
+func (in *PreferredSchedulingTerm) DeepCopy() *PreferredSchedulingTerm {
+	if in == nil {
+		return nil
+	}
+	out := new(PreferredSchedulingTerm)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Probe) DeepCopyInto(out *Probe) {
+	*out = *in
+	in.Handler.DeepCopyInto(&out.Handler)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe.
+func (in *Probe) DeepCopy() *Probe {
+	if in == nil {
+		return nil
+	}
+	out := new(Probe)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectedVolumeSource) DeepCopyInto(out *ProjectedVolumeSource) {
+	*out = *in
+	if in.Sources != nil {
+		in, out := &in.Sources, &out.Sources
+		*out = make([]VolumeProjection, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DefaultMode != nil {
+		in, out := &in.DefaultMode, &out.DefaultMode
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectedVolumeSource.
+func (in *ProjectedVolumeSource) DeepCopy() *ProjectedVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ProjectedVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QuobyteVolumeSource) DeepCopyInto(out *QuobyteVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuobyteVolumeSource.
+func (in *QuobyteVolumeSource) DeepCopy() *QuobyteVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(QuobyteVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RBDPersistentVolumeSource) DeepCopyInto(out *RBDPersistentVolumeSource) {
+	*out = *in
+	if in.CephMonitors != nil {
+		in, out := &in.CephMonitors, &out.CephMonitors
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDPersistentVolumeSource.
+func (in *RBDPersistentVolumeSource) DeepCopy() *RBDPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(RBDPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RBDVolumeSource) DeepCopyInto(out *RBDVolumeSource) {
+	*out = *in
+	if in.CephMonitors != nil {
+		in, out := &in.CephMonitors, &out.CephMonitors
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(LocalObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDVolumeSource.
+func (in *RBDVolumeSource) DeepCopy() *RBDVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(RBDVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RangeAllocation) DeepCopyInto(out *RangeAllocation) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Data != nil {
+		in, out := &in.Data, &out.Data
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocation.
+func (in *RangeAllocation) DeepCopy() *RangeAllocation {
+	if in == nil {
+		return nil
+	}
+	out := new(RangeAllocation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RangeAllocation) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicationController) DeepCopyInto(out *ReplicationController) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationController.
+func (in *ReplicationController) DeepCopy() *ReplicationController {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicationController)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ReplicationController) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicationControllerCondition) DeepCopyInto(out *ReplicationControllerCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerCondition.
+func (in *ReplicationControllerCondition) DeepCopy() *ReplicationControllerCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicationControllerCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicationControllerList) DeepCopyInto(out *ReplicationControllerList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ReplicationController, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerList.
+func (in *ReplicationControllerList) DeepCopy() *ReplicationControllerList {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicationControllerList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ReplicationControllerList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicationControllerSpec) DeepCopyInto(out *ReplicationControllerSpec) {
+	*out = *in
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Template != nil {
+		in, out := &in.Template, &out.Template
+		*out = new(PodTemplateSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerSpec.
+func (in *ReplicationControllerSpec) DeepCopy() *ReplicationControllerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicationControllerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicationControllerStatus) DeepCopyInto(out *ReplicationControllerStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]ReplicationControllerCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerStatus.
+func (in *ReplicationControllerStatus) DeepCopy() *ReplicationControllerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicationControllerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceFieldSelector) DeepCopyInto(out *ResourceFieldSelector) {
+	*out = *in
+	out.Divisor = in.Divisor.DeepCopy()
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFieldSelector.
+func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceFieldSelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in ResourceList) DeepCopyInto(out *ResourceList) {
+	{
+		in := &in
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList.
+func (in ResourceList) DeepCopy() ResourceList {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceList)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceQuota) DeepCopyInto(out *ResourceQuota) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
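ResourceList above is a named map type, so its generated helpers take a value receiver and DeepCopy returns a value rather than a pointer, with nil maps deep-copying to nil. A compact sketch of the same idea with hypothetical stand-in types (Quantity and ResourceList here are toys, not the real resource.Quantity):

package main

import "fmt"

// Quantity is a made-up stand-in for resource.Quantity.
type Quantity struct{ s string }

func (q Quantity) DeepCopy() Quantity { return Quantity{s: q.s} }

type ResourceList map[string]Quantity

// Named map types get a value receiver; a nil map deep-copies to nil.
func (in ResourceList) DeepCopy() ResourceList {
	if in == nil {
		return nil
	}
	out := make(ResourceList, len(in))
	for k, v := range in {
		out[k] = v.DeepCopy()
	}
	return out
}

func main() {
	a := ResourceList{"cpu": {s: "500m"}}
	b := a.DeepCopy()
	b["cpu"] = Quantity{s: "1"}
	fmt.Println(a["cpu"].s, b["cpu"].s) // 500m 1
}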
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuota.
+func (in *ResourceQuota) DeepCopy() *ResourceQuota {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceQuota)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ResourceQuota) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceQuotaList) DeepCopyInto(out *ResourceQuotaList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ResourceQuota, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaList.
+func (in *ResourceQuotaList) DeepCopy() *ResourceQuotaList {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceQuotaList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ResourceQuotaList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceQuotaSpec) DeepCopyInto(out *ResourceQuotaSpec) {
+	*out = *in
+	if in.Hard != nil {
+		in, out := &in.Hard, &out.Hard
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Scopes != nil {
+		in, out := &in.Scopes, &out.Scopes
+		*out = make([]ResourceQuotaScope, len(*in))
+		copy(*out, *in)
+	}
+	if in.ScopeSelector != nil {
+		in, out := &in.ScopeSelector, &out.ScopeSelector
+		*out = new(ScopeSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaSpec.
+func (in *ResourceQuotaSpec) DeepCopy() *ResourceQuotaSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceQuotaSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceQuotaStatus) DeepCopyInto(out *ResourceQuotaStatus) {
+	*out = *in
+	if in.Hard != nil {
+		in, out := &in.Hard, &out.Hard
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Used != nil {
+		in, out := &in.Used, &out.Used
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaStatus.
+func (in *ResourceQuotaStatus) DeepCopy() *ResourceQuotaStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceQuotaStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) {
+	*out = *in
+	if in.Limits != nil {
+		in, out := &in.Limits, &out.Limits
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Requests != nil {
+		in, out := &in.Requests, &out.Requests
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements.
+func (in *ResourceRequirements) DeepCopy() *ResourceRequirements {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceRequirements)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxOptions.
+func (in *SELinuxOptions) DeepCopy() *SELinuxOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(SELinuxOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScaleIOPersistentVolumeSource) DeepCopyInto(out *ScaleIOPersistentVolumeSource) {
+	*out = *in
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOPersistentVolumeSource.
+func (in *ScaleIOPersistentVolumeSource) DeepCopy() *ScaleIOPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ScaleIOPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScaleIOVolumeSource) DeepCopyInto(out *ScaleIOVolumeSource) {
+	*out = *in
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(LocalObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOVolumeSource.
+func (in *ScaleIOVolumeSource) DeepCopy() *ScaleIOVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ScaleIOVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScopeSelector) DeepCopyInto(out *ScopeSelector) {
+	*out = *in
+	if in.MatchExpressions != nil {
+		in, out := &in.MatchExpressions, &out.MatchExpressions
+		*out = make([]ScopedResourceSelectorRequirement, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeSelector.
+func (in *ScopeSelector) DeepCopy() *ScopeSelector {
+	if in == nil {
+		return nil
+	}
+	out := new(ScopeSelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScopedResourceSelectorRequirement) DeepCopyInto(out *ScopedResourceSelectorRequirement) {
+	*out = *in
+	if in.Values != nil {
+		in, out := &in.Values, &out.Values
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopedResourceSelectorRequirement.
+func (in *ScopedResourceSelectorRequirement) DeepCopy() *ScopedResourceSelectorRequirement {
+	if in == nil {
+		return nil
+	}
+	out := new(ScopedResourceSelectorRequirement)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Secret) DeepCopyInto(out *Secret) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Data != nil {
+		in, out := &in.Data, &out.Data
+		*out = make(map[string][]byte, len(*in))
+		for key, val := range *in {
+			var outVal []byte
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = make([]byte, len(*in))
+				copy(*out, *in)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Secret.
+func (in *Secret) DeepCopy() *Secret {
+	if in == nil {
+		return nil
+	}
+	out := new(Secret)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Secret) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretEnvSource) DeepCopyInto(out *SecretEnvSource) {
+	*out = *in
+	out.LocalObjectReference = in.LocalObjectReference
+	if in.Optional != nil {
+		in, out := &in.Optional, &out.Optional
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretEnvSource.
+func (in *SecretEnvSource) DeepCopy() *SecretEnvSource {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretEnvSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) {
+	*out = *in
+	out.LocalObjectReference = in.LocalObjectReference
+	if in.Optional != nil {
+		in, out := &in.Optional, &out.Optional
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeySelector.
+func (in *SecretKeySelector) DeepCopy() *SecretKeySelector {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretKeySelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretList) DeepCopyInto(out *SecretList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Secret, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretList.
+func (in *SecretList) DeepCopy() *SecretList { + if in == nil { + return nil + } + out := new(SecretList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretProjection) DeepCopyInto(out *SecretProjection) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretProjection. +func (in *SecretProjection) DeepCopy() *SecretProjection { + if in == nil { + return nil + } + out := new(SecretProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretReference) DeepCopyInto(out *SecretReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference. +func (in *SecretReference) DeepCopy() *SecretReference { + if in == nil { + return nil + } + out := new(SecretReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretVolumeSource) DeepCopyInto(out *SecretVolumeSource) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVolumeSource. +func (in *SecretVolumeSource) DeepCopy() *SecretVolumeSource { + if in == nil { + return nil + } + out := new(SecretVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityContext) DeepCopyInto(out *SecurityContext) { + *out = *in + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = new(Capabilities) + (*in).DeepCopyInto(*out) + } + if in.Privileged != nil { + in, out := &in.Privileged, &out.Privileged + *out = new(bool) + **out = **in + } + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(SELinuxOptions) + **out = **in + } + if in.WindowsOptions != nil { + in, out := &in.WindowsOptions, &out.WindowsOptions + *out = new(WindowsSecurityContextOptions) + (*in).DeepCopyInto(*out) + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(int64) + **out = **in + } + if in.RunAsGroup != nil { + in, out := &in.RunAsGroup, &out.RunAsGroup + *out = new(int64) + **out = **in + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = **in + } + if in.ReadOnlyRootFilesystem != nil { + in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem + *out = new(bool) + **out = **in + } + if in.AllowPrivilegeEscalation != nil { + in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation + *out = new(bool) + **out = **in + } + if in.ProcMount != nil { + in, out := &in.ProcMount, &out.ProcMount + *out = new(ProcMountType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContext. +func (in *SecurityContext) DeepCopy() *SecurityContext { + if in == nil { + return nil + } + out := new(SecurityContext) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SerializedReference) DeepCopyInto(out *SerializedReference) { + *out = *in + out.TypeMeta = in.TypeMeta + out.Reference = in.Reference + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializedReference. +func (in *SerializedReference) DeepCopy() *SerializedReference { + if in == nil { + return nil + } + out := new(SerializedReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SerializedReference) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Service) DeepCopyInto(out *Service) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. +func (in *Service) DeepCopy() *Service { + if in == nil { + return nil + } + out := new(Service) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Service) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceAccount) DeepCopyInto(out *ServiceAccount) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccount. +func (in *ServiceAccount) DeepCopy() *ServiceAccount { + if in == nil { + return nil + } + out := new(ServiceAccount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAccount) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountList) DeepCopyInto(out *ServiceAccountList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceAccount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountList. +func (in *ServiceAccountList) DeepCopy() *ServiceAccountList { + if in == nil { + return nil + } + out := new(ServiceAccountList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAccountList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountTokenProjection) DeepCopyInto(out *ServiceAccountTokenProjection) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountTokenProjection. +func (in *ServiceAccountTokenProjection) DeepCopy() *ServiceAccountTokenProjection { + if in == nil { + return nil + } + out := new(ServiceAccountTokenProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceList) DeepCopyInto(out *ServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Service, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceList. +func (in *ServiceList) DeepCopy() *ServiceList { + if in == nil { + return nil + } + out := new(ServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicePort) DeepCopyInto(out *ServicePort) { + *out = *in + out.TargetPort = in.TargetPort + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePort. +func (in *ServicePort) DeepCopy() *ServicePort { + if in == nil { + return nil + } + out := new(ServicePort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceProxyOptions) DeepCopyInto(out *ServiceProxyOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceProxyOptions. +func (in *ServiceProxyOptions) DeepCopy() *ServiceProxyOptions { + if in == nil { + return nil + } + out := new(ServiceProxyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceProxyOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ServicePort, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExternalIPs != nil { + in, out := &in.ExternalIPs, &out.ExternalIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SessionAffinityConfig != nil { + in, out := &in.SessionAffinityConfig, &out.SessionAffinityConfig + *out = new(SessionAffinityConfig) + (*in).DeepCopyInto(*out) + } + if in.LoadBalancerSourceRanges != nil { + in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec. +func (in *ServiceSpec) DeepCopy() *ServiceSpec { + if in == nil { + return nil + } + out := new(ServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) { + *out = *in + in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceStatus. +func (in *ServiceStatus) DeepCopy() *ServiceStatus { + if in == nil { + return nil + } + out := new(ServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SessionAffinityConfig) DeepCopyInto(out *SessionAffinityConfig) { + *out = *in + if in.ClientIP != nil { + in, out := &in.ClientIP, &out.ClientIP + *out = new(ClientIPConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionAffinityConfig. +func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig { + if in == nil { + return nil + } + out := new(SessionAffinityConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSPersistentVolumeSource. +func (in *StorageOSPersistentVolumeSource) DeepCopy() *StorageOSPersistentVolumeSource { + if in == nil { + return nil + } + out := new(StorageOSPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageOSVolumeSource) DeepCopyInto(out *StorageOSVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSVolumeSource. +func (in *StorageOSVolumeSource) DeepCopy() *StorageOSVolumeSource { + if in == nil { + return nil + } + out := new(StorageOSVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Sysctl) DeepCopyInto(out *Sysctl) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sysctl. +func (in *Sysctl) DeepCopy() *Sysctl { + if in == nil { + return nil + } + out := new(Sysctl) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPSocketAction) DeepCopyInto(out *TCPSocketAction) { + *out = *in + out.Port = in.Port + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPSocketAction. +func (in *TCPSocketAction) DeepCopy() *TCPSocketAction { + if in == nil { + return nil + } + out := new(TCPSocketAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Taint) DeepCopyInto(out *Taint) { + *out = *in + if in.TimeAdded != nil { + in, out := &in.TimeAdded, &out.TimeAdded + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taint. +func (in *Taint) DeepCopy() *Taint { + if in == nil { + return nil + } + out := new(Taint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Toleration) DeepCopyInto(out *Toleration) { + *out = *in + if in.TolerationSeconds != nil { + in, out := &in.TolerationSeconds, &out.TolerationSeconds + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Toleration. +func (in *Toleration) DeepCopy() *Toleration { + if in == nil { + return nil + } + out := new(Toleration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopologySelectorLabelRequirement) DeepCopyInto(out *TopologySelectorLabelRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySelectorLabelRequirement. +func (in *TopologySelectorLabelRequirement) DeepCopy() *TopologySelectorLabelRequirement { + if in == nil { + return nil + } + out := new(TopologySelectorLabelRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopologySelectorTerm) DeepCopyInto(out *TopologySelectorTerm) { + *out = *in + if in.MatchLabelExpressions != nil { + in, out := &in.MatchLabelExpressions, &out.MatchLabelExpressions + *out = make([]TopologySelectorLabelRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySelectorTerm. +func (in *TopologySelectorTerm) DeepCopy() *TopologySelectorTerm { + if in == nil { + return nil + } + out := new(TopologySelectorTerm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) { + *out = *in + if in.APIGroup != nil { + in, out := &in.APIGroup, &out.APIGroup + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedLocalObjectReference. +func (in *TypedLocalObjectReference) DeepCopy() *TypedLocalObjectReference { + if in == nil { + return nil + } + out := new(TypedLocalObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Volume) DeepCopyInto(out *Volume) { + *out = *in + in.VolumeSource.DeepCopyInto(&out.VolumeSource) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. +func (in *Volume) DeepCopy() *Volume { + if in == nil { + return nil + } + out := new(Volume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeDevice) DeepCopyInto(out *VolumeDevice) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeDevice. 
+func (in *VolumeDevice) DeepCopy() *VolumeDevice { + if in == nil { + return nil + } + out := new(VolumeDevice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeMount) DeepCopyInto(out *VolumeMount) { + *out = *in + if in.MountPropagation != nil { + in, out := &in.MountPropagation, &out.MountPropagation + *out = new(MountPropagationMode) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMount. +func (in *VolumeMount) DeepCopy() *VolumeMount { + if in == nil { + return nil + } + out := new(VolumeMount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeNodeAffinity) DeepCopyInto(out *VolumeNodeAffinity) { + *out = *in + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(NodeSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeAffinity. +func (in *VolumeNodeAffinity) DeepCopy() *VolumeNodeAffinity { + if in == nil { + return nil + } + out := new(VolumeNodeAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) { + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretProjection) + (*in).DeepCopyInto(*out) + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIProjection) + (*in).DeepCopyInto(*out) + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapProjection) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountToken != nil { + in, out := &in.ServiceAccountToken, &out.ServiceAccountToken + *out = new(ServiceAccountTokenProjection) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeProjection. +func (in *VolumeProjection) DeepCopy() *VolumeProjection { + if in == nil { + return nil + } + out := new(VolumeProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeSource) DeepCopyInto(out *VolumeSource) {
+	*out = *in
+	if in.HostPath != nil {
+		in, out := &in.HostPath, &out.HostPath
+		*out = new(HostPathVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.EmptyDir != nil {
+		in, out := &in.EmptyDir, &out.EmptyDir
+		*out = new(EmptyDirVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.GCEPersistentDisk != nil {
+		in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk
+		*out = new(GCEPersistentDiskVolumeSource)
+		**out = **in
+	}
+	if in.AWSElasticBlockStore != nil {
+		in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore
+		*out = new(AWSElasticBlockStoreVolumeSource)
+		**out = **in
+	}
+	if in.GitRepo != nil {
+		in, out := &in.GitRepo, &out.GitRepo
+		*out = new(GitRepoVolumeSource)
+		**out = **in
+	}
+	if in.Secret != nil {
+		in, out := &in.Secret, &out.Secret
+		*out = new(SecretVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NFS != nil {
+		in, out := &in.NFS, &out.NFS
+		*out = new(NFSVolumeSource)
+		**out = **in
+	}
+	if in.ISCSI != nil {
+		in, out := &in.ISCSI, &out.ISCSI
+		*out = new(ISCSIVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Glusterfs != nil {
+		in, out := &in.Glusterfs, &out.Glusterfs
+		*out = new(GlusterfsVolumeSource)
+		**out = **in
+	}
+	if in.PersistentVolumeClaim != nil {
+		in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim
+		*out = new(PersistentVolumeClaimVolumeSource)
+		**out = **in
+	}
+	if in.RBD != nil {
+		in, out := &in.RBD, &out.RBD
+		*out = new(RBDVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Quobyte != nil {
+		in, out := &in.Quobyte, &out.Quobyte
+		*out = new(QuobyteVolumeSource)
+		**out = **in
+	}
+	if in.FlexVolume != nil {
+		in, out := &in.FlexVolume, &out.FlexVolume
+		*out = new(FlexVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Cinder != nil {
+		in, out := &in.Cinder, &out.Cinder
+		*out = new(CinderVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.CephFS != nil {
+		in, out := &in.CephFS, &out.CephFS
+		*out = new(CephFSVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Flocker != nil {
+		in, out := &in.Flocker, &out.Flocker
+		*out = new(FlockerVolumeSource)
+		**out = **in
+	}
+	if in.DownwardAPI != nil {
+		in, out := &in.DownwardAPI, &out.DownwardAPI
+		*out = new(DownwardAPIVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FC != nil {
+		in, out := &in.FC, &out.FC
+		*out = new(FCVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AzureFile != nil {
+		in, out := &in.AzureFile, &out.AzureFile
+		*out = new(AzureFileVolumeSource)
+		**out = **in
+	}
+	if in.ConfigMap != nil {
+		in, out := &in.ConfigMap, &out.ConfigMap
+		*out = new(ConfigMapVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.VsphereVolume != nil {
+		in, out := &in.VsphereVolume, &out.VsphereVolume
+		*out = new(VsphereVirtualDiskVolumeSource)
+		**out = **in
+	}
+	if in.AzureDisk != nil {
+		in, out := &in.AzureDisk, &out.AzureDisk
+		*out = new(AzureDiskVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PhotonPersistentDisk != nil {
+		in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk
+		*out = new(PhotonPersistentDiskVolumeSource)
+		**out = **in
+	}
+	if in.Projected != nil {
+		in, out := &in.Projected, &out.Projected
+		*out = new(ProjectedVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PortworxVolume != nil {
+		in, out := &in.PortworxVolume, &out.PortworxVolume
+		*out = new(PortworxVolumeSource)
+		**out = **in
+	}
+	if in.ScaleIO != nil {
+		in, out := &in.ScaleIO, &out.ScaleIO
+		*out = new(ScaleIOVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.StorageOS != nil {
+		in, out := &in.StorageOS, &out.StorageOS
+		*out = new(StorageOSVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.CSI != nil {
+		in, out := &in.CSI, &out.CSI
+		*out = new(CSIVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSource.
+func (in *VolumeSource) DeepCopy() *VolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VsphereVirtualDiskVolumeSource) DeepCopyInto(out *VsphereVirtualDiskVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VsphereVirtualDiskVolumeSource.
+func (in *VsphereVirtualDiskVolumeSource) DeepCopy() *VsphereVirtualDiskVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(VsphereVirtualDiskVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WeightedPodAffinityTerm) DeepCopyInto(out *WeightedPodAffinityTerm) {
+	*out = *in
+	in.PodAffinityTerm.DeepCopyInto(&out.PodAffinityTerm)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedPodAffinityTerm.
+func (in *WeightedPodAffinityTerm) DeepCopy() *WeightedPodAffinityTerm {
+	if in == nil {
+		return nil
+	}
+	out := new(WeightedPodAffinityTerm)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WindowsSecurityContextOptions) DeepCopyInto(out *WindowsSecurityContextOptions) {
+	*out = *in
+	if in.GMSACredentialSpecName != nil {
+		in, out := &in.GMSACredentialSpecName, &out.GMSACredentialSpecName
+		*out = new(string)
+		**out = **in
+	}
+	if in.GMSACredentialSpec != nil {
+		in, out := &in.GMSACredentialSpec, &out.GMSACredentialSpec
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsSecurityContextOptions.
+func (in *WindowsSecurityContextOptions) DeepCopy() *WindowsSecurityContextOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(WindowsSecurityContextOptions)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 2eac1a5152..aa9a05245e 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -518,9 +518,12 @@ k8s.io/klog
 k8s.io/kube-openapi/pkg/common
 k8s.io/kube-openapi/pkg/util/proto
 # k8s.io/kubernetes v1.15.0 => k8s.io/kubernetes v1.15.0
+k8s.io/kubernetes/pkg/apis/core/v1/helper
 k8s.io/kubernetes/pkg/client/leaderelectionconfig
 k8s.io/kubernetes/pkg/util/node
 k8s.io/kubernetes/pkg/util/slice
+k8s.io/kubernetes/pkg/apis/core/helper
+k8s.io/kubernetes/pkg/apis/core
 # k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.0.0-20191210234026-b5fed2ccee23
 k8s.io/legacy-cloud-providers/gce
 # k8s.io/utils v0.0.0-20191114184206-e782cd3c129f
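
Note for reviewers: the vendored generated code above follows the standard deepcopy-gen contract. DeepCopyInto copies the receiver into an existing value, DeepCopy allocates and returns a fresh, fully independent copy (maps, slices, and pointer fields are all duplicated rather than aliased), and DeepCopyObject wraps DeepCopy behind runtime.Object so objects can be copied through the generic interface. A minimal sketch of why a controller relies on these copies follows; the service name and annotation key are made up for illustration and are not taken from this patch:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// An object obtained from an informer cache is shared with every other
	// consumer of that cache, so it must never be mutated in place.
	cached := &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "ilb-svc"}}

	// DeepCopy (generated, following the same pattern as the vendored file
	// above) returns an independent copy that is safe to modify.
	updated := cached.DeepCopy()
	if updated.Annotations == nil {
		updated.Annotations = map[string]string{}
	}
	updated.Annotations["example.com/ilb-status"] = "ready" // hypothetical key

	// The cached object is untouched; only the copy carries the annotation.
	fmt.Println(len(cached.Annotations), len(updated.Annotations)) // prints: 0 1
}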