Slight refactor of controller context to include both NEG & BackendConfig switches #299

Merged: 1 commit, Jun 4, 2018
4 changes: 2 additions & 2 deletions cmd/glbc/main.go
@@ -107,8 +107,8 @@ func main() {
 
 	enableNEG := cloud.AlphaFeatureGate.Enabled(gce.AlphaFeatureNetworkEndpointGroup)
 	stopCh := make(chan struct{})
-	ctx := context.NewControllerContext(kubeClient, backendConfigClient, flags.F.WatchNamespace, flags.F.ResyncPeriod, enableNEG)
-	lbc, err := controller.NewLoadBalancerController(kubeClient, stopCh, ctx, clusterManager, enableNEG, flags.F.EnableBackendConfig)
+	ctx := context.NewControllerContext(kubeClient, backendConfigClient, flags.F.WatchNamespace, flags.F.ResyncPeriod, enableNEG, flags.F.EnableBackendConfig)
+	lbc, err := controller.NewLoadBalancerController(kubeClient, stopCh, ctx, clusterManager)
 	if err != nil {
 		glog.Fatalf("Error creating load balancer controller: %v", err)
 	}
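With this change the two feature switches travel with the controller context instead of being threaded through each constructor. A minimal sketch of the new wiring, using simplified stand-in types rather than the real Kubernetes interfaces:

package main

import "fmt"

// ControllerContext here is a simplified stand-in; the real one also
// carries the client and the informers.
type ControllerContext struct {
	NEGEnabled           bool
	BackendConfigEnabled bool
}

// Both switches are supplied once, at context-creation time.
func NewControllerContext(enableNEG, enableBackendConfig bool) *ControllerContext {
	return &ControllerContext{NEGEnabled: enableNEG, BackendConfigEnabled: enableBackendConfig}
}

// Consumers read the switches off the context instead of taking their own
// boolean parameters, which is why NewLoadBalancerController shrank above.
func NewLoadBalancerController(ctx *ControllerContext) {
	fmt.Printf("NEG=%v BackendConfig=%v\n", ctx.NEGEnabled, ctx.BackendConfigEnabled)
}

func main() {
	ctx := NewControllerContext(true, false)
	NewLoadBalancerController(ctx) // NEG=true BackendConfig=false
}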
29 changes: 16 additions & 13 deletions pkg/context/context.go
@@ -1,12 +1,9 @@
 /*
 Copyright 2017 The Kubernetes Authors.
-
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
-
 http://www.apache.org/licenses/LICENSE-2.0
-
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -33,7 +30,7 @@ import (
 	informerbackendconfig "k8s.io/ingress-gce/pkg/backendconfig/client/informers/externalversions/backendconfig/v1beta1"
 )
 
-// ControllerContext holds
+// ControllerContext holds the state needed for the execution of the controller.
 type ControllerContext struct {
 	KubeClient kubernetes.Interface
 
@@ -44,6 +41,9 @@ type ControllerContext struct {
 	NodeInformer     cache.SharedIndexInformer
 	EndpointInformer cache.SharedIndexInformer
 
+	NEGEnabled           bool
+	BackendConfigEnabled bool
+
 	// Map of namespace => record.EventRecorder.
 	recorders map[string]record.EventRecorder
 }
@@ -54,23 +54,26 @@ func NewControllerContext(
 	backendConfigClient backendconfigclient.Interface,
 	namespace string,
 	resyncPeriod time.Duration,
-	enableEndpointsInformer bool) *ControllerContext {
+	enableNEG bool,
+	enableBackendConfig bool) *ControllerContext {
 
 	newIndexer := func() cache.Indexers {
 		return cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}
 	}
 	context := &ControllerContext{
-		KubeClient:      kubeClient,
-		IngressInformer: informerv1beta1.NewIngressInformer(kubeClient, namespace, resyncPeriod, newIndexer()),
-		ServiceInformer: informerv1.NewServiceInformer(kubeClient, namespace, resyncPeriod, newIndexer()),
-		PodInformer:     informerv1.NewPodInformer(kubeClient, namespace, resyncPeriod, newIndexer()),
-		NodeInformer:    informerv1.NewNodeInformer(kubeClient, resyncPeriod, newIndexer()),
-		recorders:       map[string]record.EventRecorder{},
+		KubeClient:           kubeClient,
+		IngressInformer:      informerv1beta1.NewIngressInformer(kubeClient, namespace, resyncPeriod, newIndexer()),
+		ServiceInformer:      informerv1.NewServiceInformer(kubeClient, namespace, resyncPeriod, newIndexer()),
+		PodInformer:          informerv1.NewPodInformer(kubeClient, namespace, resyncPeriod, newIndexer()),
+		NodeInformer:         informerv1.NewNodeInformer(kubeClient, resyncPeriod, newIndexer()),
+		NEGEnabled:           enableNEG,
+		BackendConfigEnabled: enableBackendConfig,
+		recorders:            map[string]record.EventRecorder{},
 	}
-	if enableEndpointsInformer {
+	if enableNEG {
 		context.EndpointInformer = informerv1.NewEndpointsInformer(kubeClient, namespace, resyncPeriod, newIndexer())
 	}
-	if backendConfigClient != nil {
+	if enableBackendConfig {
 		context.BackendConfigInformer = informerbackendconfig.NewBackendConfigInformer(backendConfigClient, namespace, resyncPeriod, newIndexer())
 	}
 
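Note the invariant the constructor above establishes: EndpointInformer and BackendConfigInformer are non-nil only when the corresponding switch is on, so consumers must consult the switch before touching them. A minimal sketch of that pattern with stand-in types (not the real cache.SharedIndexInformer API):

package main

import "fmt"

type informer struct{ resource string } // stand-in for cache.SharedIndexInformer

type controllerContext struct {
	negEnabled            bool
	backendConfigEnabled  bool
	endpointInformer      *informer // nil unless negEnabled
	backendConfigInformer *informer // nil unless backendConfigEnabled
}

func newControllerContext(enableNEG, enableBackendConfig bool) *controllerContext {
	c := &controllerContext{negEnabled: enableNEG, backendConfigEnabled: enableBackendConfig}
	if enableNEG {
		c.endpointInformer = &informer{resource: "endpoints"}
	}
	if enableBackendConfig {
		c.backendConfigInformer = &informer{resource: "backendconfigs"}
	}
	return c
}

func main() {
	ctx := newControllerContext(true, false)
	// Always guard on the switch before dereferencing the optional informer.
	if ctx.negEnabled {
		fmt.Println("watching", ctx.endpointInformer.resource)
	}
	if ctx.backendConfigEnabled {
		fmt.Println("watching", ctx.backendConfigInformer.resource)
	}
}

This is also why the translator check further down moves from t.ctx.BackendConfigInformer != nil to t.ctx.BackendConfigEnabled: the switch, not the informer's nil-ness, is the single source of truth.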
15 changes: 5 additions & 10 deletions pkg/controller/controller.go
@@ -84,8 +84,6 @@ type LoadBalancerController struct {
 	// hasSynced returns true if all associated sub-controllers have synced.
 	// Abstracted into a func for testing.
 	hasSynced func() bool
-	// negEnabled indicates whether NEG feature is enabled.
-	negEnabled bool
 }
 
 // NewLoadBalancerController creates a controller for gce loadbalancers.
@@ -97,9 +95,7 @@ func NewLoadBalancerController(
 	kubeClient kubernetes.Interface,
 	stopCh chan struct{},
 	ctx *context.ControllerContext,
-	clusterManager *ClusterManager,
-	negEnabled bool,
-	backendConfigEnabled bool) (*LoadBalancerController, error) {
+	clusterManager *ClusterManager) (*LoadBalancerController, error) {
 
 	broadcaster := record.NewBroadcaster()
 	broadcaster.StartLogging(glog.Infof)
@@ -115,11 +111,10 @@
 		CloudClusterManager: clusterManager,
 		stopCh:              stopCh,
 		hasSynced:           ctx.HasSynced,
-		negEnabled:          negEnabled,
 	}
 	lbc.ingQueue = utils.NewPeriodicTaskQueue("ingresses", lbc.sync)
 
-	if negEnabled {
+	if ctx.NEGEnabled {
 		lbc.endpointLister.Indexer = ctx.EndpointInformer.GetIndexer()
 	}
 
@@ -173,7 +168,7 @@
 	})
 
 	// BackendConfig event handlers.
-	if backendConfigEnabled {
+	if ctx.BackendConfigEnabled {
 		ctx.BackendConfigInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
 			AddFunc: lbc.enqueueIngressForObject,
 			UpdateFunc: func(old, cur interface{}) {
@@ -185,7 +180,7 @@
 		})
 	}
 
-	lbc.Translator = translator.NewTranslator(lbc.CloudClusterManager.ClusterNamer, ctx, negEnabled)
+	lbc.Translator = translator.NewTranslator(lbc.CloudClusterManager.ClusterNamer, ctx)
 	lbc.tlsLoader = &tls.TLSCertsFromSecretsLoader{Client: lbc.client}
 
 	glog.V(3).Infof("Created new loadbalancer controller")
@@ -372,7 +367,7 @@ func (lbc *LoadBalancerController) ensureIngress(ing *extensions.Ingress, nodeNa
 	}
 
 	// If NEG enabled, link the backend services to the NEGs.
-	if lbc.negEnabled {
+	if lbc.ctx.NEGEnabled {
 		for _, svcPort := range ingSvcPorts {
 			if svcPort.NEGEnabled {
 				zones, err := lbc.Translator.ListZones()
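The ensureIngress hunk above keeps the NEG-linking flow intact and only changes where the switch is read from: for every NEG-enabled service port, the backend service is linked to the NEGs in each zone the cluster spans. A rough, self-contained sketch of that loop's shape (hypothetical linkNEGs helper, simplified types):

package main

import "fmt"

type servicePort struct {
	name       string
	negEnabled bool
}

// linkNEGs mirrors the guarded loop in ensureIngress: do nothing unless the
// controller-wide switch is on, then link each NEG-enabled port in every zone.
func linkNEGs(ctxNEGEnabled bool, ports []servicePort, zones []string) {
	if !ctxNEGEnabled {
		return
	}
	for _, p := range ports {
		if !p.negEnabled {
			continue
		}
		for _, z := range zones {
			fmt.Printf("linking backend for %s to NEG in %s\n", p.name, z)
		}
	}
}

func main() {
	ports := []servicePort{{"web:80", true}, {"db:5432", false}}
	linkNEGs(true, ports, []string{"us-central1-a", "us-central1-b"})
}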
4 changes: 2 additions & 2 deletions pkg/controller/controller_test.go
@@ -45,8 +45,8 @@ func newLoadBalancerController(t *testing.T, cm *fakeClusterManager) *LoadBalanc
 	backendConfigClient := backendconfigclient.NewSimpleClientset()
 
 	stopCh := make(chan struct{})
-	ctx := context.NewControllerContext(kubeClient, backendConfigClient, api_v1.NamespaceAll, 1*time.Minute, true)
-	lbc, err := NewLoadBalancerController(kubeClient, stopCh, ctx, cm.ClusterManager, true, true)
+	ctx := context.NewControllerContext(kubeClient, backendConfigClient, api_v1.NamespaceAll, 1*time.Minute, true, false)
+	lbc, err := NewLoadBalancerController(kubeClient, stopCh, ctx, cm.ClusterManager)
 	if err != nil {
 		t.Fatalf("%v", err)
 	}
32 changes: 13 additions & 19 deletions pkg/controller/translator/translator.go
@@ -1,12 +1,9 @@
 /*
 Copyright 2017 The Kubernetes Authors.
-
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
-
 http://www.apache.org/licenses/LICENSE-2.0
-
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -43,19 +40,14 @@ import (
 )
 
 // NewTranslator returns a new Translator.
-func NewTranslator(namer *utils.Namer, ctx *context.ControllerContext, negEnabled bool) *Translator {
-	return &Translator{
-		namer:      namer,
-		ctx:        ctx,
-		negEnabled: negEnabled,
-	}
+func NewTranslator(namer *utils.Namer, ctx *context.ControllerContext) *Translator {
+	return &Translator{namer, ctx}
 }
 
 // Translator helps with kubernetes -> gce api conversion.
 type Translator struct {
-	namer      *utils.Namer
-	ctx        *context.ControllerContext
-	negEnabled bool
+	namer *utils.Namer
+	ctx   *context.ControllerContext
 }
 
 // getServicePort looks in the svc store for a matching service:port,
@@ -113,7 +105,7 @@ PortLoop:
 	}
 
 	var beConfig *backendconfigv1beta1.BackendConfig
-	if t.ctx.BackendConfigInformer != nil {
+	if t.ctx.BackendConfigEnabled {
 		beConfig, err := backendconfig.GetBackendConfigForServicePort(t.ctx.BackendConfigInformer.GetIndexer(), svc, port)
 		if err != nil {
 			return nil, errors.ErrSvcBackendConfig{ServicePortID: id, Err: err}
@@ -131,7 +123,7 @@
 		NodePort:      int64(port.NodePort),
 		Protocol:      proto,
 		SvcTargetPort: port.TargetPort.String(),
-		NEGEnabled:    t.negEnabled && negEnabled,
+		NEGEnabled:    t.ctx.NEGEnabled && negEnabled,
 		BackendConfig: beConfig,
 	}, nil
 }
@@ -200,7 +192,8 @@ func getZone(n *api_v1.Node) string {
 
 // GetZoneForNode returns the zone for a given node by looking up its zone label.
 func (t *Translator) GetZoneForNode(name string) (string, error) {
-	nodes, err := listers.NewNodeLister(t.ctx.NodeInformer.GetIndexer()).ListWithPredicate(utils.NodeIsReady)
+	nodeLister := t.ctx.NodeInformer.GetIndexer()
+	nodes, err := listers.NewNodeLister(nodeLister).ListWithPredicate(utils.NodeIsReady)
 	if err != nil {
 		return "", err
 	}
@@ -217,7 +210,8 @@ func (t *Translator) GetZoneForNode(name string) (string, error) {
 // ListZones returns a list of zones this Kubernetes cluster spans.
 func (t *Translator) ListZones() ([]string, error) {
 	zones := sets.String{}
-	readyNodes, err := listers.NewNodeLister(t.ctx.NodeInformer.GetIndexer()).ListWithPredicate(utils.NodeIsReady)
+	nodeLister := t.ctx.NodeInformer.GetIndexer()
+	readyNodes, err := listers.NewNodeLister(nodeLister).ListWithPredicate(utils.NodeIsReady)
 	if err != nil {
 		return zones.List(), err
 	}
@@ -281,12 +275,12 @@ func (t *Translator) getHTTPProbe(svc api_v1.Service, targetPort intstr.IntOrStr
 // GatherEndpointPorts returns all ports needed to open NEG endpoints.
 func (t *Translator) GatherEndpointPorts(svcPorts []utils.ServicePort) []string {
 	portMap := map[int64]bool{}
-	for _, sp := range svcPorts {
-		if t.negEnabled && sp.NEGEnabled {
+	for _, p := range svcPorts {
+		if t.ctx.NEGEnabled && p.NEGEnabled {
 			// For NEG backend, need to open firewall to all endpoint target ports
 			// TODO(mixia): refactor firewall syncing into a separate go routine with different trigger.
 			// With NEG, endpoint changes may cause firewall ports to be different if user specifies inconsistent backends.
-			endpointPorts := listEndpointTargetPorts(t.ctx.EndpointInformer.GetIndexer(), sp.ID.Service.Namespace, sp.ID.Service.Name, sp.SvcTargetPort)
+			endpointPorts := listEndpointTargetPorts(t.ctx.EndpointInformer.GetIndexer(), p.ID.Service.Namespace, p.ID.Service.Name, p.SvcTargetPort)
 			for _, ep := range endpointPorts {
 				portMap[int64(ep)] = true
 			}
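GatherEndpointPorts above uses portMap as a set, so a firewall port shared by several NEG-enabled services is reported exactly once. A self-contained sketch of that deduplication pattern (simplified inputs; the real code resolves target ports through the endpoints informer):

package main

import (
	"fmt"
	"strconv"
)

// gatherPorts records each port once, mirroring the map[int64]bool set
// built by GatherEndpointPorts.
func gatherPorts(portsPerService [][]int64) []string {
	seen := map[int64]bool{}
	var out []string
	for _, ports := range portsPerService {
		for _, port := range ports {
			if !seen[port] {
				seen[port] = true
				out = append(out, strconv.FormatInt(port, 10))
			}
		}
	}
	return out
}

func main() {
	// Two services share 8080; it appears once in the result.
	fmt.Println(gatherPorts([][]int64{{8080, 9090}, {8080, 443}}))
	// Output: [8080 9090 443]
}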
7 changes: 3 additions & 4 deletions pkg/controller/translator/translator_test.go
@@ -49,11 +49,10 @@ func fakeTranslator(negEnabled bool) *Translator {
 	backendConfigClient := backendconfigclient.NewSimpleClientset()
 
 	namer := utils.NewNamer("uid1", "")
-	ctx := context.NewControllerContext(client, backendConfigClient, apiv1.NamespaceAll, 1*time.Second, negEnabled)
+	ctx := context.NewControllerContext(client, backendConfigClient, apiv1.NamespaceAll, 1*time.Second, negEnabled, false)
 	gce := &Translator{
-		namer:      namer,
-		ctx:        ctx,
-		negEnabled: negEnabled,
+		namer: namer,
+		ctx:   ctx,
 	}
 	return gce
 }
2 changes: 1 addition & 1 deletion pkg/neg/controller_test.go
@@ -35,7 +35,7 @@ import (
 
 func newTestController(kubeClient kubernetes.Interface) *Controller {
 	backendConfigClient := backendconfigclient.NewSimpleClientset()
-	context := context.NewControllerContext(kubeClient, backendConfigClient, apiv1.NamespaceAll, 1*time.Second, true)
+	context := context.NewControllerContext(kubeClient, backendConfigClient, apiv1.NamespaceAll, 1*time.Second, true, false)
 	controller, _ := NewController(kubeClient,
 		NewFakeNetworkEndpointGroupCloud("test-subnetwork", "test-network"),
 		context,
2 changes: 1 addition & 1 deletion pkg/neg/manager_test.go
@@ -38,7 +38,7 @@ const (
 
 func NewTestSyncerManager(kubeClient kubernetes.Interface) *syncerManager {
 	backendConfigClient := backendconfigclient.NewSimpleClientset()
-	context := context.NewControllerContext(kubeClient, backendConfigClient, apiv1.NamespaceAll, 1*time.Second, true)
+	context := context.NewControllerContext(kubeClient, backendConfigClient, apiv1.NamespaceAll, 1*time.Second, true, false)
 	manager := newSyncerManager(
 		utils.NewNamer(CluseterID, ""),
 		record.NewFakeRecorder(100),
2 changes: 1 addition & 1 deletion pkg/neg/syncer_test.go
@@ -25,7 +25,7 @@ const (
 func NewTestSyncer() *syncer {
 	kubeClient := fake.NewSimpleClientset()
 	backendConfigClient := backendconfigclient.NewSimpleClientset()
-	context := context.NewControllerContext(kubeClient, backendConfigClient, apiv1.NamespaceAll, 1*time.Second, true)
+	context := context.NewControllerContext(kubeClient, backendConfigClient, apiv1.NamespaceAll, 1*time.Second, true, false)
 	svcPort := servicePort{
 		namespace: testServiceNamespace,
 		name:      testServiceName,
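All four test fixtures above now repeat the same six-argument NewControllerContext call. If the signature keeps growing, a shared helper could keep the feature switches in one place; a hypothetical sketch (not part of this PR), reusing the fake clientsets already constructed in the test files above:

// newTestContext is a hypothetical helper, not part of this PR; it assumes
// the same imports (fake clientsets, apiv1, time, context) used by the
// surrounding test files.
func newTestContext(enableNEG, enableBackendConfig bool) *context.ControllerContext {
	kubeClient := fake.NewSimpleClientset()
	backendConfigClient := backendconfigclient.NewSimpleClientset()
	return context.NewControllerContext(kubeClient, backendConfigClient,
		apiv1.NamespaceAll, 1*time.Second, enableNEG, enableBackendConfig)
}

A test would then call newTestContext(true, false) instead of spelling out the full constructor each time.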