Extract firewall management into separate controller
rramkumar1 committed Jul 24, 2018
1 parent d580056 commit ebaf600
Showing 18 changed files with 584 additions and 272 deletions.
10 changes: 8 additions & 2 deletions cmd/glbc/main.go
@@ -40,6 +40,7 @@ import (
"k8s.io/ingress-gce/cmd/glbc/app"
"k8s.io/ingress-gce/pkg/backendconfig"
"k8s.io/ingress-gce/pkg/crd"
"k8s.io/ingress-gce/pkg/firewalls"
"k8s.io/ingress-gce/pkg/flags"
"k8s.io/ingress-gce/pkg/version"
)
@@ -166,9 +167,9 @@ func makeLeaderElectionConfig(client clientset.Interface, recorder record.EventR
}

func runControllers(ctx *context.ControllerContext) {
namer, err := app.NewNamer(ctx.KubeClient, flags.F.ClusterName, controller.DefaultFirewallName)
namer, err := app.NewNamer(ctx.KubeClient, flags.F.ClusterName, firewalls.DefaultFirewallName)
if err != nil {
glog.Fatalf("app.NewNamer(ctx.KubeClient, %q, %q) = %v", flags.F.ClusterName, controller.DefaultFirewallName, err)
glog.Fatalf("app.NewNamer(ctx.KubeClient, %q, %q) = %v", flags.F.ClusterName, firewalls.DefaultFirewallName, err)
}

clusterManager, err := controller.NewClusterManager(ctx, namer, flags.F.HealthCheckPath, flags.F.DefaultSvcHealthCheckPath)
@@ -182,6 +183,8 @@ func runControllers(ctx *context.ControllerContext) {
glog.Fatalf("controller.NewLoadBalancerController(ctx, clusterManager, stopCh) = %v", err)
}

fwc := firewalls.NewFirewallController(ctx, namer, flags.F.NodePortRanges.Values())

if clusterManager.ClusterNamer.UID() != "" {
glog.V(0).Infof("Cluster name: %+v", clusterManager.ClusterNamer.UID())
}
@@ -197,6 +200,9 @@ func runControllers(ctx *context.ControllerContext) {

go app.RunSIGTERMHandler(lbc, flags.F.DeleteAllOnQuit)

go fwc.Run(stopCh)
glog.V(0).Infof("firewall controller started")

ctx.Start(stopCh)
lbc.Run()

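Read together, the main.go hunks replace the cluster manager's firewall handling with a dedicated controller: the namer now takes its default firewall name from the firewalls package, the controller is built from the shared context, and it runs in its own goroutine alongside the load balancer controller. A minimal sketch of the resulting startup path, abridged from the hunks above (surrounding setup and error paths are omitted):

```go
// Abridged from the hunks above: firewall syncing now has its own controller
// and run loop instead of being ensured by ClusterManager during LB syncs.
namer, err := app.NewNamer(ctx.KubeClient, flags.F.ClusterName, firewalls.DefaultFirewallName)
if err != nil {
	glog.Fatalf("app.NewNamer(ctx.KubeClient, %q, %q) = %v", flags.F.ClusterName, firewalls.DefaultFirewallName, err)
}

fwc := firewalls.NewFirewallController(ctx, namer, flags.F.NodePortRanges.Values())
go fwc.Run(stopCh) // runs until stopCh closes, independently of lbc.Run()
```
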
5 changes: 5 additions & 0 deletions pkg/context/context.go
@@ -33,6 +33,11 @@ import (
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
)

const (
// Frequency to poll on local stores to sync.
StoreSyncPollPeriod = 5 * time.Second
)

// ControllerContext holds the state needed for the execution of the controller.
type ControllerContext struct {
KubeClient kubernetes.Interface
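
The pkg/context change adds a shared constant, StoreSyncPollPeriod, giving controllers a common interval for polling local stores until they have synced. Its call sites are not shown in this excerpt, so the snippet below is only a hypothetical illustration of how such a constant is typically consumed, using wait.PollUntil from k8s.io/apimachinery:

```go
import (
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/ingress-gce/pkg/context"
)

// waitForStores is a hypothetical helper: block until the controller context's
// stores report they are synced, or until stopCh closes. The helper name and
// the HasSynced call are assumptions, not part of this commit.
func waitForStores(ctx *context.ControllerContext, stopCh <-chan struct{}) error {
	return wait.PollUntil(context.StoreSyncPollPeriod, func() (bool, error) {
		return ctx.HasSynced(), nil
	}, stopCh)
}
```
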
34 changes: 0 additions & 34 deletions pkg/controller/cluster_manager.go
@@ -17,17 +17,12 @@ limitations under the License.
package controller

import (
"net/http"

"github.com/golang/glog"

compute "google.golang.org/api/compute/v1"
gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"

"k8s.io/ingress-gce/pkg/backends"
"k8s.io/ingress-gce/pkg/context"
"k8s.io/ingress-gce/pkg/firewalls"
"k8s.io/ingress-gce/pkg/flags"
"k8s.io/ingress-gce/pkg/healthchecks"
"k8s.io/ingress-gce/pkg/instances"
"k8s.io/ingress-gce/pkg/loadbalancers"
@@ -40,7 +35,6 @@ type ClusterManager struct {
instancePool instances.NodePool
backendPool backends.BackendPool
l7Pool loadbalancers.LoadBalancerPool
firewallPool firewalls.SingleFirewallPool

// TODO: Refactor so we simply init a health check pool.
healthChecker healthchecks.HealthChecker
@@ -53,32 +47,10 @@ func (c *ClusterManager) Init(zl instances.ZoneLister, pp backends.ProbeProvider
// TODO: Initialize other members as needed.
}

// IsHealthy returns an error if the cluster manager is unhealthy.
func (c *ClusterManager) IsHealthy() (err error) {
// TODO: Expand on this, for now we just want to detect when the GCE client
// is broken.
_, err = c.backendPool.List()

// If this container is scheduled on a node without compute/rw it is
// effectively useless, but it is healthy. Reporting it as unhealthy
// will lead to container crashlooping.
if utils.IsHTTPErrorCode(err, http.StatusForbidden) {
glog.Infof("Reporting cluster as healthy, but unable to list backends: %v", err)
return nil
}
return
}

func (c *ClusterManager) shutdown() error {
if err := c.l7Pool.Shutdown(); err != nil {
return err
}
if err := c.firewallPool.Shutdown(); err != nil {
if _, ok := err.(*firewalls.FirewallXPNError); ok {
return nil
}
return err
}
// The backend pool will also delete instance groups.
return c.backendPool.Shutdown()
}
@@ -106,10 +78,6 @@ func (c *ClusterManager) EnsureInstanceGroupsAndPorts(nodeNames []string, servic
return igs, err
}

func (c *ClusterManager) EnsureFirewall(nodeNames []string, endpointPorts []string) error {
return c.firewallPool.Sync(nodeNames, endpointPorts...)
}

// GC garbage collects unused resources.
// - lbNames are the names of L7 loadbalancers we wish to exist. Those not in
// this list are removed from the cloud.
@@ -144,7 +112,6 @@ func (c *ClusterManager) GC(lbNames []string, nodePorts []utils.ServicePort) err
return err
}
glog.V(2).Infof("Shutting down firewall as there are no loadbalancers")
c.firewallPool.Shutdown()
}

return nil
@@ -174,6 +141,5 @@ func NewClusterManager(

// L7 pool creates targetHTTPProxy, ForwardingRules, UrlMaps, StaticIPs.
cluster.l7Pool = loadbalancers.NewLoadBalancerPool(ctx.Cloud, cluster.ClusterNamer)
cluster.firewallPool = firewalls.NewFirewallPool(ctx.Cloud, cluster.ClusterNamer, gce.LoadBalancerSrcRanges(), flags.F.NodePortRanges.Values())
return &cluster, nil
}
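
With these deletions, ClusterManager no longer constructs, syncs, or shuts down a firewall pool; per the commit title, that responsibility moves to the new controller in pkg/firewalls, whose source is not part of this excerpt. The snippet below is therefore only a hypothetical sketch of the ownership move, assembled from the constructor and Sync call removed above:

```go
// Hypothetical sketch; pkg/firewalls/controller.go is not shown in this diff.
// The new controller is assumed to own the pool ClusterManager used to build...
pool := firewalls.NewFirewallPool(cloud, namer, gce.LoadBalancerSrcRanges(), flags.F.NodePortRanges.Values())

// ...and to drive the Sync call that ClusterManager.EnsureFirewall used to make.
if err := pool.Sync(nodeNames, endpointPorts...); err != nil {
	// Error handling here is an assumption; the old shutdown path special-cased
	// firewalls.FirewallXPNError, so similar tolerance is likely needed.
}
```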