
Extract firewall management into separate controller #403

Merged (1 commit) on Jul 24, 2018
cmd/glbc/main.go (8 additions, 2 deletions)
@@ -40,6 +40,7 @@ import (
 	"k8s.io/ingress-gce/cmd/glbc/app"
 	"k8s.io/ingress-gce/pkg/backendconfig"
 	"k8s.io/ingress-gce/pkg/crd"
+	"k8s.io/ingress-gce/pkg/firewalls"
 	"k8s.io/ingress-gce/pkg/flags"
 	"k8s.io/ingress-gce/pkg/version"
 )
@@ -166,9 +167,9 @@ func makeLeaderElectionConfig(client clientset.Interface, recorder record.EventRecorder
 }
 
 func runControllers(ctx *context.ControllerContext) {
-	namer, err := app.NewNamer(ctx.KubeClient, flags.F.ClusterName, controller.DefaultFirewallName)
+	namer, err := app.NewNamer(ctx.KubeClient, flags.F.ClusterName, firewalls.DefaultFirewallName)
 	if err != nil {
-		glog.Fatalf("app.NewNamer(ctx.KubeClient, %q, %q) = %v", flags.F.ClusterName, controller.DefaultFirewallName, err)
+		glog.Fatalf("app.NewNamer(ctx.KubeClient, %q, %q) = %v", flags.F.ClusterName, firewalls.DefaultFirewallName, err)
 	}
 
 	clusterManager, err := controller.NewClusterManager(ctx, namer, flags.F.HealthCheckPath, flags.F.DefaultSvcHealthCheckPath)
@@ -182,6 +183,8 @@ func runControllers(ctx *context.ControllerContext) {
 		glog.Fatalf("controller.NewLoadBalancerController(ctx, clusterManager, stopCh) = %v", err)
 	}
 
+	fwc := firewalls.NewFirewallController(ctx, namer, flags.F.NodePortRanges.Values())
+
 	if clusterManager.ClusterNamer.UID() != "" {
 		glog.V(0).Infof("Cluster name: %+v", clusterManager.ClusterNamer.UID())
 	}
@@ -197,6 +200,9 @@ func runControllers(ctx *context.ControllerContext) {
 
 	go app.RunSIGTERMHandler(lbc, flags.F.DeleteAllOnQuit)
 
+	go fwc.Run(stopCh)
+	glog.V(0).Infof("firewall controller started")
+
 	ctx.Start(stopCh)
 	lbc.Run()
Review comment (Contributor), on the lbc.Run() line:
I believe this blocks...

@rramkumar1 (Contributor, Author) replied on Jul 17, 2018:
Oops.. moved start of fwc above lbc.Run()
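That exchange is the substantive review note: lbc.Run() blocks the calling goroutine for the life of the process, so the firewall controller must be launched with go fwc.Run(stopCh) before it, or it would never start. A minimal sketch of the ordering issue (worker and blockingRun are illustrative stand-ins, not code from this PR):

package main

import (
	"fmt"
	"time"
)

// worker stands in for fwc.Run: it does periodic work until stopCh closes.
func worker(stopCh <-chan struct{}) {
	for {
		select {
		case <-stopCh:
			return
		case <-time.After(100 * time.Millisecond):
			fmt.Println("worker tick")
		}
	}
}

// blockingRun stands in for lbc.Run: it does not return until shutdown.
func blockingRun(stopCh <-chan struct{}) {
	<-stopCh
}

func main() {
	stopCh := make(chan struct{})
	go func() { // close stopCh after half a second so the example terminates
		time.Sleep(500 * time.Millisecond)
		close(stopCh)
	}()
	go worker(stopCh)   // started before the blocking call, as the fix does
	blockingRun(stopCh) // any statement after this line runs only at shutdown
}

Running the sketch prints a few worker ticks and exits; moving go worker(stopCh) below blockingRun(stopCh) would print nothing, which is the bug the review caught.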
pkg/context/context.go (5 additions, 0 deletions)
@@ -33,6 +33,11 @@ import (
 	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
 )
 
+const (
+	// Frequency to poll on local stores to sync.
+	StoreSyncPollPeriod = 5 * time.Second
+)
+
 // ControllerContext holds the state needed for the execution of the controller.
 type ControllerContext struct {
 	KubeClient kubernetes.Interface
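StoreSyncPollPeriod is exported so controllers share one interval for polling their local informer stores until they have synced. The call site is not shown in this PR; a sketch of the usual pattern with k8s.io/apimachinery's wait helpers, under that assumption (hasSynced is a hypothetical stand-in for an informer's HasSynced):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// StoreSyncPollPeriod mirrors the constant added to pkg/context above.
const StoreSyncPollPeriod = 5 * time.Second

func main() {
	stopCh := make(chan struct{})
	hasSynced := func() bool { return true } // stand-in for an informer's HasSynced

	// Poll every StoreSyncPollPeriod until the local stores report synced,
	// giving up if stopCh closes first.
	err := wait.PollUntil(StoreSyncPollPeriod, func() (bool, error) {
		return hasSynced(), nil
	}, stopCh)
	if err != nil {
		fmt.Printf("stores never synced: %v\n", err)
		return
	}
	fmt.Println("stores synced; safe to start the sync loop")
}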
pkg/controller/cluster_manager.go (0 additions, 34 deletions)
@@ -17,17 +17,12 @@ limitations under the License.
 package controller
 
 import (
-	"net/http"
-
-	"github.com/golang/glog"
 
 	compute "google.golang.org/api/compute/v1"
 	gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
 
 	"k8s.io/ingress-gce/pkg/backends"
 	"k8s.io/ingress-gce/pkg/context"
-	"k8s.io/ingress-gce/pkg/firewalls"
-	"k8s.io/ingress-gce/pkg/flags"
 	"k8s.io/ingress-gce/pkg/healthchecks"
 	"k8s.io/ingress-gce/pkg/instances"
 	"k8s.io/ingress-gce/pkg/loadbalancers"
@@ -40,7 +35,6 @@ type ClusterManager struct {
 	instancePool instances.NodePool
 	backendPool  backends.BackendPool
 	l7Pool       loadbalancers.LoadBalancerPool
-	firewallPool firewalls.SingleFirewallPool
 
 	// TODO: Refactor so we simply init a health check pool.
 	healthChecker healthchecks.HealthChecker
@@ -53,32 +47,10 @@ func (c *ClusterManager) Init(zl instances.ZoneLister, pp backends.ProbeProvider)
 	// TODO: Initialize other members as needed.
 }
 
-// IsHealthy returns an error if the cluster manager is unhealthy.
-func (c *ClusterManager) IsHealthy() (err error) {
-	// TODO: Expand on this, for now we just want to detect when the GCE client
-	// is broken.
-	_, err = c.backendPool.List()
-
-	// If this container is scheduled on a node without compute/rw it is
-	// effectively useless, but it is healthy. Reporting it as unhealthy
-	// will lead to container crashlooping.
-	if utils.IsHTTPErrorCode(err, http.StatusForbidden) {
-		glog.Infof("Reporting cluster as healthy, but unable to list backends: %v", err)
-		return nil
-	}
-	return
-}
-
 func (c *ClusterManager) shutdown() error {
 	if err := c.l7Pool.Shutdown(); err != nil {
 		return err
 	}
-	if err := c.firewallPool.Shutdown(); err != nil {
-		if _, ok := err.(*firewalls.FirewallXPNError); ok {
-			return nil
-		}
-		return err
-	}
 	// The backend pool will also delete instance groups.
 	return c.backendPool.Shutdown()
 }
@@ -106,10 +78,6 @@ func (c *ClusterManager) EnsureInstanceGroupsAndPorts(nodeNames []string, servicePorts
 	return igs, err
 }
 
-func (c *ClusterManager) EnsureFirewall(nodeNames []string, endpointPorts []string) error {
-	return c.firewallPool.Sync(nodeNames, endpointPorts...)
-}
-
 // GC garbage collects unused resources.
 // - lbNames are the names of L7 loadbalancers we wish to exist. Those not in
 //   this list are removed from the cloud.
@@ -144,7 +112,6 @@ func (c *ClusterManager) GC(lbNames []string, nodePorts []utils.ServicePort) error
 			return err
 		}
 		glog.V(2).Infof("Shutting down firewall as there are no loadbalancers")
-		c.firewallPool.Shutdown()
 	}
 
 	return nil
@@ -174,6 +141,5 @@ func NewClusterManager(
 
 	// L7 pool creates targetHTTPProxy, ForwardingRules, UrlMaps, StaticIPs.
 	cluster.l7Pool = loadbalancers.NewLoadBalancerPool(ctx.Cloud, cluster.ClusterNamer)
-	cluster.firewallPool = firewalls.NewFirewallPool(ctx.Cloud, cluster.ClusterNamer, gce.LoadBalancerSrcRanges(), flags.F.NodePortRanges.Values())
 	return &cluster, nil
 }
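All of the deletions above (the firewallPool field, the XPN-tolerant shutdown, EnsureFirewall, and the NewFirewallPool wiring) presumably move into the firewalls.FirewallController that main.go now constructs; its implementation is not part of this diff. A speculative sketch of the shape such a controller could take, with local stand-ins for the pkg/firewalls types the removed code referenced:

package main

import "fmt"

// Stand-ins for the pkg/firewalls types that the removed code referenced.
type SingleFirewallPool interface {
	Sync(nodeNames []string, endpointPorts ...string) error
	Shutdown() error
}

type FirewallXPNError struct{ Message string }

func (e *FirewallXPNError) Error() string { return e.Message }

// FirewallController is a guessed shape for the extracted controller; the
// real implementation lives in pkg/firewalls and is not shown in this diff.
type FirewallController struct {
	pool SingleFirewallPool
}

// Run blocks until stopCh closes, then shuts the pool down, treating XPN
// errors as non-fatal just as the removed ClusterManager.shutdown() did.
func (fwc *FirewallController) Run(stopCh chan struct{}) {
	<-stopCh
	if err := fwc.pool.Shutdown(); err != nil {
		if _, ok := err.(*FirewallXPNError); !ok {
			fmt.Printf("firewall shutdown failed: %v\n", err)
		}
	}
}

type noopPool struct{}

func (noopPool) Sync(nodeNames []string, endpointPorts ...string) error { return nil }
func (noopPool) Shutdown() error                                        { return nil }

func main() {
	fwc := &FirewallController{pool: noopPool{}}
	stopCh := make(chan struct{})
	close(stopCh) // trigger immediate shutdown for the example
	fwc.Run(stopCh)
}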