From fc635bb9d01d4903b694673ea3f50c0847ab8e05 Mon Sep 17 00:00:00 2001
From: Joao Morais
Date: Sat, 7 Aug 2021 21:39:22 -0300
Subject: [PATCH] fix change notification of backend shard

HAProxy backends can be sharded into smaller pieces so that unchanged
backends don't need to be rebuilt, reducing the time spent regenerating
the configuration file, especially on deployments with thousands of
backends. HAProxy Ingress controls which shards should be rebuilt via
the partial parsing implementation of the ingress converter. Partial
parsing tracks everything that needs to be rebuilt, including backends.
The internal HAProxy model decides which shards to rebuild based on the
backends tracked by the ingress converter. This means that changes made
outside this controlled environment can be out of sync with the
configuration saved to disk, which is what HAProxy uses when it is
restarted. This desynchronization may eventually be fixed by newly
received events, but sometimes these don't trigger a HAProxy reload,
leaving the internal model in sync with disk but not with the running
HAProxy instance.

This out-of-sync state happens when HAProxy needs to be reloaded: all
backends are iterated and new empty endpoints are added if needed. If
the backend that received new endpoints wasn't tracked due to API
changes, it is considered a read-only backend and its configuration
file won't be updated. However, the internal model has this new
endpoint and will eventually use it, leading to a "No such server"
response from the HAProxy client socket, which the controller was
ignoring up to v0.12.5. This behavior has been mitigated since v0.12.6,
where unexpected responses trigger a reload with synced configuration
files, and this update fixes a root cause of the problem.

This should be merged as far back as v0.11.
---
 pkg/haproxy/dynupdate.go      | 15 ++++++++++++++-
 pkg/haproxy/types/backends.go | 15 ++++++++++-----
 2 files changed, 24 insertions(+), 6 deletions(-)

diff --git a/pkg/haproxy/dynupdate.go b/pkg/haproxy/dynupdate.go
index 1cb097559..2d7bb35e8 100644
--- a/pkg/haproxy/dynupdate.go
+++ b/pkg/haproxy/dynupdate.go
@@ -331,7 +331,8 @@ func (d *dynUpdater) checkEndpointPair(backend *hatypes.Backend, pair *epPair) b
 }
 
 func (d *dynUpdater) alignSlots() {
-	for _, back := range d.config.Backends().Items() {
+	backends := d.config.Backends()
+	for _, back := range backends.Items() {
 		if !back.Dynamic.DynUpdate {
 			// no need to add empty slots if won't dynamically update
 			continue
@@ -342,6 +343,7 @@
 			blockSize = 1
 		}
 		var newFreeSlots int
+		changed := false
 		if minFreeSlots == 0 && len(back.Endpoints) == 0 {
 			newFreeSlots = blockSize
 		} else {
@@ -353,6 +355,7 @@
 		}
 		for i := totalFreeSlots; i < minFreeSlots; i++ {
 			back.AddEmptyEndpoint()
+			changed = true
 		}
 		// * []endpoints == group of blocks
 		// * block == group of slots
@@ -362,6 +365,16 @@
 		}
 		for i := 0; i < newFreeSlots; i++ {
 			back.AddEmptyEndpoint()
+			changed = true
+		}
+		if changed {
+			// backends from the Items() map are read-only and their changes might not be
+			// reflected in the final configuration. Currently only sharded backends have
+			// this behavior, but this can be expanded to other scenarios in the future,
+			// so this should be properly handled by the model.
+			//
+			// TODO: move to the model the responsibility to know that a backend was changed.
+			backends.BackendChanged(back)
 		}
 	}
 }
diff --git a/pkg/haproxy/types/backends.go b/pkg/haproxy/types/backends.go
index 3e6442466..ad03add7a 100644
--- a/pkg/haproxy/types/backends.go
+++ b/pkg/haproxy/types/backends.go
@@ -62,7 +62,7 @@ func (b *Backends) Shrink() {
 	changed := false
 	for name, del := range b.itemsDel {
 		if add, found := b.itemsAdd[name]; found {
-			if backendsMatch(add, del) {
+			if len(add.Endpoints) <= len(del.Endpoints) && backendsMatch(add, del) {
 				// Such changed backend, when removed from the tracking, need to
 				// be reincluded into the current state hashmap `items` and also
 				// into its shard hashmap when backend sharding is enabled.
@@ -82,10 +82,10 @@
 	if changed {
 		b.changedShards = map[int]bool{}
 		for _, back := range b.itemsAdd {
-			b.changedShards[back.shard] = true
+			b.BackendChanged(back)
 		}
 		for _, back := range b.itemsDel {
-			b.changedShards[back.shard] = true
+			b.BackendChanged(back)
 		}
 	}
 }
@@ -138,6 +138,11 @@ func (b *Backends) Changed() bool {
 	return len(b.itemsAdd) > 0 || len(b.itemsDel) > 0
 }
 
+// BackendChanged ...
+func (b *Backends) BackendChanged(backend *Backend) {
+	b.changedShards[backend.shard] = true
+}
+
 // ChangedShards ...
 func (b *Backends) ChangedShards() []int {
 	changed := []int{}
@@ -226,7 +231,7 @@ func (b *Backends) AcquireBackend(namespace, name, port string) *Backend {
 	if shardCount > 0 {
 		b.shards[backend.shard][backend.ID] = backend
 	}
-	b.changedShards[backend.shard] = true
+	b.BackendChanged(backend)
 	return backend
 }
 
@@ -267,7 +272,7 @@ func (b *Backends) RemoveAll(backendID []BackendID) {
 		if len(b.shards) > 0 {
 			delete(b.shards[item.shard], id)
 		}
-		b.changedShards[item.shard] = true
+		b.BackendChanged(item)
 		b.itemsDel[id] = item
 		if item == b.DefaultBackend {
 			b.DefaultBackend = nil
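
Below is a minimal, self-contained sketch of the dirty-shard tracking pattern that this
patch centralizes behind BackendChanged. The types, fields, and the backend ID used here
are simplified stand-ins for illustration only, not the actual haproxy-ingress structs:
a caller that mutates a backend outside the converter's tracking (such as alignSlots
above) notifies the model, and the model records the backend's shard as changed so that
shard's configuration file is rewritten on the next sync.

package main

import "fmt"

// Backend is a simplified stand-in for the real hatypes.Backend.
type Backend struct {
	ID    string
	shard int
}

// Backends is a simplified stand-in for the real hatypes.Backends,
// keeping only the shard dirty-tracking relevant to this patch.
type Backends struct {
	changedShards map[int]bool
}

// BackendChanged marks the backend's shard as dirty so its
// configuration file is rewritten when HAProxy is synced.
func (b *Backends) BackendChanged(backend *Backend) {
	b.changedShards[backend.shard] = true
}

// ChangedShards returns the shards that need to be rebuilt.
func (b *Backends) ChangedShards() []int {
	shards := []int{}
	for shard, changed := range b.changedShards {
		if changed {
			shards = append(shards, shard)
		}
	}
	return shards
}

func main() {
	b := &Backends{changedShards: map[int]bool{}}

	// Hypothetical backend living in shard 3.
	back := &Backend{ID: "default_echoserver_8080", shard: 3}

	// A caller that adds empty endpoint slots (or makes any other
	// out-of-band change) notifies the model instead of writing to
	// changedShards directly.
	b.BackendChanged(back)

	fmt.Println(b.ChangedShards()) // [3]
}

Keeping the map write behind a method means the model, rather than each caller, owns the
knowledge of how a backend change maps to a shard rebuild, which is the direction the
TODO comment in dynupdate.go points toward.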