From 6dcab358035041a7f4d68003456ac084b4abff1a Mon Sep 17 00:00:00 2001
From: Taras Yatsurak
Date: Tue, 7 Dec 2021 18:31:21 +0200
Subject: [PATCH 1/8] add service and deployment handling

---
 Makefile | 5 +-
 api/v1alpha1/envoyfleet_types.go | 51 +
 api/v1alpha1/zz_generated.deepcopy.go | 71 ++
 .../crd/bases/gateway.kusk.io_envoyfleet.yaml | 1007 +++++++++++++++++
 config/samples/gateway_v1_envoyfleet.yaml | 71 +-
 controllers/envoyfleet_controller.go | 32 +-
 controllers/envoyfleet_resources.go | 285 +++++
 go.mod | 2 +-
 go.sum | 4 +-
 k8sutils/envoy_config_template.go | 46 -
 k8sutils/envoy_deployment.go | 208 ----
 k8sutils/utils.go | 41 +
 main.go | 5 +-
 13 files changed, 1554 insertions(+), 274 deletions(-)
 create mode 100644 controllers/envoyfleet_resources.go
 delete mode 100644 k8sutils/envoy_config_template.go
 delete mode 100644 k8sutils/envoy_deployment.go
 create mode 100644 k8sutils/utils.go

diff --git a/Makefile b/Makefile
index 6865896fb..b24502be8 100644
--- a/Makefile
+++ b/Makefile
@@ -111,10 +111,13 @@ deploy-debug: manifests kustomize ## Deploy controller with debugger to the K8s
 undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config.
 	$(KUSTOMIZE) build config/default | kubectl delete -f -
 
+cycle: ## Trigger manager deployment rollout restart to pick up the new container image with the same tag
+	kubectl rollout restart deployment/kusk-controller-manager -n kusk-system
+	@echo "Triggered deployment/kusk-controller-manager restart"
 
 CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
 controller-gen: ## Download controller-gen locally if necessary.
-	$(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.4.1)
+	$(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0)
 
 KUSTOMIZE = $(shell pwd)/bin/kustomize
 kustomize: ## Download kustomize locally if necessary.
diff --git a/api/v1alpha1/envoyfleet_types.go b/api/v1alpha1/envoyfleet_types.go
index cf420f7f1..99084b678 100644
--- a/api/v1alpha1/envoyfleet_types.go
+++ b/api/v1alpha1/envoyfleet_types.go
@@ -25,6 +25,7 @@ SOFTWARE.
 package v1alpha1
 
 import (
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -36,12 +37,62 @@ type EnvoyFleetSpec struct {
 	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
 	// Important: Run "make" to regenerate code after modifying this file
 
+	// Service describes Envoy K8s service settings
+	Service *ServiceConfig `json:"service"`
+
+	// Envoy image tag
+	Image string `json:"image"`
+	// Node Selector is used to schedule the Envoy pod(s) to the specifically labeled nodes, optional
+	// This is the map of "key: value" labels (e.g. "disktype": "ssd")
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+	// Affinity is used to schedule Envoy pod(s) to specific nodes, optional
+	// +optional
+	Affinity *corev1.Affinity `json:"affinity,omitempty"`
+	// Tolerations allow the pod to be scheduled onto nodes that have specific taints, optional
+	// +optional
+	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+	// Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+	// Value must be a non-negative integer. The value zero indicates stop immediately via
+	// the kill signal (no opportunity to shut down).
+	// If this value is nil, the default grace period will be used instead.
+ // The grace period is the duration in seconds after the processes running in the pod are sent + // a termination signal and the time when the processes are forcibly halted with a kill signal. + // Set this value longer than the expected cleanup time for your process. + // Defaults to 30 seconds. + // +optional + TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` + // Additional Envoy Deployment annotations, optional + // +optional + Annotations map[string]string `json:"annotations,omitempty"` + // Resources allow to set CPU and Memory resource requests and limits, optional + // +optional + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` + // Size field specifies the number of Envoy Pods being deployed. Optional, default value is 1. // +kubebuilder:validation:Minimum=1 // +kubebuilder:default:=1 Size *int32 `json:"size,omitempty"` } +type ServiceConfig struct { + + // Kubernetes service type: NodePort, ClusterIP or LoadBalancer + // +kubebuilder:validation:Enum=NodePort;ClusterIP;LoadBalancer + Type corev1.ServiceType `json:"type"` + + // Kubernetes Service ports + Ports []corev1.ServicePort `json:"ports"` + + // Service's annotations + // +optional + Annotations map[string]string `json:"annotations,omitempty"` + + // Static ip address for the LoadBalancer type if available + // +optional + LoadBalancerIP string `json:"loadBalancerIP,omitempty"` +} + // EnvoyFleetStatus defines the observed state of EnvoyFleet type EnvoyFleetStatus struct { // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index d3612b1bf..f80486f55 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -31,6 +31,7 @@ package v1alpha1 import ( "github.com/kubeshop/kusk-gateway/options" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -209,6 +210,47 @@ func (in *EnvoyFleetList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EnvoyFleetSpec) DeepCopyInto(out *EnvoyFleetSpec) { *out = *in + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceConfig) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(v1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } if in.Size != nil { in, out := &in.Size, &out.Size *out = new(int32) @@ -303,6 +345,35 @@ func (in *Route) DeepCopy() *Route { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ServiceConfig) DeepCopyInto(out *ServiceConfig) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]v1.ServicePort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceConfig. +func (in *ServiceConfig) DeepCopy() *ServiceConfig { + if in == nil { + return nil + } + out := new(ServiceConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StaticRoute) DeepCopyInto(out *StaticRoute) { *out = *in diff --git a/config/crd/bases/gateway.kusk.io_envoyfleet.yaml b/config/crd/bases/gateway.kusk.io_envoyfleet.yaml index e77056799..2fea9153b 100644 --- a/config/crd/bases/gateway.kusk.io_envoyfleet.yaml +++ b/config/crd/bases/gateway.kusk.io_envoyfleet.yaml @@ -40,6 +40,957 @@ spec: spec: description: EnvoyFleetSpec defines the desired state of EnvoyFleet properties: + affinity: + description: Affinity is used to schedule Envoy pod(s) to specific + nodes, optional + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. 
+ type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. 
This field is beta-level + and is only honored when PodAffinityNamespaceSelector + feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces list + and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may + not try to eventually evict the pod from its node. When + there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms + must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied to the + union of the namespaces selected by this field and + the ones listed in the namespaces field. null selector + and null or empty namespaces list means "this pod's + namespace". An empty selector ({}) matches all namespaces. + This field is beta-level and is only honored when + PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace + names that the term applies to. The term is applied + to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. 
null or + empty namespaces list and null namespaceSelector means + "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates one + or more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. 
This field is beta-level + and is only honored when PodAffinityNamespaceSelector + feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces list + and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its + node. When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied to the + union of the namespaces selected by this field and + the ones listed in the namespaces field. null selector + and null or empty namespaces list means "this pod's + namespace". An empty selector ({}) matches all namespaces. + This field is beta-level and is only honored when + PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace + names that the term applies to. The term is applied + to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. 
null or + empty namespaces list and null namespaceSelector means + "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + annotations: + additionalProperties: + type: string + description: Additional Envoy Deployment annotations, optional + type: object + image: + description: Envoy image tag + type: string + nodeSelector: + additionalProperties: + type: string + description: 'Node Selector is used to schedule the Envoy pod(s) to + the specificly labeled nodes, optional This is the map of "key: + value" labels (e.g. "disktype": "ssd")' + type: object + resources: + description: Resources allow to set CPU and Memory resource requests + and limits, optional + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + service: + description: Service describes Envoy K8s service settings + properties: + annotations: + additionalProperties: + type: string + description: Service's annotations + type: object + loadBalancerIP: + description: Static ip address for the LoadBalancer type if available + type: string + ports: + description: Kubernetes Service ports + items: + description: ServicePort contains information on service's port. + properties: + appProtocol: + description: The application protocol for this port. This + field follows standard Kubernetes label syntax. Un-prefixed + names are reserved for IANA standard service names (as + per RFC-6335 and http://www.iana.org/assignments/service-names). + Non-standard protocols should use prefixed names such + as mycompany.com/my-custom-protocol. + type: string + name: + description: The name of this port within the service. This + must be a DNS_LABEL. All ports within a ServiceSpec must + have unique names. When considering the endpoints for + a Service, this must match the 'name' field in the EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: 'The port on each node on which this service + is exposed when type is NodePort or LoadBalancer. Usually + assigned by the system. 
If a value is specified, in-range, + and not in use it will be used, otherwise the operation + will fail. If not specified, a port will be allocated + if this Service requires one. If this field is specified + when creating a Service which does not need it, creation + will fail. This field will be wiped when updating a Service + to no longer need it (e.g. changing type from NodePort + to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport' + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: The IP protocol for this port. Supports "TCP", + "UDP", and "SCTP". Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: 'Number or name of the port to access on the + pods targeted by the service. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. If this is + a string, it will be looked up as a named port in the + target Pod''s container ports. If this is not specified, + the value of the ''port'' field is used (an identity map). + This field is ignored for services with clusterIP=None, + and should be omitted or set equal to the ''port'' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service' + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + type: + description: 'Kubernetes service type: NodePort, ClusterIP or + LoadBalancer' + enum: + - NodePort + - ClusterIP + - LoadBalancer + type: string + required: + - ports + - type + type: object size: default: 1 description: Size field specifies the number of Envoy Pods being deployed. @@ -47,6 +998,62 @@ spec: format: int32 minimum: 1 type: integer + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate + gracefully. May be decreased in delete request. Value must be non-negative + integer. The value zero indicates stop immediately via the kill + signal (no opportunity to shut down). If this value is nil, the + default grace period will be used instead. The grace period is the + duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer than the expected + cleanup time for your process. Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: Tolerations allow pod to be scheduled to the nodes that + has specific toleration labels, optional + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple using the matching + operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match all + values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the + value. Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod + can tolerate all taints of a particular category. 
+                    type: string
+                  tolerationSeconds:
+                    description: TolerationSeconds represents the period of time
+                      the toleration (which must be of effect NoExecute, otherwise
+                      this field is ignored) tolerates the taint. By default, it
+                      is not set, which means tolerate the taint forever (do not
+                      evict). Zero and negative values will be treated as 0 (evict
+                      immediately) by the system.
+                    format: int64
+                    type: integer
+                  value:
+                    description: Value is the taint value the toleration matches
+                      to. If the operator is Exists, the value should be empty,
+                      otherwise just a regular string.
+                    type: string
+                type: object
+              type: array
+          required:
+          - image
+          - service
           type: object
         status:
           description: EnvoyFleetStatus defines the observed state of EnvoyFleet
diff --git a/config/samples/gateway_v1_envoyfleet.yaml b/config/samples/gateway_v1_envoyfleet.yaml
index 74a22d4a0..d85561456 100644
--- a/config/samples/gateway_v1_envoyfleet.yaml
+++ b/config/samples/gateway_v1_envoyfleet.yaml
@@ -2,4 +2,73 @@ apiVersion: gateway.kusk.io/v1alpha1
 kind: EnvoyFleet
 metadata:
   name: default
-spec: {}
+spec:
+  image: "envoyproxy/envoy-alpine:v1.20.0"
+  service:
+    # NodePort, ClusterIP, LoadBalancer
+    type: LoadBalancer
+    # Specify annotations to modify service behaviour, e.g. for GCP to create an internal load balancer:
+    # annotations:
+    #   networking.gke.io/load-balancer-type: "Internal"
+    # Specify a predefined static load balancer IP address if present
+    #loadBalancerIP: 10.10.10.10
+    ports:
+    - name: http
+      port: 80
+      targetPort: http
+    - name: https
+      port: 443
+      targetPort: https
+  resources:
+    # limits:
+    #   cpu: 1
+    #   memory: 100M
+    requests:
+      cpu: 10m
+      memory: 100M
+  # Add annotations, e.g. so that Prometheus scrapes the pods.
+  annotations:
+    prometheus.io/scrape: 'true'
+    prometheus.io/port: '9102'
+  # Scheduling directives
+  # Read https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ for the details.
+
+  # Optional - schedule Envoy pods to the nodes with the label "disktype=ssd".
+  # nodeSelector:
+  #   disktype: "ssd"
+
+  # Optional - allow scheduling on a "tainted" node. Taint with "kubectl taint nodes node1 key1=value1:NoSchedule".
+  # Taints repel pods from the node unless the pods have a matching toleration.
+  # The lines below allow this specific Envoy pod to be scheduled there (though the scheduler still decides where to put it).
+  # tolerations:
+  # - key: "key1"
+  #   operator: "Exists"
+  #   effect: "NoSchedule"
+
+  # Optional - provide pod affinity and anti-affinity settings.
+  # This newer and more flexible scheme can replace nodeSelector, though the two can also be specified together.
+  # For scalability and fault tolerance we prefer to put all Envoy pods on different nodes - if one node fails, we survive on the others.
+  # The block below matches all pods with the labels of THIS "default" Envoy fleet and tries to schedule the pods
+  # onto different nodes.
+  # Other fleets (if present) are not taken into consideration. You can specify nodeAffinity and podAffinity as well.
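+  # E.g. with size: 2, the block below makes the scheduler put the two Envoy pods onto different nodes:
+  # no two pods carrying this fleet's labels may share the same kubernetes.io/hostname.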
+  # affinity:
+  #   podAntiAffinity:
+  #     requiredDuringSchedulingIgnoredDuringExecution:
+  #     - labelSelector:
+  #         matchExpressions:
+  #         - key: app
+  #           operator: In
+  #           values:
+  #           - kusk-gateway
+  #         - key: component
+  #           operator: In
+  #           values:
+  #           - envoy
+  #         - key: fleet
+  #           operator: In
+  #           values:
+  #           - default
+  #       topologyKey: kubernetes.io/hostname
+
+  # optional
+  size: 1
diff --git a/controllers/envoyfleet_controller.go b/controllers/envoyfleet_controller.go
index ae7f88377..fae0797be 100644
--- a/controllers/envoyfleet_controller.go
+++ b/controllers/envoyfleet_controller.go
@@ -41,8 +41,7 @@ import (
 // EnvoyFleetReconciler reconciles a EnvoyFleet object
 type EnvoyFleetReconciler struct {
 	client.Client
-	Scheme        *runtime.Scheme
-	ConfigManager *KubeEnvoyConfigManager
+	Scheme *runtime.Scheme
 }
 
 // +kubebuilder:rbac:groups=gateway.kusk.io,resources=envoyfleet,verbs=get;list;watch;create;update;patch;delete
@@ -59,32 +58,41 @@ func (r *EnvoyFleetReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 
 	ef := &gatewayv1alpha1.EnvoyFleet{}
 
-	err := r.Client.Get(context.TODO(), req.NamespacedName, ef)
+	err := r.Client.Get(ctx, req.NamespacedName, ef)
 	if err != nil {
 		if errors.IsNotFound(err) {
 			// EnvoyFleet was deleted - deployment and config deletion is handled by the API server itself
 			// thanks to OwnerReference
+			l.Info("No objects found, looks like EnvoyFleet was deleted")
 			return ctrl.Result{}, nil
 		}
-
+		l.Error(err, "Failed to retrieve EnvoyFleet object from the cluster API")
 		return ctrl.Result{Requeue: true}, err
 	}
 
 	if err := controllerutil.SetControllerReference(ef, ef, r.Scheme); err != nil {
+		l.Error(err, "Failed setting controller owner reference")
 		return ctrl.Result{}, err
 	}
-
-	if err := k8sutils.CreateEnvoyConfig(ctx, r.Client, ef); err != nil {
-		return ctrl.Result{}, fmt.Errorf("failed to create envoy config: %w", err)
+	efResources, err := NewEnvoyFleetResources(ef)
+	if err != nil {
+		l.Error(err, "Failed to create envoy fleet configuration")
+		return ctrl.Result{}, fmt.Errorf("failed to create envoy fleet configuration: %w", err)
 	}
-
-	if err := k8sutils.CreateEnvoyDeployment(ctx, r.Client, ef); err != nil {
-		return ctrl.Result{}, fmt.Errorf("failed to create envoy deployment: %w", err)
+	if err := k8sutils.CreateOrReplace(ctx, r.Client, efResources.configMap); err != nil {
+		l.Error(err, "Failed to create envoy config map")
+		return ctrl.Result{}, fmt.Errorf("failed to create envoy fleet config map: %w", err)
 	}
-
-	if err := k8sutils.CreateEnvoyService(ctx, r.Client, ef); err != nil {
-		return ctrl.Result{}, fmt.Errorf("failed to create envoy service: %w", err)
+	if err := k8sutils.CreateOrReplace(ctx, r.Client, efResources.deployment); err != nil {
+		l.Error(err, "Failed to create envoy deployment")
+		return ctrl.Result{}, fmt.Errorf("failed to create envoy fleet deployment: %w", err)
+	}
+	if err := k8sutils.CreateOrReplace(ctx, r.Client, efResources.service); err != nil {
+		l.Error(err, "Failed to create envoy fleet service")
+		return ctrl.Result{}, fmt.Errorf("failed to create envoy fleet service: %w", err)
 	}
+	l.Info(fmt.Sprintf("Reconciled Envoy Fleet '%s' resources", efResources.fleetName))
 	return ctrl.Result{}, nil
 }
diff --git a/controllers/envoyfleet_resources.go b/controllers/envoyfleet_resources.go
new file mode 100644
index 000000000..07001baec
--- /dev/null
+++ b/controllers/envoyfleet_resources.go
@@ -0,0 +1,285 @@
+package controllers
+
+import (
+	"fmt"
+
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	gatewayv1alpha1 "github.com/kubeshop/kusk-gateway/api/v1alpha1"
+)
+
+const (
+	envoyHTTPListenerPort  int32 = 8080
+	envoyHTTPSListenerPort int32 = 8443
+	envoyAdminListenerPort int32 = 19000
+)
+
+// EnvoyFleetResources is a collection of related Envoy Fleet K8s resources
+type EnvoyFleetResources struct {
+	fleetName    string
+	configMap    *corev1.ConfigMap
+	deployment   *appsv1.Deployment
+	service      *corev1.Service
+	commonLabels map[string]string
+}
+
+func NewEnvoyFleetResources(ef *gatewayv1alpha1.EnvoyFleet) (*EnvoyFleetResources, error) {
+	f := &EnvoyFleetResources{
+		fleetName: ef.Name,
+		commonLabels: map[string]string{
+			"app":   "kusk-gateway",
+			"fleet": ef.Name,
+		},
+	}
+
+	if err := f.CreateConfigMap(ef); err != nil {
+		return nil, err
+	}
+	// Depends on the ConfigMap
+	if err := f.CreateDeployment(ef); err != nil {
+		return nil, err
+	}
+	// Depends on the Deployment
+	if err := f.CreateService(ef); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+func (e *EnvoyFleetResources) CreateConfigMap(ef *gatewayv1alpha1.EnvoyFleet) error {
+	// future object labels
+	labels := map[string]string{
+		"component": "envoy-config",
+	}
+	// Copy over shared labels map
+	for key, value := range e.commonLabels {
+		labels[key] = value
+	}
+
+	configMapName := "kusk-envoy-config-" + e.fleetName
+
+	e.configMap = &corev1.ConfigMap{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ConfigMap",
+			APIVersion: "v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            configMapName,
+			Namespace:       ef.Namespace,
+			Labels:          labels,
+			OwnerReferences: []metav1.OwnerReference{envoyFleetAsOwner(ef)},
+		},
+		Data: map[string]string{
+			"envoy-config.yaml": fmt.Sprintf(envoyConfigTemplate, e.fleetName),
+		},
+	}
+
+	return nil
+}
+
+func (e *EnvoyFleetResources) CreateDeployment(ef *gatewayv1alpha1.EnvoyFleet) error {
+	// future object labels
+	labels := map[string]string{
+		"component": "envoy",
+	}
+	// Copy over shared labels map
+	for key, value := range e.commonLabels {
+		labels[key] = value
+	}
+
+	deploymentName := "kusk-envoy-" + e.fleetName
+
+	configMapName := e.configMap.Name
+
+	envoyContainer := corev1.Container{
+		Name:            "envoy",
+		Image:           ef.Spec.Image,
+		ImagePullPolicy: "IfNotPresent",
+		Command:         []string{"/bin/sh", "-c"},
+		Args: []string{
+			"envoy -c /etc/envoy/envoy.yaml --service-node $POD_NAME",
+		},
+		Env: []corev1.EnvVar{
+			{
+				Name: "POD_NAME",
+				ValueFrom: &corev1.EnvVarSource{
+					FieldRef: &corev1.ObjectFieldSelector{
+						FieldPath: "metadata.name",
+					},
+				},
+			},
+		},
+		VolumeMounts: []corev1.VolumeMount{
+			{
+				Name:      "envoy-config",
+				MountPath: "/etc/envoy/envoy.yaml",
+				SubPath:   "envoy-config.yaml",
+			},
+		},
+		Ports: []corev1.ContainerPort{
+			{
+				Name:          "http",
+				ContainerPort: envoyHTTPListenerPort,
+			},
+			{
+				Name:          "https",
+				ContainerPort: envoyHTTPSListenerPort,
+			},
+			{
+				Name:          "admin",
+				ContainerPort: envoyAdminListenerPort,
+			},
+		},
+	}
+	// Set Envoy Pod Resources if specified
+	if ef.Spec.Resources != nil {
+		if ef.Spec.Resources.Limits != nil {
+			envoyContainer.Resources.Limits = ef.Spec.Resources.Limits
+		}
+		if ef.Spec.Resources.Requests != nil {
+			envoyContainer.Resources.Requests = ef.Spec.Resources.Requests
+		}
+	}
+	e.deployment = &appsv1.Deployment{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Deployment",
+			APIVersion: "apps/v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            deploymentName,
+			Namespace:       ef.Namespace,
+			Labels:          labels,
+			Annotations:     ef.Spec.Annotations,
+			OwnerReferences: []metav1.OwnerReference{envoyFleetAsOwner(ef)},
+		},
+ Spec: appsv1.DeploymentSpec{ + Replicas: ef.Spec.Size, + Selector: labelSelectors(labels), + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + envoyContainer, + }, + Volumes: []corev1.Volume{ + { + Name: "envoy-config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: configMapName, + }, + }, + }, + }, + }, + NodeSelector: ef.Spec.NodeSelector, + Affinity: ef.Spec.Affinity, + Tolerations: ef.Spec.Tolerations, + TerminationGracePeriodSeconds: ef.Spec.TerminationGracePeriodSeconds, + }, + }, + }, + } + return nil +} + +func (f *EnvoyFleetResources) CreateService(ef *gatewayv1alpha1.EnvoyFleet) error { + // future object labels + labels := map[string]string{ + "component": "envoy-svc", + } + // Copy over shared labels map + for key, value := range f.commonLabels { + labels[key] = value + } + serviceName := "kusk-envoy-svc-" + ef.Name + + f.service = &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: ef.Namespace, + Labels: labels, + Annotations: ef.Spec.Service.Annotations, + OwnerReferences: []metav1.OwnerReference{envoyFleetAsOwner(ef)}, + }, + Spec: corev1.ServiceSpec{ + Ports: ef.Spec.Service.Ports, + Selector: f.deployment.Spec.Selector.MatchLabels, + Type: ef.Spec.Service.Type, + }, + } + // Static IP address for the LoadBalancer + if ef.Spec.Service.Type == corev1.ServiceTypeLoadBalancer && ef.Spec.Service.LoadBalancerIP != "" { + f.service.Spec.LoadBalancerIP = ef.Spec.Service.LoadBalancerIP + } + + return nil +} +func envoyFleetAsOwner(cr *gatewayv1alpha1.EnvoyFleet) metav1.OwnerReference { + trueVar := true + return metav1.OwnerReference{ + APIVersion: cr.APIVersion, + Kind: cr.Kind, + Name: cr.Name, + UID: cr.UID, + Controller: &trueVar, + } +} + +func labelSelectors(labels map[string]string) *metav1.LabelSelector { + return &metav1.LabelSelector{MatchLabels: labels} +} + +var envoyConfigTemplate = ` +node: + cluster: %s + +dynamic_resources: + ads_config: + api_type: GRPC + transport_api_version: V3 + grpc_services: + - envoy_grpc: + cluster_name: xds_cluster + cds_config: + resource_api_version: V3 + ads: {} + lds_config: + resource_api_version: V3 + ads: {} + +static_resources: + clusters: + - type: STRICT_DNS + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + name: xds_cluster + load_assignment: + cluster_name: xds_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: kusk-xds-service.kusk-system.svc.cluster.local + port_value: 18000 + +admin: + address: + socket_address: + address: 0.0.0.0 + port_value: 19000 + +` diff --git a/go.mod b/go.mod index f9ecfbd35..e6f36b0af 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/kubeshop/kusk-gateway go 1.16 require ( - github.com/envoyproxy/go-control-plane v0.10.0 + github.com/envoyproxy/go-control-plane v0.10.1 github.com/fsnotify/fsnotify v1.4.9 github.com/getkin/kin-openapi v0.76.0 github.com/ghodss/yaml v1.0.0 diff --git a/go.sum b/go.sum index 7219200b9..4d30c3e52 100644 --- a/go.sum +++ b/go.sum @@ -112,8 +112,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m 
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.10.0 h1:WVt4HEPbdRbRD/PKKPbPnIVavO6gk/h673jWyIJ016k= -github.com/envoyproxy/go-control-plane v0.10.0/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= +github.com/envoyproxy/go-control-plane v0.10.1 h1:cgDRLG7bs59Zd+apAWuzLQL95obVYAymNJek76W3mgw= +github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= diff --git a/k8sutils/envoy_config_template.go b/k8sutils/envoy_config_template.go deleted file mode 100644 index 415a09edd..000000000 --- a/k8sutils/envoy_config_template.go +++ /dev/null @@ -1,46 +0,0 @@ -package k8sutils - -var envoyConfigTemplate = ` -node: - cluster: %s - -dynamic_resources: - ads_config: - api_type: GRPC - transport_api_version: V3 - grpc_services: - - envoy_grpc: - cluster_name: xds_cluster - cds_config: - resource_api_version: V3 - ads: {} - lds_config: - resource_api_version: V3 - ads: {} - -static_resources: - clusters: - - type: STRICT_DNS - typed_extension_protocol_options: - envoy.extensions.upstreams.http.v3.HttpProtocolOptions: - "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions - explicit_http_config: - http2_protocol_options: {} - name: xds_cluster - load_assignment: - cluster_name: xds_cluster - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: kusk-xds-service.kusk-system.svc.cluster.local - port_value: 18000 - -admin: - address: - socket_address: - address: 0.0.0.0 - port_value: 19000 - -` diff --git a/k8sutils/envoy_deployment.go b/k8sutils/envoy_deployment.go deleted file mode 100644 index 9539b543e..000000000 --- a/k8sutils/envoy_deployment.go +++ /dev/null @@ -1,208 +0,0 @@ -package k8sutils - -import ( - "context" - "fmt" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/intstr" - clientPkg "sigs.k8s.io/controller-runtime/pkg/client" - - gatewayv1alpha1 "github.com/kubeshop/kusk-gateway/api/v1alpha1" -) - -func CreateEnvoyConfig(ctx context.Context, client clientPkg.Client, ef *gatewayv1alpha1.EnvoyFleet) error { - labels := map[string]string{ - "app": "kusk-gateway", - "component": "envoy-config", - "fleet": ef.Name, - } - - configMapName := "kusk-envoy-config-" + ef.Name - - configMap := &corev1.ConfigMap{ - TypeMeta: metav1.TypeMeta{ - Kind: "ConfigMap", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: configMapName, - Namespace: ef.Namespace, - Labels: labels, - OwnerReferences: []metav1.OwnerReference{envoyFleetAsOwner(ef)}, - }, - Data: map[string]string{ - "envoy-config.yaml": fmt.Sprintf(envoyConfigTemplate, ef.Name), - }, - } - - return createOrReplace(ctx, client, configMap.GroupVersionKind(), 
configMap) -} - -func CreateEnvoyService(ctx context.Context, client clientPkg.Client, ef *gatewayv1alpha1.EnvoyFleet) error { - labels := map[string]string{ - "app": "kusk-gateway", - "component": "envoy-svc", - "fleet": ef.Name, - } - - envoyLabels := map[string]string{ - "app": "kusk-gateway", - "component": "envoy", - "fleet": ef.Name, - } - - serviceName := "kusk-envoy-svc-" + ef.Name - - service := &corev1.Service{ - TypeMeta: metav1.TypeMeta{ - Kind: "Service", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: ef.Namespace, - Labels: labels, - OwnerReferences: []metav1.OwnerReference{envoyFleetAsOwner(ef)}, - }, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - Port: 8080, - TargetPort: intstr.FromInt(8080), - }, - }, - Selector: envoyLabels, - Type: corev1.ServiceTypeLoadBalancer, - }, - } - - return createOrReplace(ctx, client, service.GroupVersionKind(), service) -} - -func CreateEnvoyDeployment(ctx context.Context, client clientPkg.Client, ef *gatewayv1alpha1.EnvoyFleet) error { - labels := map[string]string{ - "app": "kusk-gateway", - "component": "envoy", - "fleet": ef.Name, - } - - deploymentName := "kusk-envoy-" + ef.Name - configMapName := "kusk-envoy-config-" + ef.Name - - deployment := &appsv1.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: deploymentName, - Namespace: ef.Namespace, - Labels: labels, - OwnerReferences: []metav1.OwnerReference{envoyFleetAsOwner(ef)}, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: ef.Spec.Size, - Selector: labelSelectors(labels), - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: labels, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "envoy", - Image: "envoyproxy/envoy-alpine:v1.20.0", - Command: []string{"/bin/sh", "-c"}, - Args: []string{ - "envoy -c /etc/envoy/envoy.yaml --service-node $POD_NAME", - }, - Env: []corev1.EnvVar{ - { - Name: "POD_NAME", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "metadata.name", - }, - }, - }, - }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "envoy-config", - MountPath: "/etc/envoy/envoy.yaml", - SubPath: "envoy-config.yaml", - }, - }, - }, - }, - Volumes: []corev1.Volume{ - { - Name: "envoy-config", - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: configMapName, - }, - }, - }, - }, - }, - }, - }, - }, - } - - return createOrReplace(ctx, client, deployment.GroupVersionKind(), deployment) -} - -func envoyFleetAsOwner(cr *gatewayv1alpha1.EnvoyFleet) metav1.OwnerReference { - trueVar := true - return metav1.OwnerReference{ - APIVersion: cr.APIVersion, - Kind: cr.Kind, - Name: cr.Name, - UID: cr.UID, - Controller: &trueVar, - } -} - -func labelSelectors(labels map[string]string) *metav1.LabelSelector { - return &metav1.LabelSelector{MatchLabels: labels} -} - -func checkIfExists(ctx context.Context, client clientPkg.Client, gvk schema.GroupVersionKind, key clientPkg.ObjectKey) (resourceVersion string, ok bool, err error) { - var obj unstructured.Unstructured - - obj.SetGroupVersionKind(gvk) - - err = client.Get(ctx, key, &obj) - if err != nil { - if errors.IsNotFound(err) { - return "", false, nil - } - - return "", false, err - } - - return obj.GetResourceVersion(), true, nil -} - -func createOrReplace(ctx context.Context, client clientPkg.Client, gvk 
schema.GroupVersionKind, obj clientPkg.Object) error { - resourceVersion, ok, err := checkIfExists(ctx, client, gvk, clientPkg.ObjectKeyFromObject(obj)) - if err != nil { - return err - } - - if ok { - obj.SetResourceVersion(resourceVersion) - return client.Update(ctx, obj) - } - - return client.Create(ctx, obj) -} diff --git a/k8sutils/utils.go b/k8sutils/utils.go new file mode 100644 index 000000000..026c1c811 --- /dev/null +++ b/k8sutils/utils.go @@ -0,0 +1,41 @@ +package k8sutils + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + clientPkg "sigs.k8s.io/controller-runtime/pkg/client" +) + +func checkIfExists(ctx context.Context, client clientPkg.Client, gvk schema.GroupVersionKind, key clientPkg.ObjectKey) (resourceVersion string, ok bool, err error) { + var obj unstructured.Unstructured + + obj.SetGroupVersionKind(gvk) + + err = client.Get(ctx, key, &obj) + if err != nil { + if errors.IsNotFound(err) { + return "", false, nil + } + + return "", false, err + } + + return obj.GetResourceVersion(), true, nil +} + +func CreateOrReplace(ctx context.Context, client clientPkg.Client, obj clientPkg.Object) error { + resourceVersion, ok, err := checkIfExists(ctx, client, obj.GetObjectKind().GroupVersionKind(), clientPkg.ObjectKeyFromObject(obj)) + if err != nil { + return err + } + + if ok { + obj.SetResourceVersion(resourceVersion) + return client.Update(ctx, obj) + } + + return client.Create(ctx, obj) +} diff --git a/main.go b/main.go index bde6ffa30..13201bf0c 100644 --- a/main.go +++ b/main.go @@ -116,9 +116,8 @@ func main() { } if err = (&controllers.EnvoyFleetReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - ConfigManager: &controllerConfigManager, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "EnvoyFleet") os.Exit(1) From ad3e6f4493769a2ebd699807965b4ef7f5be29b4 Mon Sep 17 00:00:00 2001 From: Taras Yatsurak Date: Wed, 8 Dec 2021 17:16:40 +0200 Subject: [PATCH 2/8] Rewrite accordingly with helm deployment --- Makefile | 4 ++-- config/crd/patches/webhook_in_apis.yaml | 2 +- config/crd/patches/webhook_in_envoyfleet.yaml | 2 +- config/crd/patches/webhook_in_staticroutes.yaml | 2 +- config/default/kustomization.yaml | 11 ++++++----- config/default/manager_auth_proxy_patch.yaml | 2 +- config/default/manager_config_patch.yaml | 2 +- config/default/manager_debug_patch.yaml | 2 +- config/default/manager_webhook_patch.yaml | 2 +- config/default/webhookcainjection_patch.yaml | 4 ++-- config/local/kustomization.yaml | 6 ++++-- config/local/namespace.yaml | 2 -- config/manager/kustomization.yaml | 8 ++++---- config/manager/manager.yaml | 14 ++++++-------- config/manager/service.yaml | 10 ++++++++-- config/rbac/api_editor_role.yaml | 2 +- config/rbac/api_viewer_role.yaml | 2 +- config/rbac/auth_proxy_client_clusterrole.yaml | 2 +- config/rbac/auth_proxy_role.yaml | 2 +- config/rbac/auth_proxy_role_binding.yaml | 6 +++--- config/rbac/auth_proxy_service.yaml | 10 +++++++--- config/rbac/envoyfleet_editor_role.yaml | 2 +- config/rbac/envoyfleet_manager_role.yaml | 2 +- config/rbac/envoyfleet_manager_role_binding.yaml | 6 +++--- config/rbac/envoyfleet_viewer_role.yaml | 2 +- config/rbac/leader_election_role.yaml | 2 +- config/rbac/leader_election_role_binding.yaml | 4 ++-- config/rbac/role.yaml | 2 +- config/rbac/role_binding.yaml | 6 +++--- 
config/rbac/service_account.yaml | 2 +- config/rbac/staticroute_editor_role.yaml | 2 +- config/rbac/staticroute_viewer_role.yaml | 2 +- config/webhook/service.yaml | 12 +++++++++--- controllers/envoyfleet_resources.go | 1 - 34 files changed, 78 insertions(+), 64 deletions(-) diff --git a/Makefile b/Makefile index b24502be8..2fd97469c 100644 --- a/Makefile +++ b/Makefile @@ -48,7 +48,7 @@ delete-env: ## Destroy the local development k3d cluster ./development/cluster/delete-env.sh manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. - $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases + $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=kusk-gateway-manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." @@ -99,7 +99,7 @@ uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified $(KUSTOMIZE) build config/crd | kubectl delete -f - deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. - cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + cd config/manager && $(KUSTOMIZE) edit set image manager=${IMG} $(KUSTOMIZE) build config/default | kubectl apply -f - deploy-debug: manifests kustomize ## Deploy controller with debugger to the K8s cluster specified in ~/.kube/config. diff --git a/config/crd/patches/webhook_in_apis.yaml b/config/crd/patches/webhook_in_apis.yaml index b81dc33df..cc0eb41d1 100644 --- a/config/crd/patches/webhook_in_apis.yaml +++ b/config/crd/patches/webhook_in_apis.yaml @@ -10,7 +10,7 @@ spec: clientConfig: service: namespace: system - name: webhook-service + name: kusk-gateway-webhooks-service path: /convert conversionReviewVersions: - v1 diff --git a/config/crd/patches/webhook_in_envoyfleet.yaml b/config/crd/patches/webhook_in_envoyfleet.yaml index 3c77e05f4..aef94e063 100644 --- a/config/crd/patches/webhook_in_envoyfleet.yaml +++ b/config/crd/patches/webhook_in_envoyfleet.yaml @@ -10,7 +10,7 @@ spec: clientConfig: service: namespace: system - name: webhook-service + name: kusk-gateway-webhooks-service path: /convert conversionReviewVersions: - v1 diff --git a/config/crd/patches/webhook_in_staticroutes.yaml b/config/crd/patches/webhook_in_staticroutes.yaml index 5c955d5a9..dc0ea6069 100644 --- a/config/crd/patches/webhook_in_staticroutes.yaml +++ b/config/crd/patches/webhook_in_staticroutes.yaml @@ -10,7 +10,7 @@ spec: clientConfig: service: namespace: system - name: webhook-service + name: kusk-gateway-webhooks-service path: /convert conversionReviewVersions: - v1 diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index e7b74037a..143fa0c62 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -6,11 +6,12 @@ namespace: kusk-system # "wordpress" becomes "alices-wordpress". # Note that it should also match with the prefix (text before '-') of the namespace # field above. -namePrefix: kusk- +# namePrefix: # Labels to add to all resources and selectors. 
-#commonLabels: -# someName: someValue +commonLabels: + app.kubernetes.io/name: kusk-gateway + app.kubernetes.io/instance: kusk-gateway-development bases: - ../crd @@ -64,11 +65,11 @@ vars: objref: kind: Service version: v1 - name: webhook-service + name: kusk-gateway-webhooks-service fieldref: fieldpath: metadata.namespace - name: SERVICE_NAME objref: kind: Service version: v1 - name: webhook-service + name: kusk-gateway-webhooks-service diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml index a224be19e..e2d4ebd68 100644 --- a/config/default/manager_auth_proxy_patch.yaml +++ b/config/default/manager_auth_proxy_patch.yaml @@ -3,7 +3,7 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: controller-manager + name: kusk-gateway-manager namespace: system spec: template: diff --git a/config/default/manager_config_patch.yaml b/config/default/manager_config_patch.yaml index 6c400155c..27d483770 100644 --- a/config/default/manager_config_patch.yaml +++ b/config/default/manager_config_patch.yaml @@ -1,7 +1,7 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: controller-manager + name: kusk-gateway-manager namespace: system spec: template: diff --git a/config/default/manager_debug_patch.yaml b/config/default/manager_debug_patch.yaml index 8d8160a3f..9be5a9862 100644 --- a/config/default/manager_debug_patch.yaml +++ b/config/default/manager_debug_patch.yaml @@ -1,7 +1,7 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: controller-manager + name: kusk-gateway-manager namespace: system spec: template: diff --git a/config/default/manager_webhook_patch.yaml b/config/default/manager_webhook_patch.yaml index dbfdf0b28..8a34ff6c8 100644 --- a/config/default/manager_webhook_patch.yaml +++ b/config/default/manager_webhook_patch.yaml @@ -1,7 +1,7 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: controller-manager + name: kusk-gateway-manager namespace: system spec: template: diff --git a/config/default/webhookcainjection_patch.yaml b/config/default/webhookcainjection_patch.yaml index 02ab515d4..ce0a50c7e 100644 --- a/config/default/webhookcainjection_patch.yaml +++ b/config/default/webhookcainjection_patch.yaml @@ -3,13 +3,13 @@ apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration metadata: - name: mutating-webhook-configuration + name: kusk-gateway-mutating-webhook-configuration annotations: cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) --- apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration metadata: - name: validating-webhook-configuration + name: kusk-gateway-validating-webhook-configuration annotations: cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) diff --git a/config/local/kustomization.yaml b/config/local/kustomization.yaml index 5035e2458..49dc34dd6 100644 --- a/config/local/kustomization.yaml +++ b/config/local/kustomization.yaml @@ -9,8 +9,10 @@ namespace: kusk-system namePrefix: kusk- # Labels to add to all resources and selectors. 
-#commonLabels: -# someName: someValue +commonLabels: + app.kubernetes.io/name: kusk-gateway + app.kubernetes.io/instance: kusk-gateway-development + bases: - namespace.yaml diff --git a/config/local/namespace.yaml b/config/local/namespace.yaml index 8b55c3cd8..1ab3a7255 100644 --- a/config/local/namespace.yaml +++ b/config/local/namespace.yaml @@ -1,6 +1,4 @@ apiVersion: v1 kind: Namespace metadata: - labels: - control-plane: controller-manager name: system diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 33b9a33fa..05590d0e5 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -1,3 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization resources: - manager.yaml - service.yaml @@ -8,10 +10,8 @@ generatorOptions: configMapGenerator: - files: - controller_manager_config.yaml - name: manager-config -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization + name: kusk-gateway-config images: -- name: controller +- name: manager newName: kusk-gateway newTag: dev diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 79adfe72a..66b5ac849 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -1,26 +1,24 @@ apiVersion: v1 kind: Namespace metadata: - labels: - control-plane: controller-manager name: system --- apiVersion: apps/v1 kind: Deployment metadata: - name: controller-manager + name: kusk-gateway-manager namespace: system labels: - control-plane: controller-manager + app.kubernetes.io/component: kusk-gateway-manager spec: selector: matchLabels: - control-plane: controller-manager + app.kubernetes.io/component: kusk-gateway-manager replicas: 1 template: metadata: labels: - control-plane: controller-manager + app.kubernetes.io/component: kusk-gateway-manager spec: securityContext: runAsNonRoot: true @@ -29,7 +27,7 @@ spec: - /manager args: - --leader-elect - image: controller:latest + image: kusk-gateway:latest name: manager securityContext: allowPrivilegeEscalation: false @@ -52,5 +50,5 @@ spec: requests: cpu: 100m memory: 20Mi - serviceAccountName: controller-manager + serviceAccountName: kusk-gateway-manager terminationGracePeriodSeconds: 10 diff --git a/config/manager/service.yaml b/config/manager/service.yaml index 5de2fb8d8..30198a9e0 100644 --- a/config/manager/service.yaml +++ b/config/manager/service.yaml @@ -1,12 +1,18 @@ apiVersion: v1 kind: Service metadata: - name: xds-service + name: kusk-gateway-xds-service namespace: system + labels: + app.kubernetes.io/name: kusk-gateway + app.kubernetes.io/component: xds-service + app.kubernetes.io/instance: kusk-gateway-development spec: ports: - port: 18000 name: xds targetPort: xds selector: - control-plane: controller-manager + app.kubernetes.io/name: kusk-gateway + app.kubernetes.io/component: kusk-gateway-manager + app.kubernetes.io/instance: kusk-gateway-development diff --git a/config/rbac/api_editor_role.yaml b/config/rbac/api_editor_role.yaml index e4ea7646f..aec952545 100644 --- a/config/rbac/api_editor_role.yaml +++ b/config/rbac/api_editor_role.yaml @@ -2,7 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: api-editor-role + name: kusk-gateway-api-editor-role rules: - apiGroups: - gateway.kusk.io diff --git a/config/rbac/api_viewer_role.yaml b/config/rbac/api_viewer_role.yaml index 59ef5ba7f..1d86bb197 100644 --- a/config/rbac/api_viewer_role.yaml +++ b/config/rbac/api_viewer_role.yaml @@ -2,7 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: 
ClusterRole metadata: - name: api-viewer-role + name: kusk-gateway-api-viewer-role rules: - apiGroups: - gateway.kusk.io diff --git a/config/rbac/auth_proxy_client_clusterrole.yaml b/config/rbac/auth_proxy_client_clusterrole.yaml index 51a75db47..15c09d501 100644 --- a/config/rbac/auth_proxy_client_clusterrole.yaml +++ b/config/rbac/auth_proxy_client_clusterrole.yaml @@ -1,7 +1,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: metrics-reader + name: kusk-gateway-metrics-reader rules: - nonResourceURLs: - "/metrics" diff --git a/config/rbac/auth_proxy_role.yaml b/config/rbac/auth_proxy_role.yaml index 80e1857c5..f271bf96e 100644 --- a/config/rbac/auth_proxy_role.yaml +++ b/config/rbac/auth_proxy_role.yaml @@ -1,7 +1,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: proxy-role + name: kusk-gateway-proxy-role rules: - apiGroups: - authentication.k8s.io diff --git a/config/rbac/auth_proxy_role_binding.yaml b/config/rbac/auth_proxy_role_binding.yaml index ec7acc0a1..08365cf77 100644 --- a/config/rbac/auth_proxy_role_binding.yaml +++ b/config/rbac/auth_proxy_role_binding.yaml @@ -1,12 +1,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: proxy-rolebinding + name: kusk-gateway-proxy-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: proxy-role + name: kusk-gateway-proxy-role subjects: - kind: ServiceAccount - name: controller-manager + name: kusk-gateway-manager namespace: system diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml index 6cf656be1..e8a813dbf 100644 --- a/config/rbac/auth_proxy_service.yaml +++ b/config/rbac/auth_proxy_service.yaml @@ -2,8 +2,10 @@ apiVersion: v1 kind: Service metadata: labels: - control-plane: controller-manager - name: controller-manager-metrics-service + app.kubernetes.io/name: kusk-gateway + app.kubernetes.io/component: metrics-service + app.kubernetes.io/instance: kusk-gateway-development + name: kusk-gateway-manager-metrics-service namespace: system spec: ports: @@ -11,4 +13,6 @@ spec: port: 8443 targetPort: https selector: - control-plane: controller-manager + app.kubernetes.io/name: kusk-gateway + app.kubernetes.io/component: kusk-gateway-manager + app.kubernetes.io/instance: kusk-gateway-development diff --git a/config/rbac/envoyfleet_editor_role.yaml b/config/rbac/envoyfleet_editor_role.yaml index 51fa66b64..d6a8e2113 100644 --- a/config/rbac/envoyfleet_editor_role.yaml +++ b/config/rbac/envoyfleet_editor_role.yaml @@ -2,7 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: envoyfleet-editor-role + name: kusk-gateway-envoyfleet-editor-role rules: - apiGroups: - gateway.kusk.io diff --git a/config/rbac/envoyfleet_manager_role.yaml b/config/rbac/envoyfleet_manager_role.yaml index 35c4313bd..2094248ba 100644 --- a/config/rbac/envoyfleet_manager_role.yaml +++ b/config/rbac/envoyfleet_manager_role.yaml @@ -2,7 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: envoyfleet-manager-role + name: kusk-gateway-envoyfleet-manager-role rules: - apiGroups: - "apps" diff --git a/config/rbac/envoyfleet_manager_role_binding.yaml b/config/rbac/envoyfleet_manager_role_binding.yaml index ab7c87f85..5e16157e9 100644 --- a/config/rbac/envoyfleet_manager_role_binding.yaml +++ b/config/rbac/envoyfleet_manager_role_binding.yaml @@ -1,12 +1,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: 
envoyfleet-manager-rolebinding + name: kusk-gateway-envoyfleet-manager-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: envoyfleet-manager-role + name: kusk-gateway-envoyfleet-manager-role subjects: - kind: ServiceAccount - name: controller-manager + name: kusk-gateway-manager namespace: system diff --git a/config/rbac/envoyfleet_viewer_role.yaml b/config/rbac/envoyfleet_viewer_role.yaml index 7cce270f6..05dd192ba 100644 --- a/config/rbac/envoyfleet_viewer_role.yaml +++ b/config/rbac/envoyfleet_viewer_role.yaml @@ -2,7 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: envoyfleet-viewer-role + name: kusk-gateway-envoyfleet-viewer-role rules: - apiGroups: - gateway.kusk.io diff --git a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml index 4190ec805..21d46ab2b 100644 --- a/config/rbac/leader_election_role.yaml +++ b/config/rbac/leader_election_role.yaml @@ -2,7 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: leader-election-role + name: kusk-gateway-leader-election-role rules: - apiGroups: - "" diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml index 1d1321ed4..c290be366 100644 --- a/config/rbac/leader_election_role_binding.yaml +++ b/config/rbac/leader_election_role_binding.yaml @@ -1,12 +1,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: leader-election-rolebinding + name: kusk-gateway-leader-election-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: leader-election-role subjects: - kind: ServiceAccount - name: controller-manager + name: kusk-gateway-manager namespace: system diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 3c30f6cf7..f169a7cca 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -4,7 +4,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: creationTimestamp: null - name: manager-role + name: kusk-gateway-manager-role rules: - apiGroups: - gateway.kusk.io diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml index 2070ede44..3b4bde997 100644 --- a/config/rbac/role_binding.yaml +++ b/config/rbac/role_binding.yaml @@ -1,12 +1,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: manager-rolebinding + name: kusk-gateway-manager-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: manager-role + name: kusk-gateway-manager-role subjects: - kind: ServiceAccount - name: controller-manager + name: kusk-gateway-manager namespace: system diff --git a/config/rbac/service_account.yaml b/config/rbac/service_account.yaml index 7cd6025bf..642165a58 100644 --- a/config/rbac/service_account.yaml +++ b/config/rbac/service_account.yaml @@ -1,5 +1,5 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: controller-manager + name: kusk-gateway-manager namespace: system diff --git a/config/rbac/staticroute_editor_role.yaml b/config/rbac/staticroute_editor_role.yaml index d8eafd4e9..337aa8f99 100644 --- a/config/rbac/staticroute_editor_role.yaml +++ b/config/rbac/staticroute_editor_role.yaml @@ -2,7 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: staticroute-editor-role + name: kusk-gateway-staticroute-editor-role rules: - apiGroups: - gateway.kusk.io diff --git a/config/rbac/staticroute_viewer_role.yaml b/config/rbac/staticroute_viewer_role.yaml index 375635e1b..749321c0a 100644 --- 
a/config/rbac/staticroute_viewer_role.yaml +++ b/config/rbac/staticroute_viewer_role.yaml @@ -2,7 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: staticroute-viewer-role + name: kusk-gateway-staticroute-viewer-role rules: - apiGroups: - gateway.kusk.io diff --git a/config/webhook/service.yaml b/config/webhook/service.yaml index 31e0f8295..543a10df2 100644 --- a/config/webhook/service.yaml +++ b/config/webhook/service.yaml @@ -2,11 +2,17 @@ apiVersion: v1 kind: Service metadata: - name: webhook-service + name: kusk-gateway-webhooks-service namespace: system + labels: + app.kubernetes.io/name: kusk-gateway + app.kubernetes.io/component: kusk-gateway-webhooks-service + app.kubernetes.io/instance: kusk-gateway-development spec: ports: - port: 443 - targetPort: 9443 + targetPort: webhook-server selector: - control-plane: controller-manager + app.kubernetes.io/name: kusk-gateway + app.kubernetes.io/component: kusk-gateway-manager + app.kubernetes.io/instance: kusk-gateway-development diff --git a/controllers/envoyfleet_resources.go b/controllers/envoyfleet_resources.go index 07001baec..2fe59500a 100644 --- a/controllers/envoyfleet_resources.go +++ b/controllers/envoyfleet_resources.go @@ -198,7 +198,6 @@ func (f *EnvoyFleetResources) CreateService(ef *gatewayv1alpha1.EnvoyFleet) erro labels[key] = value } serviceName := "kusk-envoy-svc-" + ef.Name - f.service = &corev1.Service{ TypeMeta: metav1.TypeMeta{ Kind: "Service", From 7a8b77f5d7d4c04d975c93db975a318faf132dc7 Mon Sep 17 00:00:00 2001 From: Taras Yatsurak Date: Thu, 9 Dec 2021 19:13:56 +0200 Subject: [PATCH 3/8] Fixes --- Makefile | 6 +- api/v1alpha1/envoyfleet_types.go | 3 + .../crd/bases/gateway.kusk.io_envoyfleet.yaml | 4 + config/default/kustomization.yaml | 2 +- config/local/kustomization.yaml | 4 +- config/manager/kustomization.yaml | 3 +- config/manager/service.yaml | 4 - config/prometheus/monitor.yaml | 6 +- config/rbac/leader_election_role_binding.yaml | 2 +- config/samples/gateway_v1_envoyfleet.yaml | 18 +-- config/webhook/kustomization.yaml | 30 ++++ config/webhook/service.yaml | 4 - config/webhook/webhookpatch.json | 12 ++ controllers/envoyfleet_controller.go | 44 +++--- controllers/envoyfleet_resources.go | 129 +++++++++++------- development/cluster/create-env.sh | 2 +- docs/development.md | 4 +- docs/troubleshooting.md | 4 +- k8sutils/utils.go | 13 ++ 19 files changed, 196 insertions(+), 98 deletions(-) create mode 100644 config/webhook/webhookpatch.json diff --git a/Makefile b/Makefile index 2fd97469c..e3517b1a8 100644 --- a/Makefile +++ b/Makefile @@ -99,7 +99,7 @@ uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified $(KUSTOMIZE) build config/crd | kubectl delete -f - deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. - cd config/manager && $(KUSTOMIZE) edit set image manager=${IMG} + cd config/manager && $(KUSTOMIZE) edit set image kusk-gateway=${IMG} $(KUSTOMIZE) build config/default | kubectl apply -f - deploy-debug: manifests kustomize ## Deploy controller with debugger to the K8s cluster specified in ~/.kube/config. 
@@ -112,8 +112,8 @@ undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/confi $(KUSTOMIZE) build config/default | kubectl delete -f - cycle: ## Trigger manager deployment rollout restart to pick up the new container image with the same tag - kubectl rollout restart deployment/kusk-controller-manager -n kusk-system - @echo "Triggered deployment/kusk-controller-manager restart" + kubectl rollout restart deployment/kusk-gateway-manager -n kusk-system + @echo "Triggered deployment/kusk-gateway-manager restart" CONTROLLER_GEN = $(shell pwd)/bin/controller-gen controller-gen: ## Download controller-gen locally if necessary. diff --git a/api/v1alpha1/envoyfleet_types.go b/api/v1alpha1/envoyfleet_types.go index 99084b678..62c08166e 100644 --- a/api/v1alpha1/envoyfleet_types.go +++ b/api/v1alpha1/envoyfleet_types.go @@ -97,6 +97,9 @@ type ServiceConfig struct { type EnvoyFleetStatus struct { // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster // Important: Run "make" to regenerate code after modifying this file + + // State indicates Envoy Fleet state + State string `json:"state,omitempty"` } //+kubebuilder:object:root=true diff --git a/config/crd/bases/gateway.kusk.io_envoyfleet.yaml b/config/crd/bases/gateway.kusk.io_envoyfleet.yaml index 2fea9153b..a845a9f5b 100644 --- a/config/crd/bases/gateway.kusk.io_envoyfleet.yaml +++ b/config/crd/bases/gateway.kusk.io_envoyfleet.yaml @@ -1057,6 +1057,10 @@ spec: type: object status: description: EnvoyFleetStatus defines the observed state of EnvoyFleet + properties: + state: + description: State indicates Envoy Fleet state + type: string type: object required: - spec diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index 143fa0c62..30a9bf064 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -27,7 +27,7 @@ bases: patchesStrategicMerge: # Protect the /metrics endpoint by putting it behind auth. -# If you want your controller-manager to expose the /metrics +# If you want your kusk-gateway-manager to expose the /metrics # endpoint w/o any authn/z, please comment the following line. - manager_auth_proxy_patch.yaml diff --git a/config/local/kustomization.yaml b/config/local/kustomization.yaml index 49dc34dd6..fd081d1a8 100644 --- a/config/local/kustomization.yaml +++ b/config/local/kustomization.yaml @@ -6,7 +6,7 @@ namespace: kusk-system # "wordpress" becomes "alices-wordpress". # Note that it should also match with the prefix (text before '-') of the namespace # field above. -namePrefix: kusk- +#namePrefix: kusk- # Labels to add to all resources and selectors. commonLabels: @@ -28,7 +28,7 @@ bases: patchesStrategicMerge: # Protect the /metrics endpoint by putting it behind auth. -# If you want your controller-manager to expose the /metrics +# If you want your kusk-gateway-manager to expose the /metrics # endpoint w/o any authn/z, please comment the following line. 
#- manager_auth_proxy_patch.yaml
diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml
index 05590d0e5..dad009124 100644
--- a/config/manager/kustomization.yaml
+++ b/config/manager/kustomization.yaml
@@ -11,7 +11,8 @@ configMapGenerator:
 - files:
   - controller_manager_config.yaml
   name: kusk-gateway-config
+
 images:
-- name: manager
+- name: kusk-gateway
   newName: kusk-gateway
   newTag: dev
diff --git a/config/manager/service.yaml b/config/manager/service.yaml
index 30198a9e0..1bcc92ea6 100644
--- a/config/manager/service.yaml
+++ b/config/manager/service.yaml
@@ -4,15 +4,11 @@ metadata:
   name: kusk-gateway-xds-service
   namespace: system
   labels:
-    app.kubernetes.io/name: kusk-gateway
     app.kubernetes.io/component: xds-service
-    app.kubernetes.io/instance: kusk-gateway-development
 spec:
   ports:
   - port: 18000
     name: xds
     targetPort: xds
   selector:
-    app.kubernetes.io/name: kusk-gateway
     app.kubernetes.io/component: kusk-gateway-manager
-    app.kubernetes.io/instance: kusk-gateway-development
diff --git a/config/prometheus/monitor.yaml b/config/prometheus/monitor.yaml
index d19136ae7..abeb794c6 100644
--- a/config/prometheus/monitor.yaml
+++ b/config/prometheus/monitor.yaml
@@ -4,8 +4,8 @@ apiVersion: monitoring.coreos.com/v1
 kind: ServiceMonitor
 metadata:
   labels:
-    control-plane: controller-manager
-  name: controller-manager-metrics-monitor
+    control-plane: kusk-gateway-manager
+  name: kusk-gateway-manager-metrics-monitor
   namespace: system
 spec:
   endpoints:
@@ -17,4 +17,4 @@ spec:
       insecureSkipVerify: true
   selector:
     matchLabels:
-      control-plane: controller-manager
+      control-plane: kusk-gateway-manager
diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml
index c290be366..d2d0a471d 100644
--- a/config/rbac/leader_election_role_binding.yaml
+++ b/config/rbac/leader_election_role_binding.yaml
@@ -5,7 +5,7 @@ metadata:
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: Role
-  name: leader-election-role
+  name: kusk-gateway-leader-election-role
 subjects:
 - kind: ServiceAccount
   name: kusk-gateway-manager
diff --git a/config/samples/gateway_v1_envoyfleet.yaml b/config/samples/gateway_v1_envoyfleet.yaml
index d85561456..c6d776157 100644
--- a/config/samples/gateway_v1_envoyfleet.yaml
+++ b/config/samples/gateway_v1_envoyfleet.yaml
@@ -27,9 +27,9 @@ spec:
       cpu: 10m
       memory: 100M
   # Put annotations to scrape pods.
-  annotations:
-    prometheus.io/scrape: 'true'
-    prometheus.io/port: '9102'
+  # annotations:
+  #   prometheus.io/scrape: 'true'
+  #   prometheus.io/port: '9102'
 
   # Scheduling directives
   # Read https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ for the details.
@@ -46,8 +46,8 @@ spec:
   #   effect: "NoSchedule"
 
   # Optional - provide pods affinity and anti-affinity settings.
-  # This newer and more flexible scheme can replace nodeSelector but they can be specified together too.
-  # For the scalability and fault tolerance we prefer to put all Envoy pods on different nodes - in case one node fails we survive on others.
+  # This is more flexible than the nodeSelector scheme, but both can be specified together.
+  # For scalability and fault tolerance we prefer to put all Envoy pods onto different nodes - in case one node fails we survive on the others.
  # The block below will search for all matching labels of THIS "default" envoy fleet pods and will try to schedule pods
  # on different nodes.
  # Other fleets (if present) are not taken into consideration. You can specify nodeAffinity and podAffinity as well.
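For orientation, the commented-out anti-affinity block in this sample corresponds roughly to the following `*corev1.Affinity` value on the Go side of the EnvoyFleet spec. This is an illustrative sketch only, not code from the patch: the label keys and values mirror the sample above, the function name is made up, and the `kubernetes.io/hostname` topology key is an assumption (it is the conventional per-node key, but nothing in this patch sets it):

```
package sample

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// fleetAntiAffinity spreads the pods of a single fleet across nodes: a pod may
// only be scheduled onto a node that does not already run a pod carrying the
// same fleet labels.
func fleetAntiAffinity(fleetName string) *corev1.Affinity {
	return &corev1.Affinity{
		PodAntiAffinity: &corev1.PodAntiAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
				{
					LabelSelector: &metav1.LabelSelector{
						MatchExpressions: []metav1.LabelSelectorRequirement{
							{
								Key:      "app.kubernetes.io/name",
								Operator: metav1.LabelSelectorOpIn,
								Values:   []string{"kusk-gateway-envoy-fleet"},
							},
							{
								Key:      "fleet",
								Operator: metav1.LabelSelectorOpIn,
								Values:   []string{fleetName},
							},
						},
					},
					// Treat each node as its own topology domain (assumed key).
					TopologyKey: "kubernetes.io/hostname",
				},
			},
		},
	}
}
```

The resulting value would flow through `EnvoyFleetSpec.Affinity` into the pod template unchanged, since the deployment generator copies the field verbatim.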
@@ -56,14 +56,10 @@ spec: # requiredDuringSchedulingIgnoredDuringExecution: # - labelSelector: # matchExpressions: - # - key: app + # - key: app.kubernetes.io/name # operator: In # values: - # - kusk-gateway - # - key: component - # operator: In - # values: - # - envoy + # - kusk-gateway-envoy-fleet # - key: fleet # operator: In # values: diff --git a/config/webhook/kustomization.yaml b/config/webhook/kustomization.yaml index 9cf26134e..35ec1fd3b 100644 --- a/config/webhook/kustomization.yaml +++ b/config/webhook/kustomization.yaml @@ -1,6 +1,36 @@ +namespace: kusk-system + resources: - manifests.yaml - service.yaml configurations: - kustomizeconfig.yaml + + +# Webhook configuration and names are hardcoded by controller-gen in manifests.yaml so we need to override paths and names with kustomize +patches: +- path: webhookpatch.json + target: + group: admissionregistration.k8s.io + version: v1 + kind: MutatingWebhookConfiguration +- patch: |- + - op: replace + path: /metadata/name + value: kusk-gateway-mutating-webhook-configuration + target: + group: admissionregistration.k8s.io + version: v1 + kind: MutatingWebhookConfiguration +- path: webhookpatch.json + target: + group: admissionregistration.k8s.io + kind: ValidatingWebhookConfiguration +- patch: |- + - op: replace + path: /metadata/name + value: kusk-gateway-validating-webhook-configuration + target: + group: admissionregistration.k8s.io + kind: ValidatingWebhookConfiguration diff --git a/config/webhook/service.yaml b/config/webhook/service.yaml index 543a10df2..0d2b32c78 100644 --- a/config/webhook/service.yaml +++ b/config/webhook/service.yaml @@ -5,14 +5,10 @@ metadata: name: kusk-gateway-webhooks-service namespace: system labels: - app.kubernetes.io/name: kusk-gateway app.kubernetes.io/component: kusk-gateway-webhooks-service - app.kubernetes.io/instance: kusk-gateway-development spec: ports: - port: 443 targetPort: webhook-server selector: - app.kubernetes.io/name: kusk-gateway app.kubernetes.io/component: kusk-gateway-manager - app.kubernetes.io/instance: kusk-gateway-development diff --git a/config/webhook/webhookpatch.json b/config/webhook/webhookpatch.json new file mode 100644 index 000000000..56db3e148 --- /dev/null +++ b/config/webhook/webhookpatch.json @@ -0,0 +1,12 @@ +[ + { + "op": "replace", + "path": "/webhooks/0/clientConfig/service/name", + "value": "kusk-gateway-webhooks-service" + }, + { + "op": "replace", + "path": "/webhooks/1/clientConfig/service/name", + "value": "kusk-gateway-webhooks-service" + } +] diff --git a/controllers/envoyfleet_controller.go b/controllers/envoyfleet_controller.go index fae0797be..22f86b120 100644 --- a/controllers/envoyfleet_controller.go +++ b/controllers/envoyfleet_controller.go @@ -27,6 +27,7 @@ package controllers import ( "context" "fmt" + "time" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -35,7 +36,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" gatewayv1alpha1 "github.com/kubeshop/kusk-gateway/api/v1alpha1" - "github.com/kubeshop/kusk-gateway/k8sutils" +) + +const ( + reconcilerDefaultRetrySeconds int = 30 ) // EnvoyFleetReconciler reconciles a EnvoyFleet object @@ -74,26 +78,32 @@ func (r *EnvoyFleetReconciler) Reconcile(ctx context.Context, req ctrl.Request) l.Error(err, "Failed setting controller owner reference") return ctrl.Result{}, err } - efResources, err := NewEnvoyFleetResources(ef) + // Generate Envoy Fleet resources... 
+ efResources, err := NewEnvoyFleetResources(ctx, r.Client, ef) if err != nil { - l.Error(err, "Failed to create envoy fleet configuration") - return ctrl.Result{}, fmt.Errorf("failed to create envoy fleet configuration: %w", err) - } - - if err := k8sutils.CreateOrReplace(ctx, r.Client, efResources.configMap); err != nil { - l.Error(err, "Failed to create envoy envoy config map") - return ctrl.Result{}, fmt.Errorf("failed to create envoy fleet config map: %w", err) + l.Error(err, "Failed to create EnvoyFleet configuration") + ef.Status.State = fmt.Sprint("Failed to create EnvoyFleet configuration: ", err) + if err := r.Client.Status().Update(ctx, ef); err != nil { + l.Error(err, "Unable to update Envoy Fleet status") + } + return ctrl.Result{}, fmt.Errorf("failed to create EnvoyFleet configuration: %w", err) } - if err := k8sutils.CreateOrReplace(ctx, r.Client, efResources.deployment); err != nil { - l.Error(err, "Failed to create envoy deployment") - return ctrl.Result{}, fmt.Errorf("failed to create envoy fleet deployment: %w", err) + // and deploy them + if err = efResources.CreateOrUpdate(ctx); err != nil { + l.Error(err, fmt.Sprintf("Failed to reconcile EnvoyFleet, will retry in %d seconds", reconcilerDefaultRetrySeconds)) + ef.Status.State = fmt.Sprint("Failed to reconcile EnvoyFleet configuration: ", err) + if err := r.Client.Status().Update(ctx, ef); err != nil { + l.Error(err, "Unable to update Envoy Fleet status") + } + return ctrl.Result{RequeueAfter: time.Duration(time.Duration(reconcilerDefaultRetrySeconds) * time.Second)}, fmt.Errorf("failed to create or update EnvoyFleet: %w", err) } - if err := k8sutils.CreateOrReplace(ctx, r.Client, efResources.service); err != nil { - l.Error(err, "Failed to create envoy fleet service") - return ctrl.Result{}, fmt.Errorf("failed to create envoy fleetconfig map: %w", err) + l.Info(fmt.Sprintf("Reconciled EnvoyFleet '%s' resources", ef.Name)) + ef.Status.State = "EnvoyFleet was successfully deployed" + if err := r.Client.Status().Update(ctx, ef); err != nil { + l.Error(err, "Unable to update Envoy Fleet status") + return ctrl.Result{RequeueAfter: time.Duration(time.Duration(reconcilerDefaultRetrySeconds) * time.Second)}, fmt.Errorf("unable to update Envoy Fleet status") } - l.Info(fmt.Sprintf("Reconciled Envoy Fleet '%s' resources", efResources.fleetName)) - + l.Info("UPDATED STATUS") return ctrl.Result{}, nil } diff --git a/controllers/envoyfleet_resources.go b/controllers/envoyfleet_resources.go index 2fe59500a..1d02bd449 100644 --- a/controllers/envoyfleet_resources.go +++ b/controllers/envoyfleet_resources.go @@ -1,6 +1,7 @@ package controllers import ( + "context" "fmt" appsv1 "k8s.io/api/apps/v1" @@ -8,6 +9,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" gatewayv1alpha1 "github.com/kubeshop/kusk-gateway/api/v1alpha1" + "github.com/kubeshop/kusk-gateway/k8sutils" + "sigs.k8s.io/controller-runtime/pkg/client" ) const ( @@ -18,47 +21,81 @@ const ( // EnvoyFleetResources is a collection of related Envoy Fleet K8s resources type EnvoyFleetResources struct { - fleetName string + client client.Client + fleet *gatewayv1alpha1.EnvoyFleet configMap *corev1.ConfigMap deployment *appsv1.Deployment service *corev1.Service commonLabels map[string]string } -func NewEnvoyFleetResources(ef *gatewayv1alpha1.EnvoyFleet) (*EnvoyFleetResources, error) { +func NewEnvoyFleetResources(ctx context.Context, client client.Client, ef *gatewayv1alpha1.EnvoyFleet) (*EnvoyFleetResources, error) { f := &EnvoyFleetResources{ - fleetName: ef.Name, + 
client:       client,
+		fleet:        ef,
 		commonLabels: map[string]string{
-			"app":   "kusk-gateway",
-			"fleet": ef.Name,
+			"app.kubernetes.io/name":       "kusk-gateway-envoy-fleet",
+			"app.kubernetes.io/managed-by": "kusk-gateway-manager",
+			"app.kubernetes.io/created-by": "kusk-gateway-manager",
+			"app.kubernetes.io/part-of":    "kusk-gateway",
+			"app.kubernetes.io/instance":   ef.Name,
+			"fleet":                        ef.Name,
 		},
 	}
-	if err := f.CreateConfigMap(ef); err != nil {
+	if err := f.generateConfigMap(ctx); err != nil {
 		return nil, err
 	}
 	// Depends on the ConfigMap
-	if err := f.CreateDeployment(ef); err != nil {
+	if err := f.generateDeployment(); err != nil {
 		return nil, err
 	}
 	// Depends on the Service
-	if err := f.CreateService(ef); err != nil {
+	if err := f.generateService(); err != nil {
 		return nil, err
 	}
 	return f, nil
 }
 
-func (e *EnvoyFleetResources) CreateConfigMap(ef *gatewayv1alpha1.EnvoyFleet) error {
+func (e *EnvoyFleetResources) CreateOrUpdate(ctx context.Context) error {
+	if err := k8sutils.CreateOrReplace(ctx, e.client, e.configMap); err != nil {
+		return fmt.Errorf("failed to deploy Envoy Fleet config map: %w", err)
+	}
+	if err := k8sutils.CreateOrReplace(ctx, e.client, e.deployment); err != nil {
+		return fmt.Errorf("failed to deploy Envoy Fleet deployment: %w", err)
+	}
+	if err := k8sutils.CreateOrReplace(ctx, e.client, e.service); err != nil {
+		return fmt.Errorf("failed to deploy Envoy Fleet service: %w", err)
+	}
+	return nil
+}
+
+func (e *EnvoyFleetResources) generateConfigMap(ctx context.Context) error {
 	// future object labels
 	labels := map[string]string{
-		"component": "envoy-config",
+		"app.kubernetes.io/component": "envoy-config",
 	}
 	// Copy over shared labels map
 	for key, value := range e.commonLabels {
 		labels[key] = value
 	}
-	configMapName := "kusk-envoy-config-" + e.fleetName
+	configMapName := "kusk-gateway-envoy-config-" + e.fleet.Name
+
+	xdsLabels := map[string]string{"app.kubernetes.io/name": "kusk-gateway", "app.kubernetes.io/component": "xds-service"}
+	xdsServices, err := k8sutils.GetServicesByLabels(ctx, e.client, xdsLabels)
+	if err != nil {
+		return fmt.Errorf("cannot create Envoy Fleet %s config map: %w", e.fleet.Name, err)
+	}
+	switch svcs := len(xdsServices); {
+	case svcs == 0:
+		return fmt.Errorf("cannot create Envoy Fleet %s config map: no xds services detected in the cluster when searching with the labels %s", e.fleet.Name, xdsLabels)
+	case svcs > 1:
+		return fmt.Errorf("cannot create Envoy Fleet %s config map: multiple xds services detected in the cluster when searching with the labels %s", e.fleet.Name, xdsLabels)
+	}
+	// At this point - we have exactly one service with (we ASSUME!)
one port + xdsServiceHostname := fmt.Sprintf("%s.%s.svc.cluster.local.", xdsServices[0].Name, xdsServices[0].Namespace) + xdsServicePort := xdsServices[0].Spec.Ports[0].Port e.configMap = &corev1.ConfigMap{ TypeMeta: metav1.TypeMeta{ @@ -67,35 +104,35 @@ func (e *EnvoyFleetResources) CreateConfigMap(ef *gatewayv1alpha1.EnvoyFleet) er }, ObjectMeta: metav1.ObjectMeta{ Name: configMapName, - Namespace: ef.Namespace, + Namespace: e.fleet.Namespace, Labels: labels, - OwnerReferences: []metav1.OwnerReference{envoyFleetAsOwner(ef)}, + OwnerReferences: []metav1.OwnerReference{envoyFleetAsOwner(e.fleet)}, }, Data: map[string]string{ - "envoy-config.yaml": fmt.Sprintf(envoyConfigTemplate, e.fleetName), + "envoy-config.yaml": fmt.Sprintf(envoyConfigTemplate, e.fleet.Name, xdsServiceHostname, xdsServicePort), }, } return nil } -func (e *EnvoyFleetResources) CreateDeployment(ef *gatewayv1alpha1.EnvoyFleet) error { +func (e *EnvoyFleetResources) generateDeployment() error { // future object labels labels := map[string]string{ - "component": "envoy", + "app.kubernetes.io/component": "envoy", } // Copy over shared labels map for key, value := range e.commonLabels { labels[key] = value } - deploymentName := "kusk-envoy-" + e.fleetName + deploymentName := "kusk-gateway-envoy-" + e.fleet.Name configMapName := e.configMap.Name envoyContainer := corev1.Container{ Name: "envoy", - Image: ef.Spec.Image, + Image: e.fleet.Spec.Image, ImagePullPolicy: "IfNotPresent", Command: []string{"/bin/sh", "-c"}, Args: []string{ @@ -134,12 +171,12 @@ func (e *EnvoyFleetResources) CreateDeployment(ef *gatewayv1alpha1.EnvoyFleet) e }, } // Set Enovy Pod Resources if specified - if ef.Spec.Resources != nil { - if ef.Spec.Resources.Limits != nil { - envoyContainer.Resources.Limits = *&ef.Spec.Resources.Limits + if e.fleet.Spec.Resources != nil { + if e.fleet.Spec.Resources.Limits != nil { + envoyContainer.Resources.Limits = *&e.fleet.Spec.Resources.Limits } - if ef.Spec.Resources.Requests != nil { - envoyContainer.Resources.Requests = *&ef.Spec.Resources.Requests + if e.fleet.Spec.Resources.Requests != nil { + envoyContainer.Resources.Requests = *&e.fleet.Spec.Resources.Requests } } e.deployment = &appsv1.Deployment{ @@ -149,13 +186,13 @@ func (e *EnvoyFleetResources) CreateDeployment(ef *gatewayv1alpha1.EnvoyFleet) e }, ObjectMeta: metav1.ObjectMeta{ Name: deploymentName, - Namespace: ef.Namespace, + Namespace: e.fleet.Namespace, Labels: labels, - Annotations: ef.Spec.Annotations, - OwnerReferences: []metav1.OwnerReference{envoyFleetAsOwner(ef)}, + Annotations: e.fleet.Spec.Annotations, + OwnerReferences: []metav1.OwnerReference{envoyFleetAsOwner(e.fleet)}, }, Spec: appsv1.DeploymentSpec{ - Replicas: ef.Spec.Size, + Replicas: e.fleet.Spec.Size, Selector: labelSelectors(labels), Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ @@ -177,10 +214,10 @@ func (e *EnvoyFleetResources) CreateDeployment(ef *gatewayv1alpha1.EnvoyFleet) e }, }, }, - NodeSelector: ef.Spec.NodeSelector, - Affinity: ef.Spec.Affinity, - Tolerations: ef.Spec.Tolerations, - TerminationGracePeriodSeconds: ef.Spec.TerminationGracePeriodSeconds, + NodeSelector: e.fleet.Spec.NodeSelector, + Affinity: e.fleet.Spec.Affinity, + Tolerations: e.fleet.Spec.Tolerations, + TerminationGracePeriodSeconds: e.fleet.Spec.TerminationGracePeriodSeconds, }, }, }, @@ -188,37 +225,37 @@ func (e *EnvoyFleetResources) CreateDeployment(ef *gatewayv1alpha1.EnvoyFleet) e return nil } -func (f *EnvoyFleetResources) CreateService(ef *gatewayv1alpha1.EnvoyFleet) error { +func 
(e *EnvoyFleetResources) generateService() error { // future object labels labels := map[string]string{ - "component": "envoy-svc", + "app.kubernetes.io/component": "envoy-svc", } // Copy over shared labels map - for key, value := range f.commonLabels { + for key, value := range e.commonLabels { labels[key] = value } - serviceName := "kusk-envoy-svc-" + ef.Name - f.service = &corev1.Service{ + serviceName := "kusk-gateway-envoy-svc-" + e.fleet.Name + e.service = &corev1.Service{ TypeMeta: metav1.TypeMeta{ Kind: "Service", APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ Name: serviceName, - Namespace: ef.Namespace, + Namespace: e.fleet.Namespace, Labels: labels, - Annotations: ef.Spec.Service.Annotations, - OwnerReferences: []metav1.OwnerReference{envoyFleetAsOwner(ef)}, + Annotations: e.fleet.Spec.Service.Annotations, + OwnerReferences: []metav1.OwnerReference{envoyFleetAsOwner(e.fleet)}, }, Spec: corev1.ServiceSpec{ - Ports: ef.Spec.Service.Ports, - Selector: f.deployment.Spec.Selector.MatchLabels, - Type: ef.Spec.Service.Type, + Ports: e.fleet.Spec.Service.Ports, + Selector: e.deployment.Spec.Selector.MatchLabels, + Type: e.fleet.Spec.Service.Type, }, } // Static IP address for the LoadBalancer - if ef.Spec.Service.Type == corev1.ServiceTypeLoadBalancer && ef.Spec.Service.LoadBalancerIP != "" { - f.service.Spec.LoadBalancerIP = ef.Spec.Service.LoadBalancerIP + if e.fleet.Spec.Service.Type == corev1.ServiceTypeLoadBalancer && e.fleet.Spec.Service.LoadBalancerIP != "" { + e.service.Spec.LoadBalancerIP = e.fleet.Spec.Service.LoadBalancerIP } return nil @@ -272,8 +309,8 @@ static_resources: - endpoint: address: socket_address: - address: kusk-xds-service.kusk-system.svc.cluster.local - port_value: 18000 + address: %s + port_value: %d admin: address: diff --git a/development/cluster/create-env.sh b/development/cluster/create-env.sh index c8165a529..725168507 100755 --- a/development/cluster/create-env.sh +++ b/development/cluster/create-env.sh @@ -53,4 +53,4 @@ SHELL=/bin/bash eval $(minikube docker-env --profile "kgw") make docker-build deploy -kubectl rollout status -w deployment/kusk-controller-manager -n kusk-system \ No newline at end of file +kubectl rollout status -w deployment/kusk-gateway-manager -n kusk-system diff --git a/docs/development.md b/docs/development.md index 9027cedbc..7b40ca66b 100644 --- a/docs/development.md +++ b/docs/development.md @@ -10,7 +10,7 @@ Internally Kusk Gateway uses the [go-control-plane](https://github.com/envoyprox - [Goland](https://www.jetbrains.com/help/go/attach-to-running-go-processes-with-debugger.html#attach-to-a-process-in-the-docker-container) - [VSCode](https://github.com/golang/vscode-go/blob/master/docs/debugging.md#configure) (see below for a working example) - Run `make create-env` -- When the make script is waiting for kusk-controller-manager to become healthy, run `kubectl port-forward deployment/kusk-controller-manager -n kusk-system 40000:40000` in a new terminal window +- When the make script is waiting for kusk-gateway-manager to become healthy, run `kubectl port-forward deployment/kusk-gateway-manager -n kusk-system 40000:40000` in a new terminal window - Run your debug configuration from your IDE. The pod won't become healthy until you do this as Delve waits for a connection on :40000. - When the script completes, you can now deploy the httpbin example that creates a backend API service and pushes gateway CRDs to configure Envoy with `kubectl apply -f examples/httpbin`. 
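Since the bootstrap template in controllers/envoyfleet_resources.go above now carries three placeholders (node cluster name, xds service address, xds service port) instead of a hard-coded endpoint, the `fmt.Sprintf` call in `generateConfigMap` must pass its arguments in exactly that order. Below is a standalone sketch of the rendering, not part of the patch: the template is a trimmed stand-in, the hostname and port are example values, and the parse step uses the `ghodss/yaml` module already present in go.mod purely as a sanity check:

```
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

// A trimmed stand-in for envoyConfigTemplate: first %s -> node cluster
// (the fleet name), second %s -> xds service hostname, %d -> xds port.
const bootstrapTemplate = `
node:
  cluster: %s
static_resources:
  clusters:
  - name: xds_cluster
    load_assignment:
      cluster_name: xds_cluster
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: %s
                port_value: %d
`

func main() {
	// Example values; in the controller these come from the EnvoyFleet name
	// and the single discovered xds Service.
	rendered := fmt.Sprintf(bootstrapTemplate, "default",
		"kusk-gateway-xds-service.kusk-system.svc.cluster.local.", int32(18000))

	// Unmarshalling the result catches placeholder/argument mismatches early.
	var cfg map[string]interface{}
	if err := yaml.Unmarshal([]byte(rendered), &cfg); err != nil {
		panic(fmt.Errorf("rendered bootstrap is not valid YAML: %w", err))
	}
	fmt.Print(rendered)
}
```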
- Place breakpoints in the code and debug as normal @@ -19,7 +19,7 @@ To test changes to the code, run the following: - `make generate manifests install docker-build` - If your running the code in minikube, don't forget to `eval $(minikube docker-env [--profile "$PROFILE_NAME"])` - e.g. `eval $(minikube docker-env --profile "kgw")` if you ran `make create-env` -- restart kusk-gateway deployment to pick up the new image - `kubectl rollout restart deployment/kusk-controller-manager -n kusk-system` +- restart kusk-gateway deployment to pick up the new image - `kubectl rollout restart deployment/kusk-gateway-manager -n kusk-system` #### VSCode launch.json example ``` diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 2733f65f8..a99217410 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -6,8 +6,8 @@ ``` ❯ kubectl get deployment -n kusk-system NAME READY UP-TO-DATE AVAILABLE AGE -kusk-controller-manager 1/1 1 1 15m -kusk-envoy-default 1/1 1 1 2m33s +kusk-gateway-manager 1/1 1 1 15m +kusk-gateway-envoy-default 1/1 1 1 2m33s ``` For this example, it's `kusk-envoy-default`. Be sure to query the correct namespace for your installation. diff --git a/k8sutils/utils.go b/k8sutils/utils.go index 026c1c811..827e3a277 100644 --- a/k8sutils/utils.go +++ b/k8sutils/utils.go @@ -2,7 +2,9 @@ package k8sutils import ( "context" + "fmt" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -39,3 +41,14 @@ func CreateOrReplace(ctx context.Context, client clientPkg.Client, obj clientPkg return client.Create(ctx, obj) } + +func GetServicesByLabels(ctx context.Context, client clientPkg.Client, labels map[string]string) ([]corev1.Service, error) { + labelSelector := clientPkg.MatchingLabels(labels) + + servicesList := &corev1.ServiceList{} + if err := client.List(ctx, servicesList, labelSelector); err != nil { + return []corev1.Service{}, fmt.Errorf("failed getting services from the cluster: %w", err) + } + + return servicesList.Items, nil +} From 629d0e72fe977765dd85ca2db9c6d686138436c7 Mon Sep 17 00:00:00 2001 From: Taras Yatsurak Date: Fri, 10 Dec 2021 12:42:49 +0200 Subject: [PATCH 4/8] Fixes --- Makefile | 21 +++---- config/debug/kustomization.yaml | 13 +++++ .../manager_debug_patch.yaml | 3 + config/default/kustomization.yaml | 55 ++++++++++--------- controllers/envoyfleet_controller.go | 15 +++-- controllers/envoyfleet_resources.go | 31 +++++------ docs/development.md | 37 +++++++++---- 7 files changed, 108 insertions(+), 67 deletions(-) create mode 100644 config/debug/kustomization.yaml rename config/{default => debug}/manager_debug_patch.yaml (86%) diff --git a/Makefile b/Makefile index e3517b1a8..f632ef6a1 100644 --- a/Makefile +++ b/Makefile @@ -75,10 +75,11 @@ run: install-local generate fmt vet ## Run a controller from your host, proxying ktunnel expose -n kusk-system kusk-xds-service 18000 & ENABLE_WEBHOOKS=false bin/manager ; fg docker-build: ## Build docker image with the manager. - DOCKER_BUILDKIT=1 docker build -t ${IMG} . + @eval $$(minikube docker-env --profile kgw); DOCKER_BUILDKIT=1 docker build -t ${IMG} . + +docker-build-debug: ## Build docker image with the manager and debugger. + @eval $$(minikube docker-env --profile kgw) ;DOCKER_BUILDKIT=1 docker build -t "${IMG}-debug" -f ./Dockerfile-debug . -docker-build-debug:## Build docker image with the manager and debugger. - DOCKER_BUILDKIT=1 docker build -t "${IMG}-debug" -f ./Dockerfile-debug . 
 docker-push: ## Push docker image with the manager.
 	docker push ${IMG}
 
@@ -99,21 +100,21 @@ uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified
 	$(KUSTOMIZE) build config/crd | kubectl delete -f -
 
 deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
-	cd config/manager && $(KUSTOMIZE) edit set image kusk-gateway=${IMG}
 	$(KUSTOMIZE) build config/default | kubectl apply -f -
 
 deploy-debug: manifests kustomize ## Deploy controller with debugger to the K8s cluster specified in ~/.kube/config.
-	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}-debug
-	cd config/default && $(KUSTOMIZE) edit add patch --path ./manager_debug_patch.yaml
-	$(KUSTOMIZE) build config/default | kubectl apply -f -
-	cd config/default && $(KUSTOMIZE) edit remove patch --path ./manager_debug_patch.yaml
+	$(KUSTOMIZE) build config/debug | kubectl apply -f -
 
 undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config.
 	$(KUSTOMIZE) build config/default | kubectl delete -f -
 
-cycle: ## Trigger manager deployment rollout restart to pick up the new container image with the same tag
+update: docker-build deploy cycle ## Runs docker-build and deploy, then restarts the kusk-gateway-manager deployment to pick up the change
+update-debug: docker-build-debug deploy-debug cycle ## Runs docker-build-debug and deploy-debug, then restarts the kusk-gateway-manager deployment to pick up the change
+
+cycle: ## Triggers kusk-gateway-manager deployment rollout restart to pick up the new container image with the same tag
 	kubectl rollout restart deployment/kusk-gateway-manager -n kusk-system
-	@echo "Triggered deployment/kusk-gateway-manager restart"
+	@echo "Triggered deployment/kusk-gateway-manager restart, waiting for it to finish"
+	kubectl rollout status deployment/kusk-gateway-manager -n kusk-system --timeout=30s
 
 CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
 controller-gen: ## Download controller-gen locally if necessary.
diff --git a/config/debug/kustomization.yaml b/config/debug/kustomization.yaml
new file mode 100644
index 000000000..2879ac94d
--- /dev/null
+++ b/config/debug/kustomization.yaml
@@ -0,0 +1,13 @@
+# This kustomization is used to set up debugging of kusk-gateway-manager in the cluster
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+- ../default
+
+patchesStrategicMerge:
+- manager_debug_patch.yaml
+
+images:
+- name: kusk-gateway
+  newName: kusk-gateway
+  newTag: dev-debug
diff --git a/config/default/manager_debug_patch.yaml b/config/debug/manager_debug_patch.yaml
similarity index 86%
rename from config/default/manager_debug_patch.yaml
rename to config/debug/manager_debug_patch.yaml
index 9be5a9862..41ad22665 100644
--- a/config/default/manager_debug_patch.yaml
+++ b/config/debug/manager_debug_patch.yaml
@@ -9,6 +9,9 @@ spec:
       containers:
       - name: manager
         resources: null
+        # Disable health checks for the debugging image
+        livenessProbe: null
+        readinessProbe: null
         command:
         - /dlv
         - --listen=:40000
diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml
index 30a9bf064..6126dc0f5 100644
--- a/config/default/kustomization.yaml
+++ b/config/default/kustomization.yaml
@@ -1,3 +1,12 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+- ../crd
+- ../rbac
+- ../manager
+- ../webhook
+- ../certmanager
+
 # Adds namespace to all resources.
namespace: kusk-system @@ -10,26 +19,18 @@ namespace: kusk-system # Labels to add to all resources and selectors. commonLabels: - app.kubernetes.io/name: kusk-gateway app.kubernetes.io/instance: kusk-gateway-development + app.kubernetes.io/name: kusk-gateway -bases: -- ../crd -- ../rbac -- ../manager # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in # crd/kustomization.yaml -- ../webhook # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. -- ../certmanager # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. #- ../prometheus -patchesStrategicMerge: # Protect the /metrics endpoint by putting it behind auth. # If you want your kusk-gateway-manager to expose the /metrics # endpoint w/o any authn/z, please comment the following line. -- manager_auth_proxy_patch.yaml # Mount the controller config file for loading manager configurations # through a ComponentConfig type @@ -37,39 +38,43 @@ patchesStrategicMerge: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in # crd/kustomization.yaml -- manager_webhook_patch.yaml # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. # 'CERTMANAGER' needs to be enabled to use ca injection +patchesStrategicMerge: +- manager_auth_proxy_patch.yaml +- manager_webhook_patch.yaml - webhookcainjection_patch.yaml # the following config is for teaching kustomize how to do var substitution -vars: # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. -- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR +vars: +- fieldref: + fieldPath: metadata.namespace + name: CERTIFICATE_NAMESPACE objref: - kind: Certificate group: cert-manager.io + kind: Certificate + name: serving-cert version: v1 - name: serving-cert # this name should match the one in certificate.yaml - fieldref: - fieldpath: metadata.namespace -- name: CERTIFICATE_NAME +- fieldref: {} + name: CERTIFICATE_NAME objref: - kind: Certificate group: cert-manager.io + kind: Certificate + name: serving-cert version: v1 - name: serving-cert # this name should match the one in certificate.yaml -- name: SERVICE_NAMESPACE # namespace of the service +- fieldref: + fieldPath: metadata.namespace + name: SERVICE_NAMESPACE objref: kind: Service - version: v1 name: kusk-gateway-webhooks-service - fieldref: - fieldpath: metadata.namespace -- name: SERVICE_NAME + version: v1 +- fieldref: {} + name: SERVICE_NAME objref: kind: Service - version: v1 name: kusk-gateway-webhooks-service + version: v1 diff --git a/controllers/envoyfleet_controller.go b/controllers/envoyfleet_controller.go index 22f86b120..1e9991348 100644 --- a/controllers/envoyfleet_controller.go +++ b/controllers/envoyfleet_controller.go @@ -34,12 +34,17 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/predicate" gatewayv1alpha1 "github.com/kubeshop/kusk-gateway/api/v1alpha1" ) const ( reconcilerDefaultRetrySeconds int = 30 + + // Used to set the State field in the Status + envoyFleetStateSuccess string = "Deployed" + envoyFleetStateFailure string = "Failed" ) // EnvoyFleetReconciler reconciles a EnvoyFleet object @@ -82,7 +87,7 @@ func (r 
*EnvoyFleetReconciler) Reconcile(ctx context.Context, req ctrl.Request) efResources, err := NewEnvoyFleetResources(ctx, r.Client, ef) if err != nil { l.Error(err, "Failed to create EnvoyFleet configuration") - ef.Status.State = fmt.Sprint("Failed to create EnvoyFleet configuration: ", err) + ef.Status.State = envoyFleetStateFailure if err := r.Client.Status().Update(ctx, ef); err != nil { l.Error(err, "Unable to update Envoy Fleet status") } @@ -91,25 +96,27 @@ func (r *EnvoyFleetReconciler) Reconcile(ctx context.Context, req ctrl.Request) // and deploy them if err = efResources.CreateOrUpdate(ctx); err != nil { l.Error(err, fmt.Sprintf("Failed to reconcile EnvoyFleet, will retry in %d seconds", reconcilerDefaultRetrySeconds)) - ef.Status.State = fmt.Sprint("Failed to reconcile EnvoyFleet configuration: ", err) + ef.Status.State = envoyFleetStateFailure if err := r.Client.Status().Update(ctx, ef); err != nil { l.Error(err, "Unable to update Envoy Fleet status") } return ctrl.Result{RequeueAfter: time.Duration(time.Duration(reconcilerDefaultRetrySeconds) * time.Second)}, fmt.Errorf("failed to create or update EnvoyFleet: %w", err) } l.Info(fmt.Sprintf("Reconciled EnvoyFleet '%s' resources", ef.Name)) - ef.Status.State = "EnvoyFleet was successfully deployed" + ef.Status.State = envoyFleetStateSuccess if err := r.Client.Status().Update(ctx, ef); err != nil { l.Error(err, "Unable to update Envoy Fleet status") return ctrl.Result{RequeueAfter: time.Duration(time.Duration(reconcilerDefaultRetrySeconds) * time.Second)}, fmt.Errorf("unable to update Envoy Fleet status") } - l.Info("UPDATED STATUS") return ctrl.Result{}, nil } // SetupWithManager sets up the controller with the Manager. func (r *EnvoyFleetReconciler) SetupWithManager(mgr ctrl.Manager) error { + // predicate will prevent triggering the Reconciler on resource Status field changes. + pred := predicate.GenerationChangedPredicate{} return ctrl.NewControllerManagedBy(mgr). For(&gatewayv1alpha1.EnvoyFleet{}). + WithEventFilter(pred). 
Complete(r) } diff --git a/controllers/envoyfleet_resources.go b/controllers/envoyfleet_resources.go index 1d02bd449..3e6e107eb 100644 --- a/controllers/envoyfleet_resources.go +++ b/controllers/envoyfleet_resources.go @@ -26,15 +26,15 @@ type EnvoyFleetResources struct { configMap *corev1.ConfigMap deployment *appsv1.Deployment service *corev1.Service - commonLabels map[string]string + sharedLabels map[string]string } func NewEnvoyFleetResources(ctx context.Context, client client.Client, ef *gatewayv1alpha1.EnvoyFleet) (*EnvoyFleetResources, error) { f := &EnvoyFleetResources{ client: client, fleet: ef, - commonLabels: map[string]string{ - "app.kubernetes.io/name": "kuks-gateway-envoy-fleet", + sharedLabels: map[string]string{ + "app.kubernetes.io/name": "kusk-gateway-envoy-fleet", "app.kubernetes.io/managed-by": "kusk-gateway-manager", "app.kubernetes.io/created-by": "kusk-gateway-manager", "app.kubernetes.io/part-of": "kusk-gateway", @@ -47,13 +47,10 @@ func NewEnvoyFleetResources(ctx context.Context, client client.Client, ef *gatew return nil, err } // Depends on the ConfigMap - if err := f.generateDeployment(); err != nil { - return nil, err - } + f.generateDeployment() // Depends on the Service - if err := f.generateService(); err != nil { - return nil, err - } + f.generateService() + return f, nil } @@ -76,7 +73,7 @@ func (e *EnvoyFleetResources) generateConfigMap(ctx context.Context) error { "app.kubernetes.io/component": "envoy-config", } // Copy over shared labels map - for key, value := range e.commonLabels { + for key, value := range e.sharedLabels { labels[key] = value } @@ -116,13 +113,13 @@ func (e *EnvoyFleetResources) generateConfigMap(ctx context.Context) error { return nil } -func (e *EnvoyFleetResources) generateDeployment() error { +func (e *EnvoyFleetResources) generateDeployment() { // future object labels labels := map[string]string{ "app.kubernetes.io/component": "envoy", } // Copy over shared labels map - for key, value := range e.commonLabels { + for key, value := range e.sharedLabels { labels[key] = value } @@ -130,6 +127,7 @@ func (e *EnvoyFleetResources) generateDeployment() error { configMapName := e.configMap.Name + // Create container template first envoyContainer := corev1.Container{ Name: "envoy", Image: e.fleet.Spec.Image, @@ -179,6 +177,7 @@ func (e *EnvoyFleetResources) generateDeployment() error { envoyContainer.Resources.Requests = *&e.fleet.Spec.Resources.Requests } } + // Create deployment e.deployment = &appsv1.Deployment{ TypeMeta: metav1.TypeMeta{ Kind: "Deployment", @@ -222,16 +221,16 @@ func (e *EnvoyFleetResources) generateDeployment() error { }, }, } - return nil + return } -func (e *EnvoyFleetResources) generateService() error { +func (e *EnvoyFleetResources) generateService() { // future object labels labels := map[string]string{ "app.kubernetes.io/component": "envoy-svc", } // Copy over shared labels map - for key, value := range e.commonLabels { + for key, value := range e.sharedLabels { labels[key] = value } serviceName := "kusk-gateway-envoy-svc-" + e.fleet.Name @@ -258,7 +257,7 @@ func (e *EnvoyFleetResources) generateService() error { e.service.Spec.LoadBalancerIP = e.fleet.Spec.Service.LoadBalancerIP } - return nil + return } func envoyFleetAsOwner(cr *gatewayv1alpha1.EnvoyFleet) metav1.OwnerReference { trueVar := true diff --git a/docs/development.md b/docs/development.md index 7b40ca66b..259fa37f1 100644 --- a/docs/development.md +++ b/docs/development.md @@ -4,25 +4,37 @@ Kusk Gateway code is managed with the help of 
[Kubebuilder](https://github.com/k Internally Kusk Gateway uses the [go-control-plane](https://github.com/envoyproxy/go-control-plane) package to configure Envoy with its xDS protocol. +We use [Minikube](https://minikube.sigs.k8s.io/docs/start/) as the development environment, so the following instructions and the Makefile in the project are tuned to it. + +Make sure you have Minikube installed before proceeding further. + +For macOS users, an additional configuration step is needed: install the [hyperkit](https://minikube.sigs.k8s.io/docs/drivers/hyperkit/) driver and set it as the default Minikube driver. + ## Set up development environment + ### with in-cluster debugging -- Set up remote debugging for your IDE pointed at localhost:40000 + +- Set up remote debugging for your IDE pointed at localhost:40000 - [Goland](https://www.jetbrains.com/help/go/attach-to-running-go-processes-with-debugger.html#attach-to-a-process-in-the-docker-container) - [VSCode](https://github.com/golang/vscode-go/blob/master/docs/debugging.md#configure) (see below for a working example) -- Run `make create-env` -- When the make script is waiting for kusk-gateway-manager to become healthy, run `kubectl port-forward deployment/kusk-gateway-manager -n kusk-system 40000:40000` in a new terminal window -- Run your debug configuration from your IDE. The pod won't become healthy until you do this as Delve waits for a connection on :40000. -- When the script completes, you can now deploy the httpbin example that creates a backend API service and pushes gateway CRDs to configure Envoy with `kubectl apply -f examples/httpbin`. -- Place breakpoints in the code and debug as normal +- Run `make create-env`. Once this command finishes, you should have a working environment with kusk-gateway-manager running in the kusk-system namespace. +- To attach the IDE to the pod for debugging, run `make update-debug`, which builds the debug image inside the Minikube cluster and updates the kusk-gateway-manager deployment. + After the deployment, the kusk-gateway-manager pod will be alive but not running the application, since Delve in the container waits for you to connect to it on port 40000. + Run `kubectl port-forward deployment/kusk-gateway-manager -n kusk-system 40000:40000` in a new terminal window to set up port forwarding to the Delve port. It is advised to save this command as an IDE task (see the tasks.json sketch below). +- Run your debug configuration from the IDE to connect to the port-forwarded localhost:40000. +- You can now deploy the httpbin example that creates a backend API service and pushes gateway CRDs to configure Envoy with `kubectl apply -f examples/httpbin`. +- Place breakpoints in the code and debug as normal. To test changes to the code, run the following: + +- `make generate manifests install docker-build deploy cycle` for the usual builds (without debugging) +- `make update` for the usual builds if only the manager code was changed and no CRD update is needed. +- `make generate manifests install docker-build-debug deploy-debug cycle` for debug builds. +- `make update-debug` for debug builds if only the manager code was changed and no CRD update is needed.
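#### VSCode tasks.json example

The debugging steps above suggest keeping the Delve port-forward as an IDE task. A minimal VSCode `tasks.json` sketch for that command follows; the task label is illustrative, not something defined in this repo:

```json
{
    "version": "2.0.0",
    "tasks": [
        {
            // Hypothetical label - name the task whatever you prefer
            "label": "port-forward kusk-gateway-manager delve",
            "type": "shell",
            // Same command as in the in-cluster debugging instructions above
            "command": "kubectl port-forward deployment/kusk-gateway-manager -n kusk-system 40000:40000",
            "problemMatcher": []
        }
    ]
}
```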
#### VSCode launch.json example -``` + +```json { // Use IntelliSense to learn about possible attributes. // Hover to view descriptions of existing attributes. @@ -43,7 +55,8 @@ To test changes to the code, run the following: ``` ### Run kusk gateway locally + - Run `make create-env` - Run `kubectl apply -f ./config/samples/gateway_v1_envoyfleet.yaml -n kusk-system` -- Run `make run` +- Run `make run` - This runs the built binary on your machine and creates a tunnel to Minikube so that Envoy in the cluster and the Kusk Gateway instance started from your IDE can communicate. From 4fe008ec6a5cb7b1f31ec048a6c2324aa8440cab Mon Sep 17 00:00:00 2001 From: Taras Yatsurak Date: Fri, 10 Dec 2021 14:12:17 +0200 Subject: [PATCH 5/8] Review Fixes --- controllers/envoyfleet_resources.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/controllers/envoyfleet_resources.go b/controllers/envoyfleet_resources.go index 3e6e107eb..2bfa77731 100644 --- a/controllers/envoyfleet_resources.go +++ b/controllers/envoyfleet_resources.go @@ -131,7 +131,7 @@ func (e *EnvoyFleetResources) generateDeployment() { envoyContainer := corev1.Container{ Name: "envoy", Image: e.fleet.Spec.Image, - ImagePullPolicy: "IfNotPresent", + ImagePullPolicy: corev1.PullIfNotPresent, Command: []string{"/bin/sh", "-c"}, Args: []string{ "envoy -c /etc/envoy/envoy.yaml --service-node $POD_NAME", From 1a27a85d4ea6d14d4a922e1456a744c2545f4d1f Mon Sep 17 00:00:00 2001 From: Taras Yatsurak Date: Fri, 10 Dec 2021 16:41:22 +0200 Subject: [PATCH 6/8] get back kubebuilder version --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index f632ef6a1..373e32c87 100644 --- a/Makefile +++ b/Makefile @@ -118,7 +118,7 @@ cycle: ## Triggers kusk-gateway-manager deployment rollout restart to pick up th CONTROLLER_GEN = $(shell pwd)/bin/controller-gen controller-gen: ## Download controller-gen locally if necessary. - $(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0) + $(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.4.1) KUSTOMIZE = $(shell pwd)/bin/kustomize kustomize: ## Download kustomize locally if necessary.
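For context on the `$(call go-get-tool, ...)` invocation above: the helper itself is defined further down in the Makefile and is outside this diff. The sketch below shows the conventional kubebuilder-scaffolded form of that helper, under the assumption that this repository follows the scaffold; it is not quoted from the repo:

```make
# Assumed kubebuilder-scaffold helper, not quoted from this repository.
# go-get-tool builds the Go tool $(2) at its pinned version and installs it
# as $(1) under ./bin, using a throwaway temporary module so the project's
# own go.mod stays untouched.
define go-get-tool
@[ -f $(1) ] || { \
set -e ;\
TMP_DIR=$$(mktemp -d) ;\
cd $$TMP_DIR ;\
go mod init tmp ;\
echo "Downloading $(2)" ;\
GOBIN=$(shell pwd)/bin go get $(2) ;\
rm -rf $$TMP_DIR ;\
}
endef
```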
From 196f91b5b9a8b68bd73290c86f4965023eede05a Mon Sep 17 00:00:00 2001 From: Taras Yatsurak Date: Fri, 10 Dec 2021 17:26:15 +0200 Subject: [PATCH 7/8] Fix tests --- config/webhook/kustomization.yaml | 22 ++++++++++++---------- config/webhook/webhookpatch.json | 12 ------------ 2 files changed, 12 insertions(+), 22 deletions(-) delete mode 100644 config/webhook/webhookpatch.json diff --git a/config/webhook/kustomization.yaml b/config/webhook/kustomization.yaml index 35ec1fd3b..cf00fac96 100644 --- a/config/webhook/kustomization.yaml +++ b/config/webhook/kustomization.yaml @@ -10,27 +10,29 @@ configurations: # Webhook configuration and names are hardcoded by controller-gen in manifests.yaml so we need to override paths and names with kustomize patches: -- path: webhookpatch.json - target: - group: admissionregistration.k8s.io - version: v1 - kind: MutatingWebhookConfiguration - patch: |- - op: replace path: /metadata/name value: kusk-gateway-mutating-webhook-configuration + - op: replace + path: /webhooks/0/clientConfig/service/name + value: kusk-gateway-webhooks-service + - op: replace + path: /webhooks/1/clientConfig/service/name + value: kusk-gateway-webhooks-service target: group: admissionregistration.k8s.io - version: v1 kind: MutatingWebhookConfiguration -- path: webhookpatch.json - target: - group: admissionregistration.k8s.io - kind: ValidatingWebhookConfiguration - patch: |- - op: replace path: /metadata/name value: kusk-gateway-validating-webhook-configuration + - op: replace + path: /webhooks/0/clientConfig/service/name + value: kusk-gateway-webhooks-service + - op: replace + path: /webhooks/1/clientConfig/service/name + value: kusk-gateway-webhooks-service target: group: admissionregistration.k8s.io kind: ValidatingWebhookConfiguration diff --git a/config/webhook/webhookpatch.json b/config/webhook/webhookpatch.json deleted file mode 100644 index 56db3e148..000000000 --- a/config/webhook/webhookpatch.json +++ /dev/null @@ -1,12 +0,0 @@ -[ - { - "op": "replace", - "path": "/webhooks/0/clientConfig/service/name", - "value": "kusk-gateway-webhooks-service" - }, - { - "op": "replace", - "path": "/webhooks/1/clientConfig/service/name", - "value": "kusk-gateway-webhooks-service" - } -] From 318b3b19bbe52304deb5cf28dff012c2fe67589a Mon Sep 17 00:00:00 2001 From: Taras Yatsurak Date: Mon, 13 Dec 2021 11:49:32 +0200 Subject: [PATCH 8/8] Revert removal of configmanager --- controllers/envoyfleet_controller.go | 3 ++- main.go | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/controllers/envoyfleet_controller.go b/controllers/envoyfleet_controller.go index 1e9991348..188fd389a 100644 --- a/controllers/envoyfleet_controller.go +++ b/controllers/envoyfleet_controller.go @@ -50,7 +50,8 @@ const ( // EnvoyFleetReconciler reconciles a EnvoyFleet object type EnvoyFleetReconciler struct { client.Client - Scheme *runtime.Scheme + Scheme *runtime.Scheme + ConfigManager *KubeEnvoyConfigManager } // +kubebuilder:rbac:groups=gateway.kusk.io,resources=envoyfleet,verbs=get;list;watch;create;update;patch;delete diff --git a/main.go b/main.go index 13201bf0c..bde6ffa30 100644 --- a/main.go +++ b/main.go @@ -116,8 +116,9 @@ func main() { } if err = (&controllers.EnvoyFleetReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + ConfigManager: &controllerConfigManager, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "EnvoyFleet") os.Exit(1)