From 3f9ef2209858a5e909cc006cc2eb5d2e1d6c8443 Mon Sep 17 00:00:00 2001
From: zhenggu1
Date: Sun, 26 Dec 2021 23:25:13 +0800
Subject: [PATCH 1/2] ingress: add init implementation of YurtIngress operator

The YurtIngress operator is used to control and manage the ingress
controllers in NodePools. Users can enable or disable the ingress feature
for multiple NodePools through the singleton YurtIngress CR. This initial
version only covers some basic functions, such as selecting the NodePools
on which to enable ingress and setting the ingress controller replicas for
every pool; it can be extended with other functions according to user
requirements in the future.

Signed-off-by: zhenggu1
---
 cmd/yurt-app-manager/app/core.go | 6 +
 .../bases/apps.openyurt.io_yurtingresses.yaml | 157 ++++
 .../yurt-app-manager/crd/kustomization.yaml | 1 +
 .../patches/cainjection_in_yurtingresses.yaml | 8 +
 .../crd/patches/webhook_in_yurtingresses.yaml | 17 +
 config/yurt-app-manager/rbac/role.yaml | 104 +++
 .../rbac/yurtingress_editor_role.yaml | 24 +
 .../rbac/yurtingress_viewer_role.yaml | 20 +
 .../apis/apps/v1alpha1/yurtingress_types.go | 149 ++++
 .../apps/v1alpha1/zz_generated.deepcopy.go | 173 ++++
 .../typed/apps/v1alpha1/apps_client.go | 5 +
 .../apps/v1alpha1/fake/fake_apps_client.go | 4 +
 .../apps/v1alpha1/fake/fake_yurtingress.go | 133 +++
 .../apps/v1alpha1/generated_expansion.go | 2 +
 .../typed/apps/v1alpha1/yurtingress.go | 184 ++++
 .../apps/v1alpha1/interface.go | 7 +
 .../apps/v1alpha1/yurtingress.go | 89 ++
 .../informers/externalversions/generic.go | 2 +
 .../apps/v1alpha1/expansion_generated.go | 4 +
 .../listers/apps/v1alpha1/yurtingress.go | 68 ++
 .../constant/nginx-ingress-controller-tmpl.go | 821 ++++++++++++++++++
 pkg/yurtappmanager/controller/controllers.go | 3 +-
 .../yurtingress/yurtingress_controller.go | 429 +++++++++
 .../util/kubernetes/apply_addons.go | 294 +++++++
 pkg/yurtappmanager/util/kubernetes/util.go | 576 ++++++++++++
 25 files changed, 3279 insertions(+), 1 deletion(-)
 create mode 100644 config/yurt-app-manager/crd/bases/apps.openyurt.io_yurtingresses.yaml
 create mode 100644 config/yurt-app-manager/crd/patches/cainjection_in_yurtingresses.yaml
 create mode 100644 config/yurt-app-manager/crd/patches/webhook_in_yurtingresses.yaml
 create mode 100644 config/yurt-app-manager/rbac/yurtingress_editor_role.yaml
 create mode 100644 config/yurt-app-manager/rbac/yurtingress_viewer_role.yaml
 create mode 100644 pkg/yurtappmanager/apis/apps/v1alpha1/yurtingress_types.go
 create mode 100644 pkg/yurtappmanager/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_yurtingress.go
 create mode 100644 pkg/yurtappmanager/client/clientset/versioned/typed/apps/v1alpha1/yurtingress.go
 create mode 100644 pkg/yurtappmanager/client/informers/externalversions/apps/v1alpha1/yurtingress.go
 create mode 100644 pkg/yurtappmanager/client/listers/apps/v1alpha1/yurtingress.go
 create mode 100644 pkg/yurtappmanager/constant/nginx-ingress-controller-tmpl.go
 create mode 100644 pkg/yurtappmanager/controller/yurtingress/yurtingress_controller.go
 create mode 100644 pkg/yurtappmanager/util/kubernetes/apply_addons.go
 create mode 100644 pkg/yurtappmanager/util/kubernetes/util.go

diff --git a/cmd/yurt-app-manager/app/core.go b/cmd/yurt-app-manager/app/core.go
index b6b9cb6..196d4a3 100644
--- a/cmd/yurt-app-manager/app/core.go
+++ b/cmd/yurt-app-manager/app/core.go
@@ -40,6 +40,7 @@ import (
 	"k8s.io/klog"
 	"k8s.io/klog/klogr"
 	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 	// +kubebuilder:scaffold:imports
 )

@@ -105,6 +106,10 @@ func Run(opts
*options.YurtAppOptions) { cfg := ctrl.GetConfigOrDie() setRestConfig(cfg) + cacheDisableObjs := []client.Object{ + &appsv1alpha1.YurtIngress{}, + } + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ Scheme: scheme, MetricsBindAddress: opts.MetricsAddr, @@ -114,6 +119,7 @@ func Run(opts *options.YurtAppOptions) { LeaderElectionNamespace: opts.LeaderElectionNamespace, LeaderElectionResourceLock: resourcelock.LeasesResourceLock, // use lease to election Namespace: opts.Namespace, + ClientDisableCacheFor: cacheDisableObjs, }) if err != nil { setupLog.Error(err, "unable to start manager") diff --git a/config/yurt-app-manager/crd/bases/apps.openyurt.io_yurtingresses.yaml b/config/yurt-app-manager/crd/bases/apps.openyurt.io_yurtingresses.yaml new file mode 100644 index 0000000..438f5f5 --- /dev/null +++ b/config/yurt-app-manager/crd/bases/apps.openyurt.io_yurtingresses.yaml @@ -0,0 +1,157 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.9 + creationTimestamp: null + name: yurtingresses.apps.openyurt.io +spec: + additionalPrinterColumns: + - JSONPath: .status.nginx_ingress_controller_version + description: The nginx ingress controller version + name: Nginx-Ingress-Version + type: string + - JSONPath: .status.ingress_controller_replicas_per_pool + description: The nginx ingress controller replicas per pool + name: Replicas-Per-Pool + type: integer + - JSONPath: .status.readyNum + description: The number of pools on which ingress is enabled + name: ReadyNum + type: integer + - JSONPath: .status.unreadyNum + description: The number of pools on which ingress is enabling or enable failed + name: NotReadyNum + type: integer + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: apps.openyurt.io + names: + categories: + - all + kind: YurtIngress + listKind: YurtIngressList + plural: yurtingresses + shortNames: + - ying + singular: yurtingress + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + description: YurtIngress is the Schema for the yurtingresses API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: YurtIngressSpec defines the desired state of YurtIngress + properties: + ingress_controller_replicas_per_pool: + description: Indicates the number of the ingress controllers to be deployed + under all the specified nodepools. + format: int32 + type: integer + pools: + description: Indicates all the nodepools on which to enable ingress. + items: + description: IngressPool defines the details of a Pool for ingress + properties: + name: + description: Indicates the pool name. 
+ type: string + required: + - name + type: object + type: array + type: object + status: + description: YurtIngressStatus defines the observed state of YurtIngress + properties: + conditions: + description: Indicates all the nodepools on which to enable ingress. + properties: + ingressreadypools: + description: Indicates the pools that ingress controller is deployed + successfully. + items: + type: string + type: array + ingressunreadypools: + description: Indicates the pools that ingress controller is being + deployed or deployed failed. + items: + description: IngressNotReadyPool defines the condition details + of an ingress not ready Pool + properties: + name: + description: Indicates the pool name. + type: string + poolinfo: + description: Info of ingress not ready condition. + properties: + lastTransitionTime: + description: Last time the condition transitioned from + one status to another. + format: date-time + type: string + message: + description: A human readable message indicating details + about the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + type: + description: Type of ingress not ready condition. + type: string + type: object + required: + - name + type: object + type: array + type: object + ingress_controller_replicas_per_pool: + description: Indicates the number of the ingress controllers deployed + under all the specified nodepools. + format: int32 + type: integer + nginx_ingress_controller_version: + description: Indicates the nginx ingress controller version deployed + under all the specified nodepools. + type: string + readyNum: + description: Total number of ready pools on which ingress is enabled. + format: int32 + type: integer + unreadyNum: + description: Total number of unready pools on which ingress is enabling + or enable failed. + format: int32 + type: integer + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/yurt-app-manager/crd/kustomization.yaml b/config/yurt-app-manager/crd/kustomization.yaml index 73ac1b8..91f7216 100644 --- a/config/yurt-app-manager/crd/kustomization.yaml +++ b/config/yurt-app-manager/crd/kustomization.yaml @@ -5,6 +5,7 @@ resources: - bases/apps.openyurt.io_uniteddeployments.yaml - bases/apps.openyurt.io_nodepools.yaml - bases/apps.openyurt.io_yurtappdaemons.yaml +- bases/apps.openyurt.io_yurtingresses.yaml patchesStrategicMerge: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. diff --git a/config/yurt-app-manager/crd/patches/cainjection_in_yurtingresses.yaml b/config/yurt-app-manager/crd/patches/cainjection_in_yurtingresses.yaml new file mode 100644 index 0000000..2aa6e28 --- /dev/null +++ b/config/yurt-app-manager/crd/patches/cainjection_in_yurtingresses.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: yurtingresses.apps.openyurt.io diff --git a/config/yurt-app-manager/crd/patches/webhook_in_yurtingresses.yaml b/config/yurt-app-manager/crd/patches/webhook_in_yurtingresses.yaml new file mode 100644 index 0000000..b5de3d1 --- /dev/null +++ b/config/yurt-app-manager/crd/patches/webhook_in_yurtingresses.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: yurtingresses.apps.openyurt.io +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: kube-system + name: webhook-service + path: /convert diff --git a/config/yurt-app-manager/rbac/role.yaml b/config/yurt-app-manager/rbac/role.yaml index af6b6d7..6b1cb4b 100644 --- a/config/yurt-app-manager/rbac/role.yaml +++ b/config/yurt-app-manager/rbac/role.yaml @@ -142,6 +142,38 @@ rules: - get - patch - update +- apiGroups: + - apps.openyurt.io + resources: + - yurtingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps.openyurt.io + resources: + - yurtingresses/status + verbs: + - get + - patch + - update +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - coordination.k8s.io resources: @@ -154,6 +186,18 @@ rules: - patch - update - watch +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - "" resources: @@ -166,6 +210,18 @@ rules: - patch - update - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - "" resources: @@ -212,3 +268,51 @@ rules: - patch - update - watch +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + verbs: + - '*' diff --git a/config/yurt-app-manager/rbac/yurtingress_editor_role.yaml b/config/yurt-app-manager/rbac/yurtingress_editor_role.yaml new file mode 100644 index 0000000..a19af8a --- /dev/null +++ b/config/yurt-app-manager/rbac/yurtingress_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit yurtingresses. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: yurtingress-editor-role +rules: +- apiGroups: + - apps.openyurt.io + resources: + - yurtingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps.openyurt.io + resources: + - yurtingresses/status + verbs: + - get diff --git a/config/yurt-app-manager/rbac/yurtingress_viewer_role.yaml b/config/yurt-app-manager/rbac/yurtingress_viewer_role.yaml new file mode 100644 index 0000000..4665402 --- /dev/null +++ b/config/yurt-app-manager/rbac/yurtingress_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view yurtingresses. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: yurtingress-viewer-role +rules: +- apiGroups: + - apps.openyurt.io + resources: + - yurtingresses + verbs: + - get + - list + - watch +- apiGroups: + - apps.openyurt.io + resources: + - yurtingresses/status + verbs: + - get diff --git a/pkg/yurtappmanager/apis/apps/v1alpha1/yurtingress_types.go b/pkg/yurtappmanager/apis/apps/v1alpha1/yurtingress_types.go new file mode 100644 index 0000000..66223ac --- /dev/null +++ b/pkg/yurtappmanager/apis/apps/v1alpha1/yurtingress_types.go @@ -0,0 +1,149 @@ +/* +Copyright 2021 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Define the default nodepool ingress related values +const ( + // DefaultIngressControllerReplicasPerPool defines the default ingress controller replicas per pool + DefaultIngressControllerReplicasPerPool int32 = 1 + // NginxIngressControllerVersion defines the nginx ingress controller version + NginxIngressControllerVersion = "0.48.1" + // SingletonYurtIngressInstanceName defines the singleton instance name of YurtIngress + SingletonYurtIngressInstanceName = "yurtingress-singleton" + // YurtIngressFinalizer is used to cleanup ingress resources when singleton YurtIngress CR is deleted + YurtIngressFinalizer = "ingress.operator.openyurt.io" +) + +type IngressNotReadyType string + +const ( + IngressPending IngressNotReadyType = "Pending" + IngressFailure IngressNotReadyType = "Failure" +) + +// IngressPool defines the details of a Pool for ingress +type IngressPool struct { + // Indicates the pool name. + Name string `json:"name"` + + // Pool specific configuration will be supported in future. +} + +// IngressNotReadyConditionInfo defines the details info of an ingress not ready Pool +type IngressNotReadyConditionInfo struct { + // Type of ingress not ready condition. + Type IngressNotReadyType `json:"type,omitempty"` + + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + + // The reason for the condition's last transition. + Reason string `json:"reason,omitempty"` + + // A human readable message indicating details about the transition. 
+ Message string `json:"message,omitempty"` +} + +// IngressNotReadyPool defines the condition details of an ingress not ready Pool +type IngressNotReadyPool struct { + // Indicates the pool name. + Name string `json:"name"` + + // Info of ingress not ready condition. + Info *IngressNotReadyConditionInfo `json:"poolinfo,omitempty"` +} + +// YurtIngressSpec defines the desired state of YurtIngress +type YurtIngressSpec struct { + // Indicates the number of the ingress controllers to be deployed under all the specified nodepools. + // +optional + Replicas int32 `json:"ingress_controller_replicas_per_pool,omitempty"` + + // Indicates all the nodepools on which to enable ingress. + // +optional + Pools []IngressPool `json:"pools,omitempty"` +} + +// YurtIngressCondition describes current state of a YurtIngress +type YurtIngressCondition struct { + // Indicates the pools that ingress controller is deployed successfully. + IngressReadyPools []string `json:"ingressreadypools,omitempty"` + + // Indicates the pools that ingress controller is being deployed or deployed failed. + IngressNotReadyPools []IngressNotReadyPool `json:"ingressunreadypools,omitempty"` +} + +// YurtIngressStatus defines the observed state of YurtIngress +type YurtIngressStatus struct { + // Indicates the number of the ingress controllers deployed under all the specified nodepools. + // +optional + Replicas int32 `json:"ingress_controller_replicas_per_pool,omitempty"` + + // Indicates all the nodepools on which to enable ingress. + // +optional + Conditions YurtIngressCondition `json:"conditions,omitempty"` + + // Indicates the nginx ingress controller version deployed under all the specified nodepools. + // +optional + Version string `json:"nginx_ingress_controller_version,omitempty"` + + // Total number of ready pools on which ingress is enabled. + // +optional + ReadyNum int32 `json:"readyNum"` + + // Total number of unready pools on which ingress is enabling or enable failed. 
+ // +optional + UnreadyNum int32 `json:"unreadyNum"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope=Cluster,path=yurtingresses,shortName=ying,categories=all +// +kubebuilder:printcolumn:name="Nginx-Ingress-Version",type="string",JSONPath=".status.nginx_ingress_controller_version",description="The nginx ingress controller version" +// +kubebuilder:printcolumn:name="Replicas-Per-Pool",type="integer",JSONPath=".status.ingress_controller_replicas_per_pool",description="The nginx ingress controller replicas per pool" +// +kubebuilder:printcolumn:name="ReadyNum",type="integer",JSONPath=".status.readyNum",description="The number of pools on which ingress is enabled" +// +kubebuilder:printcolumn:name="NotReadyNum",type="integer",JSONPath=".status.unreadyNum",description="The number of pools on which ingress is enabling or enable failed" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +genclient:nonNamespaced +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// YurtIngress is the Schema for the yurtingresses API +type YurtIngress struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec YurtIngressSpec `json:"spec,omitempty"` + Status YurtIngressStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// YurtIngressList contains a list of YurtIngress +type YurtIngressList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []YurtIngress `json:"items"` +} + +func init() { + SchemeBuilder.Register(&YurtIngress{}, &YurtIngressList{}) +} diff --git a/pkg/yurtappmanager/apis/apps/v1alpha1/zz_generated.deepcopy.go b/pkg/yurtappmanager/apis/apps/v1alpha1/zz_generated.deepcopy.go index 17d2af8..a346295 100644 --- a/pkg/yurtappmanager/apis/apps/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/yurtappmanager/apis/apps/v1alpha1/zz_generated.deepcopy.go @@ -43,6 +43,57 @@ func (in *DeploymentTemplateSpec) DeepCopy() *DeploymentTemplateSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressNotReadyConditionInfo) DeepCopyInto(out *IngressNotReadyConditionInfo) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressNotReadyConditionInfo. +func (in *IngressNotReadyConditionInfo) DeepCopy() *IngressNotReadyConditionInfo { + if in == nil { + return nil + } + out := new(IngressNotReadyConditionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressNotReadyPool) DeepCopyInto(out *IngressNotReadyPool) { + *out = *in + if in.Info != nil { + in, out := &in.Info, &out.Info + *out = new(IngressNotReadyConditionInfo) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressNotReadyPool. +func (in *IngressNotReadyPool) DeepCopy() *IngressNotReadyPool { + if in == nil { + return nil + } + out := new(IngressNotReadyPool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressPool) DeepCopyInto(out *IngressPool) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressPool. +func (in *IngressPool) DeepCopy() *IngressPool { + if in == nil { + return nil + } + out := new(IngressPool) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodePool) DeepCopyInto(out *NodePool) { *out = *in @@ -533,3 +584,125 @@ func (in *YurtAppDaemonStatus) DeepCopy() *YurtAppDaemonStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YurtIngress) DeepCopyInto(out *YurtIngress) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YurtIngress. +func (in *YurtIngress) DeepCopy() *YurtIngress { + if in == nil { + return nil + } + out := new(YurtIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *YurtIngress) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YurtIngressCondition) DeepCopyInto(out *YurtIngressCondition) { + *out = *in + if in.IngressReadyPools != nil { + in, out := &in.IngressReadyPools, &out.IngressReadyPools + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IngressNotReadyPools != nil { + in, out := &in.IngressNotReadyPools, &out.IngressNotReadyPools + *out = make([]IngressNotReadyPool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YurtIngressCondition. +func (in *YurtIngressCondition) DeepCopy() *YurtIngressCondition { + if in == nil { + return nil + } + out := new(YurtIngressCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YurtIngressList) DeepCopyInto(out *YurtIngressList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]YurtIngress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YurtIngressList. +func (in *YurtIngressList) DeepCopy() *YurtIngressList { + if in == nil { + return nil + } + out := new(YurtIngressList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *YurtIngressList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *YurtIngressSpec) DeepCopyInto(out *YurtIngressSpec) { + *out = *in + if in.Pools != nil { + in, out := &in.Pools, &out.Pools + *out = make([]IngressPool, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YurtIngressSpec. +func (in *YurtIngressSpec) DeepCopy() *YurtIngressSpec { + if in == nil { + return nil + } + out := new(YurtIngressSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YurtIngressStatus) DeepCopyInto(out *YurtIngressStatus) { + *out = *in + in.Conditions.DeepCopyInto(&out.Conditions) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YurtIngressStatus. +func (in *YurtIngressStatus) DeepCopy() *YurtIngressStatus { + if in == nil { + return nil + } + out := new(YurtIngressStatus) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/yurtappmanager/client/clientset/versioned/typed/apps/v1alpha1/apps_client.go b/pkg/yurtappmanager/client/clientset/versioned/typed/apps/v1alpha1/apps_client.go index d2feb52..ca0c6db 100644 --- a/pkg/yurtappmanager/client/clientset/versioned/typed/apps/v1alpha1/apps_client.go +++ b/pkg/yurtappmanager/client/clientset/versioned/typed/apps/v1alpha1/apps_client.go @@ -29,6 +29,7 @@ type AppsV1alpha1Interface interface { NodePoolsGetter UnitedDeploymentsGetter YurtAppDaemonsGetter + YurtIngressesGetter } // AppsV1alpha1Client is used to interact with features provided by the apps.openyurt.io group. @@ -48,6 +49,10 @@ func (c *AppsV1alpha1Client) YurtAppDaemons(namespace string) YurtAppDaemonInter return newYurtAppDaemons(c, namespace) } +func (c *AppsV1alpha1Client) YurtIngresses() YurtIngressInterface { + return newYurtIngresses(c) +} + // NewForConfig creates a new AppsV1alpha1Client for the given config. func NewForConfig(c *rest.Config) (*AppsV1alpha1Client, error) { config := *c diff --git a/pkg/yurtappmanager/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_apps_client.go b/pkg/yurtappmanager/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_apps_client.go index f04fa76..a65ffec 100644 --- a/pkg/yurtappmanager/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_apps_client.go +++ b/pkg/yurtappmanager/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_apps_client.go @@ -40,6 +40,10 @@ func (c *FakeAppsV1alpha1) YurtAppDaemons(namespace string) v1alpha1.YurtAppDaem return &FakeYurtAppDaemons{c, namespace} } +func (c *FakeAppsV1alpha1) YurtIngresses() v1alpha1.YurtIngressInterface { + return &FakeYurtIngresses{c} +} + // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. func (c *FakeAppsV1alpha1) RESTClient() rest.Interface { diff --git a/pkg/yurtappmanager/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_yurtingress.go b/pkg/yurtappmanager/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_yurtingress.go new file mode 100644 index 0000000..780535c --- /dev/null +++ b/pkg/yurtappmanager/client/clientset/versioned/typed/apps/v1alpha1/fake/fake_yurtingress.go @@ -0,0 +1,133 @@ +/* +Copyright 2020 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1alpha1 "github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/apis/apps/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeYurtIngresses implements YurtIngressInterface +type FakeYurtIngresses struct { + Fake *FakeAppsV1alpha1 +} + +var yurtingressesResource = schema.GroupVersionResource{Group: "apps.openyurt.io", Version: "v1alpha1", Resource: "yurtingresses"} + +var yurtingressesKind = schema.GroupVersionKind{Group: "apps.openyurt.io", Version: "v1alpha1", Kind: "YurtIngress"} + +// Get takes name of the yurtIngress, and returns the corresponding yurtIngress object, and an error if there is any. +func (c *FakeYurtIngresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.YurtIngress, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(yurtingressesResource, name), &v1alpha1.YurtIngress{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.YurtIngress), err +} + +// List takes label and field selectors, and returns the list of YurtIngresses that match those selectors. +func (c *FakeYurtIngresses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.YurtIngressList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(yurtingressesResource, yurtingressesKind, opts), &v1alpha1.YurtIngressList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.YurtIngressList{ListMeta: obj.(*v1alpha1.YurtIngressList).ListMeta} + for _, item := range obj.(*v1alpha1.YurtIngressList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested yurtIngresses. +func (c *FakeYurtIngresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(yurtingressesResource, opts)) +} + +// Create takes the representation of a yurtIngress and creates it. Returns the server's representation of the yurtIngress, and an error, if there is any. +func (c *FakeYurtIngresses) Create(ctx context.Context, yurtIngress *v1alpha1.YurtIngress, opts v1.CreateOptions) (result *v1alpha1.YurtIngress, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(yurtingressesResource, yurtIngress), &v1alpha1.YurtIngress{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.YurtIngress), err +} + +// Update takes the representation of a yurtIngress and updates it. Returns the server's representation of the yurtIngress, and an error, if there is any. 
+func (c *FakeYurtIngresses) Update(ctx context.Context, yurtIngress *v1alpha1.YurtIngress, opts v1.UpdateOptions) (result *v1alpha1.YurtIngress, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(yurtingressesResource, yurtIngress), &v1alpha1.YurtIngress{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.YurtIngress), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeYurtIngresses) UpdateStatus(ctx context.Context, yurtIngress *v1alpha1.YurtIngress, opts v1.UpdateOptions) (*v1alpha1.YurtIngress, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(yurtingressesResource, "status", yurtIngress), &v1alpha1.YurtIngress{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.YurtIngress), err +} + +// Delete takes name of the yurtIngress and deletes it. Returns an error if one occurs. +func (c *FakeYurtIngresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(yurtingressesResource, name), &v1alpha1.YurtIngress{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeYurtIngresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(yurtingressesResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.YurtIngressList{}) + return err +} + +// Patch applies the patch and returns the patched yurtIngress. +func (c *FakeYurtIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.YurtIngress, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(yurtingressesResource, name, pt, data, subresources...), &v1alpha1.YurtIngress{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.YurtIngress), err +} diff --git a/pkg/yurtappmanager/client/clientset/versioned/typed/apps/v1alpha1/generated_expansion.go b/pkg/yurtappmanager/client/clientset/versioned/typed/apps/v1alpha1/generated_expansion.go index e8805b4..5f7e2d6 100644 --- a/pkg/yurtappmanager/client/clientset/versioned/typed/apps/v1alpha1/generated_expansion.go +++ b/pkg/yurtappmanager/client/clientset/versioned/typed/apps/v1alpha1/generated_expansion.go @@ -23,3 +23,5 @@ type NodePoolExpansion interface{} type UnitedDeploymentExpansion interface{} type YurtAppDaemonExpansion interface{} + +type YurtIngressExpansion interface{} diff --git a/pkg/yurtappmanager/client/clientset/versioned/typed/apps/v1alpha1/yurtingress.go b/pkg/yurtappmanager/client/clientset/versioned/typed/apps/v1alpha1/yurtingress.go new file mode 100644 index 0000000..c7a7f84 --- /dev/null +++ b/pkg/yurtappmanager/client/clientset/versioned/typed/apps/v1alpha1/yurtingress.go @@ -0,0 +1,184 @@ +/* +Copyright 2020 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/apis/apps/v1alpha1" + scheme "github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// YurtIngressesGetter has a method to return a YurtIngressInterface. +// A group's client should implement this interface. +type YurtIngressesGetter interface { + YurtIngresses() YurtIngressInterface +} + +// YurtIngressInterface has methods to work with YurtIngress resources. +type YurtIngressInterface interface { + Create(ctx context.Context, yurtIngress *v1alpha1.YurtIngress, opts v1.CreateOptions) (*v1alpha1.YurtIngress, error) + Update(ctx context.Context, yurtIngress *v1alpha1.YurtIngress, opts v1.UpdateOptions) (*v1alpha1.YurtIngress, error) + UpdateStatus(ctx context.Context, yurtIngress *v1alpha1.YurtIngress, opts v1.UpdateOptions) (*v1alpha1.YurtIngress, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.YurtIngress, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.YurtIngressList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.YurtIngress, err error) + YurtIngressExpansion +} + +// yurtIngresses implements YurtIngressInterface +type yurtIngresses struct { + client rest.Interface +} + +// newYurtIngresses returns a YurtIngresses +func newYurtIngresses(c *AppsV1alpha1Client) *yurtIngresses { + return &yurtIngresses{ + client: c.RESTClient(), + } +} + +// Get takes name of the yurtIngress, and returns the corresponding yurtIngress object, and an error if there is any. +func (c *yurtIngresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.YurtIngress, err error) { + result = &v1alpha1.YurtIngress{} + err = c.client.Get(). + Resource("yurtingresses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of YurtIngresses that match those selectors. +func (c *yurtIngresses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.YurtIngressList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.YurtIngressList{} + err = c.client.Get(). + Resource("yurtingresses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested yurtIngresses. +func (c *yurtIngresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("yurtingresses"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a yurtIngress and creates it. Returns the server's representation of the yurtIngress, and an error, if there is any. +func (c *yurtIngresses) Create(ctx context.Context, yurtIngress *v1alpha1.YurtIngress, opts v1.CreateOptions) (result *v1alpha1.YurtIngress, err error) { + result = &v1alpha1.YurtIngress{} + err = c.client.Post(). + Resource("yurtingresses"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(yurtIngress). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a yurtIngress and updates it. Returns the server's representation of the yurtIngress, and an error, if there is any. +func (c *yurtIngresses) Update(ctx context.Context, yurtIngress *v1alpha1.YurtIngress, opts v1.UpdateOptions) (result *v1alpha1.YurtIngress, err error) { + result = &v1alpha1.YurtIngress{} + err = c.client.Put(). + Resource("yurtingresses"). + Name(yurtIngress.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(yurtIngress). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *yurtIngresses) UpdateStatus(ctx context.Context, yurtIngress *v1alpha1.YurtIngress, opts v1.UpdateOptions) (result *v1alpha1.YurtIngress, err error) { + result = &v1alpha1.YurtIngress{} + err = c.client.Put(). + Resource("yurtingresses"). + Name(yurtIngress.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(yurtIngress). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the yurtIngress and deletes it. Returns an error if one occurs. +func (c *yurtIngresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("yurtingresses"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *yurtIngresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("yurtingresses"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched yurtIngress. +func (c *yurtIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.YurtIngress, err error) { + result = &v1alpha1.YurtIngress{} + err = c.client.Patch(pt). + Resource("yurtingresses"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/yurtappmanager/client/informers/externalversions/apps/v1alpha1/interface.go b/pkg/yurtappmanager/client/informers/externalversions/apps/v1alpha1/interface.go index 662f921..1f62f85 100644 --- a/pkg/yurtappmanager/client/informers/externalversions/apps/v1alpha1/interface.go +++ b/pkg/yurtappmanager/client/informers/externalversions/apps/v1alpha1/interface.go @@ -30,6 +30,8 @@ type Interface interface { UnitedDeployments() UnitedDeploymentInformer // YurtAppDaemons returns a YurtAppDaemonInformer. 
YurtAppDaemons() YurtAppDaemonInformer + // YurtIngresses returns a YurtIngressInformer. + YurtIngresses() YurtIngressInformer } type version struct { @@ -57,3 +59,8 @@ func (v *version) UnitedDeployments() UnitedDeploymentInformer { func (v *version) YurtAppDaemons() YurtAppDaemonInformer { return &yurtAppDaemonInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } + +// YurtIngresses returns a YurtIngressInformer. +func (v *version) YurtIngresses() YurtIngressInformer { + return &yurtIngressInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/pkg/yurtappmanager/client/informers/externalversions/apps/v1alpha1/yurtingress.go b/pkg/yurtappmanager/client/informers/externalversions/apps/v1alpha1/yurtingress.go new file mode 100644 index 0000000..9ada0c1 --- /dev/null +++ b/pkg/yurtappmanager/client/informers/externalversions/apps/v1alpha1/yurtingress.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + appsv1alpha1 "github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/apis/apps/v1alpha1" + versioned "github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/client/clientset/versioned" + internalinterfaces "github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/client/listers/apps/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// YurtIngressInformer provides access to a shared informer and lister for +// YurtIngresses. +type YurtIngressInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.YurtIngressLister +} + +type yurtIngressInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewYurtIngressInformer constructs a new informer for YurtIngress type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewYurtIngressInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredYurtIngressInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredYurtIngressInformer constructs a new informer for YurtIngress type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredYurtIngressInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1alpha1().YurtIngresses().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1alpha1().YurtIngresses().Watch(context.TODO(), options) + }, + }, + &appsv1alpha1.YurtIngress{}, + resyncPeriod, + indexers, + ) +} + +func (f *yurtIngressInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredYurtIngressInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *yurtIngressInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&appsv1alpha1.YurtIngress{}, f.defaultInformer) +} + +func (f *yurtIngressInformer) Lister() v1alpha1.YurtIngressLister { + return v1alpha1.NewYurtIngressLister(f.Informer().GetIndexer()) +} diff --git a/pkg/yurtappmanager/client/informers/externalversions/generic.go b/pkg/yurtappmanager/client/informers/externalversions/generic.go index f13345c..4ad9d1c 100644 --- a/pkg/yurtappmanager/client/informers/externalversions/generic.go +++ b/pkg/yurtappmanager/client/informers/externalversions/generic.go @@ -59,6 +59,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1alpha1().UnitedDeployments().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("yurtappdaemons"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1alpha1().YurtAppDaemons().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("yurtingresses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1alpha1().YurtIngresses().Informer()}, nil } diff --git a/pkg/yurtappmanager/client/listers/apps/v1alpha1/expansion_generated.go b/pkg/yurtappmanager/client/listers/apps/v1alpha1/expansion_generated.go index de3fc7e..4cd31e8 100644 --- a/pkg/yurtappmanager/client/listers/apps/v1alpha1/expansion_generated.go +++ b/pkg/yurtappmanager/client/listers/apps/v1alpha1/expansion_generated.go @@ -37,3 +37,7 @@ type YurtAppDaemonListerExpansion interface{} // YurtAppDaemonNamespaceListerExpansion allows custom methods to be added to // YurtAppDaemonNamespaceLister. type YurtAppDaemonNamespaceListerExpansion interface{} + +// YurtIngressListerExpansion allows custom methods to be added to +// YurtIngressLister. +type YurtIngressListerExpansion interface{} diff --git a/pkg/yurtappmanager/client/listers/apps/v1alpha1/yurtingress.go b/pkg/yurtappmanager/client/listers/apps/v1alpha1/yurtingress.go new file mode 100644 index 0000000..1531ac0 --- /dev/null +++ b/pkg/yurtappmanager/client/listers/apps/v1alpha1/yurtingress.go @@ -0,0 +1,68 @@ +/* +Copyright 2020 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/apis/apps/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// YurtIngressLister helps list YurtIngresses. +// All objects returned here must be treated as read-only. +type YurtIngressLister interface { + // List lists all YurtIngresses in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.YurtIngress, err error) + // Get retrieves the YurtIngress from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.YurtIngress, error) + YurtIngressListerExpansion +} + +// yurtIngressLister implements the YurtIngressLister interface. +type yurtIngressLister struct { + indexer cache.Indexer +} + +// NewYurtIngressLister returns a new YurtIngressLister. +func NewYurtIngressLister(indexer cache.Indexer) YurtIngressLister { + return &yurtIngressLister{indexer: indexer} +} + +// List lists all YurtIngresses in the indexer. +func (s *yurtIngressLister) List(selector labels.Selector) (ret []*v1alpha1.YurtIngress, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.YurtIngress)) + }) + return ret, err +} + +// Get retrieves the YurtIngress from the index for a given name. +func (s *yurtIngressLister) Get(name string) (*v1alpha1.YurtIngress, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("yurtingress"), name) + } + return obj.(*v1alpha1.YurtIngress), nil +} diff --git a/pkg/yurtappmanager/constant/nginx-ingress-controller-tmpl.go b/pkg/yurtappmanager/constant/nginx-ingress-controller-tmpl.go new file mode 100644 index 0000000..43dcc9a --- /dev/null +++ b/pkg/yurtappmanager/constant/nginx-ingress-controller-tmpl.go @@ -0,0 +1,821 @@ +/* +Copyright 2021 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package constant + +const ( + NginxIngressControllerNamespace = ` +apiVersion: v1 +kind: Namespace +metadata: + name: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx +` + NginxIngressControllerClusterRole = ` +# Source: ingress-nginx/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + name: ingress-nginx +rules: + - apiGroups: + - '' + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + verbs: + - list + - watch + - apiGroups: + - '' + resources: + - nodes + verbs: + - get + - apiGroups: + - '' + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - extensions + - networking.k8s.io # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - events + verbs: + - create + - patch + - apiGroups: + - extensions + - networking.k8s.io # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io # k8s 1.14+ + resources: + - ingressclasses + verbs: + - get + - list + - watch +` + NginxIngressAdmissionWebhookClusterRole = ` +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ingress-nginx-admission + annotations: + helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +` + NginxIngressControllerServiceAccount = ` +# Source: ingress-nginx/templates/controller-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx + namespace: ingress-nginx +automountServiceAccountToken: true +` + NginxIngressControllerConfigMap = ` +# Source: ingress-nginx/templates/controller-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx-controller + namespace: ingress-nginx +data: +` + NginxIngressControllerClusterRoleBinding = ` +# Source: ingress-nginx/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + name: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 
ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: ingress-nginx +` + NginxIngressControllerRole = ` +# Source: ingress-nginx/templates/controller-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx + namespace: ingress-nginx +rules: + - apiGroups: + - '' + resources: + - namespaces + verbs: + - get + - apiGroups: + - '' + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - extensions + - networking.k8s.io # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - extensions + - networking.k8s.io # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io # k8s 1.14+ + resources: + - ingressclasses + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - configmaps + verbs: + - get + - update + - apiGroups: + - '' + resources: + - configmaps + verbs: + - create + - apiGroups: + - '' + resources: + - events + verbs: + - create + - patch +` + NginxIngressControllerRoleBinding = ` +# Source: ingress-nginx/templates/controller-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx + namespace: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: ingress-nginx +` + NginxIngressAdmissionWebhookService = ` +# Source: ingress-nginx/templates/controller-service-webhook.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx-webhook + app.kubernetes.io/instance: ingress-nginx-webhook + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller-webhook + name: {{.nodepool_name}}-ingress-nginx-controller-admission + namespace: ingress-nginx +spec: + type: ClusterIP + ports: + - name: https-webhook + port: 443 + targetPort: webhook + selector: + app.kubernetes.io/name: ingress-nginx-webhook + app.kubernetes.io/instance: ingress-nginx-webhook + app.kubernetes.io/component: controller-webhook + yurtingress.io/nodepool: {{.nodepool_name}} +` + NginxIngressControllerService = ` +# Source: ingress-nginx/templates/controller-service.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: {{.nodepool_name}}-ingress-nginx-controller + namespace: ingress-nginx +spec: + type: NodePort + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http + - name: https + port: 443 + protocol: TCP + targetPort: https + selector: + app.kubernetes.io/name: 
ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + yurtingress.io/nodepool: {{.nodepool_name}} +` + NginxIngressControllerNodePoolDeployment = ` +# Source: ingress-nginx/templates/controller-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + yurtingress.io/nodepool: {{.nodepool_name}} + name: {{.nodepool_name}}-ingress-nginx-controller + namespace: ingress-nginx +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + yurtingress.io/nodepool: {{.nodepool_name}} + revisionHistoryLimit: 10 + minReadySeconds: 0 + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + yurtingress.io/nodepool: {{.nodepool_name}} + spec: + dnsPolicy: ClusterFirst + containers: + - name: controller + image: k8s.gcr.io/ingress-nginx/controller:v0.48.1@sha256:e9fb216ace49dfa4a5983b183067e97496e7a8b307d2093f4278cd550c303899 + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + args: + - /nginx-ingress-controller + - --election-id=ingress-controller-leader-edge + - --ingress-class={{.nodepool_name}} + - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: 101 + allowPrivilegeEscalation: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + ports: + - name: http + containerPort: 80 + protocol: TCP + - name: https + containerPort: 443 + protocol: TCP + - name: webhook + containerPort: 8443 + protocol: TCP + resources: + requests: + cpu: 100m + memory: 90Mi + nodeSelector: + kubernetes.io/os: linux + apps.openyurt.io/nodepool: {{.nodepool_name}} + serviceAccountName: ingress-nginx + terminationGracePeriodSeconds: 300 +` + NginxIngressAdmissionWebhookDeployment = ` +# Source: ingress-nginx/templates/controller-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx-webhook + app.kubernetes.io/instance: ingress-nginx-webhook + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller-webhook + name: {{.nodepool_name}}-ingress-nginx-admission-webhook + namespace: ingress-nginx +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx-webhook + app.kubernetes.io/instance: ingress-nginx-webhook + app.kubernetes.io/component: controller-webhook + yurtingress.io/nodepool: {{.nodepool_name}} + revisionHistoryLimit: 10 + minReadySeconds: 0 + template: + metadata: + labels: + 
app.kubernetes.io/name: ingress-nginx-webhook + app.kubernetes.io/instance: ingress-nginx-webhook + app.kubernetes.io/component: controller-webhook + yurtingress.io/nodepool: {{.nodepool_name}} + spec: + dnsPolicy: ClusterFirst + containers: + - name: controller + image: k8s.gcr.io/ingress-nginx/controller:v0.48.1@sha256:e9fb216ace49dfa4a5983b183067e97496e7a8b307d2093f4278cd550c303899 + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + args: + - /nginx-ingress-controller + - --election-id=ingress-controller-leader-webhook + - --ingress-class={{.nodepool_name}} + - --update-status=false + - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: 101 + allowPrivilegeEscalation: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + ports: + - name: http + containerPort: 80 + protocol: TCP + - name: https + containerPort: 443 + protocol: TCP + - name: webhook + containerPort: 8443 + protocol: TCP + volumeMounts: + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + resources: + requests: + cpu: 100m + memory: 90Mi + nodeSelector: + openyurt.io/is-edge-worker: "false" + kubernetes.io/arch: amd64 + kubernetes.io/os: linux + serviceAccountName: ingress-nginx + terminationGracePeriodSeconds: 300 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + volumes: + - name: webhook-cert + secret: + secretName: {{.nodepool_name}}-ingress-nginx-admission +` + NginxIngressValidatingWebhookConfiguration = ` +# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml +# before changing this value, check the required kubernetes version +# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx-webhook + app.kubernetes.io/instance: ingress-nginx-webhook + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + name: {{.nodepool_name}}-ingress-nginx-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + matchPolicy: Equivalent + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: Fail + sideEffects: None + admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + namespace: ingress-nginx + name: {{.nodepool_name}}-ingress-nginx-controller-admission + path: /networking/v1beta1/ingresses +` + NginxIngressAdmissionWebhookServiceAccount = ` +# Source: 
ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ingress-nginx-admission + namespace: ingress-nginx + annotations: + helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx-webhook + app.kubernetes.io/instance: ingress-nginx-webhook + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +` + NginxIngressAdmissionWebhookClusterRoleBinding = ` +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ingress-nginx-admission + annotations: + helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx-webhook + app.kubernetes.io/instance: ingress-nginx-webhook + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: ingress-nginx-admission + namespace: ingress-nginx +` + NginxIngressAdmissionWebhookRole = ` +# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: ingress-nginx-admission + namespace: ingress-nginx + annotations: + helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx-webhook + app.kubernetes.io/instance: ingress-nginx-webhook + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - '' + resources: + - secrets + verbs: + - get + - create +` + NginxIngressAdmissionWebhookRoleBinding = ` +# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: ingress-nginx-admission + namespace: ingress-nginx + annotations: + helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx-webhook + app.kubernetes.io/instance: ingress-nginx-webhook + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: ingress-nginx-admission + namespace: ingress-nginx +` + NginxIngressAdmissionWebhookJob = ` +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: {{.nodepool_name}}-ingress-nginx-admission-create + namespace: ingress-nginx + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx-webhook + 
app.kubernetes.io/instance: ingress-nginx-webhook + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: {{.nodepool_name}}-ingress-nginx-admission-create + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx-webhook + app.kubernetes.io/instance: ingress-nginx-webhook + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: create + image: docker.io/jettech/kube-webhook-certgen:v1.5.1 + imagePullPolicy: IfNotPresent + args: + - create + - --host={{.nodepool_name}}-ingress-nginx-controller-admission,{{.nodepool_name}}-ingress-nginx-controller-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name={{.nodepool_name}}-ingress-nginx-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + nodeSelector: + openyurt.io/is-edge-worker: "false" + kubernetes.io/arch: amd64 + kubernetes.io/os: linux + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + restartPolicy: OnFailure + serviceAccountName: ingress-nginx-admission + securityContext: + runAsNonRoot: true + runAsUser: 2000 +` + NginxIngressAdmissionWebhookJobPatch = ` +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: {{.nodepool_name}}-ingress-nginx-admission-patch + namespace: ingress-nginx + annotations: + helm.sh/hook: post-install,post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx-webhook + app.kubernetes.io/instance: ingress-nginx-webhook + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: {{.nodepool_name}}-ingress-nginx-admission-patch + labels: + helm.sh/chart: ingress-nginx-3.34.0 + app.kubernetes.io/name: ingress-nginx-webhook + app.kubernetes.io/instance: ingress-nginx-webhook + app.kubernetes.io/version: 0.48.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: patch + image: docker.io/jettech/kube-webhook-certgen:v1.5.1 + imagePullPolicy: IfNotPresent + args: + - patch + - --webhook-name={{.nodepool_name}}-ingress-nginx-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name={{.nodepool_name}}-ingress-nginx-admission + - --patch-failure-policy=Fail + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + nodeSelector: + openyurt.io/is-edge-worker: "false" + kubernetes.io/arch: amd64 + kubernetes.io/os: linux + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + restartPolicy: OnFailure + serviceAccountName: ingress-nginx-admission + securityContext: + runAsNonRoot: true + runAsUser: 2000 +` +) diff --git a/pkg/yurtappmanager/controller/controllers.go b/pkg/yurtappmanager/controller/controllers.go index ae6a4b1..5f8effe 100644 --- a/pkg/yurtappmanager/controller/controllers.go +++ b/pkg/yurtappmanager/controller/controllers.go @@ -27,12 +27,13 @@ import ( "github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/controller/nodepool" 
"github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/controller/uniteddeployment" yurtappdaemon "github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/controller/yurtappdaemon" + "github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/controller/yurtingress" ) var controllerAddFuncs []func(manager.Manager, context.Context) error func init() { - controllerAddFuncs = append(controllerAddFuncs, uniteddeployment.Add, nodepool.Add, yurtappdaemon.Add) + controllerAddFuncs = append(controllerAddFuncs, uniteddeployment.Add, nodepool.Add, yurtappdaemon.Add, yurtingress.Add) } func SetupWithManager(m manager.Manager, ctx context.Context) error { diff --git a/pkg/yurtappmanager/controller/yurtingress/yurtingress_controller.go b/pkg/yurtappmanager/controller/yurtingress/yurtingress_controller.go new file mode 100644 index 0000000..f3ce52a --- /dev/null +++ b/pkg/yurtappmanager/controller/yurtingress/yurtingress_controller.go @@ -0,0 +1,429 @@ +/* +Copyright 2021 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package yurtingress + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + "k8s.io/klog" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + appsv1alpha1 "github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/apis/apps/v1alpha1" + "github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/util/gate" + yurtapputil "github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/util/kubernetes" + "github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/util/refmanager" + appsv1 "k8s.io/api/apps/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" +) + +const ( + controllerName = "yurtingress-controller" + ingressDeploymentLabel = "yurtingress.io/nodepool" +) + +const updateRetries = 5 + +// YurtIngressReconciler reconciles a YurtIngress object +type YurtIngressReconciler struct { + client.Client + Scheme *runtime.Scheme + recorder record.EventRecorder +} + +// Add creates a new YurtIngress Controller and adds it to the Manager with default RBAC. +// The Manager will set fields on the Controller and start it when the Manager is started. 
+func Add(mgr manager.Manager, ctx context.Context) error { + if !gate.ResourceEnabled(&appsv1alpha1.YurtIngress{}) { + return nil + } + return add(mgr, newReconciler(mgr)) +} + +// newReconciler returns a new reconcile.Reconciler +//func newReconciler(mgr manager.Manager, createSingletonPoolIngress bool) reconcile.Reconciler { +func newReconciler(mgr manager.Manager) reconcile.Reconciler { + return &YurtIngressReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + recorder: mgr.GetEventRecorderFor(controllerName), + } +} + +// add adds a new Controller to mgr with r as the reconcile.Reconciler +func add(mgr manager.Manager, r reconcile.Reconciler) error { + // Create a new controller + c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r}) + if err != nil { + return err + } + // Watch for changes to YurtIngress + err = c.Watch(&source.Kind{Type: &appsv1alpha1.YurtIngress{}}, &handler.EnqueueRequestForObject{}) + if err != nil { + return err + } + err = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForOwner{ + IsController: true, + OwnerType: &appsv1alpha1.YurtIngress{}, + }) + if err != nil { + return err + } + return nil +} + +// +kubebuilder:rbac:groups=apps.openyurt.io,resources=yurtingresses,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=apps.openyurt.io,resources=yurtingresses/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=namespaces,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=serviceaccounts,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=* +// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=* +// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles,verbs=* +// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterrolebindings,verbs=* + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. 
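+// Only the singleton YurtIngress instance is handled: the desired pools from the
+// spec are diffed against the pools recorded in the status conditions, the shared
+// nginx ingress resources are created when the first pools are added and removed
+// with the last one, per-pool deployments are created/deleted/scaled as needed,
+// and the status is updated at the end of every reconcile.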
+func (r *YurtIngressReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + _ = log.FromContext(ctx) + klog.V(4).Infof("Reconcile YurtIngress: %s", req.Name) + if req.Name != appsv1alpha1.SingletonYurtIngressInstanceName { + return ctrl.Result{}, nil + } + // Fetch the YurtIngress instance + instance := &appsv1alpha1.YurtIngress{} + err := r.Get(context.TODO(), req.NamespacedName, instance) + if err != nil { + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, err + } + // Add finalizer if not exist + if !controllerutil.ContainsFinalizer(instance, appsv1alpha1.YurtIngressFinalizer) { + controllerutil.AddFinalizer(instance, appsv1alpha1.YurtIngressFinalizer) + if err := r.Update(context.TODO(), instance); err != nil { + return ctrl.Result{}, err + } + } + // Handle ingress controller resources cleanup + if !instance.ObjectMeta.DeletionTimestamp.IsZero() { + return r.cleanupIngressResources(instance) + } + // Set the default version at current stage + instance.Status.Version = appsv1alpha1.NginxIngressControllerVersion + + var desiredPoolNames, currentPoolNames []string + desiredPoolNames = getDesiredPoolNames(instance) + currentPoolNames = getCurrentPoolNames(instance) + isIngressCRChanged := false + addedPools, removedPools, unchangedPools := getPools(desiredPoolNames, currentPoolNames) + if addedPools != nil { + klog.V(4).Infof("added pool list is %s", addedPools) + isIngressCRChanged = true + ownerRef := prepareDeploymentOwnerReferences(instance) + if currentPoolNames == nil { + if err := yurtapputil.CreateNginxIngressCommonResource(r.Client); err != nil { + return ctrl.Result{}, err + } + } + for _, pool := range addedPools { + replicas := instance.Spec.Replicas + if err := yurtapputil.CreateNginxIngressSpecificResource(r.Client, pool, replicas, ownerRef); err != nil { + return ctrl.Result{}, err + } + notReadyPool := appsv1alpha1.IngressNotReadyPool{Name: pool, Info: nil} + instance.Status.Conditions.IngressNotReadyPools = append(instance.Status.Conditions.IngressNotReadyPools, notReadyPool) + } + } + if removedPools != nil { + klog.V(4).Infof("removed pool list is %s", removedPools) + isIngressCRChanged = true + for _, pool := range removedPools { + if desiredPoolNames == nil { + if err := yurtapputil.DeleteNginxIngressSpecificResource(r.Client, pool, true); err != nil { + return ctrl.Result{}, err + } + } else { + if err := yurtapputil.DeleteNginxIngressSpecificResource(r.Client, pool, false); err != nil { + return ctrl.Result{}, err + } + } + if !removePoolfromCondition(instance, pool) { + klog.V(4).Infof("Pool/%s is not found from conditions!", pool) + } + } + if desiredPoolNames == nil { + if err := yurtapputil.DeleteNginxIngressCommonResource(r.Client); err != nil { + return ctrl.Result{}, err + } + } + } + if unchangedPools != nil { + klog.V(4).Infof("unchanged pool list is %s", unchangedPools) + desiredReplicas := instance.Spec.Replicas + currentReplicas := instance.Status.Replicas + if desiredReplicas != currentReplicas { + klog.V(4).Infof("Per-Pool ingress controller replicas is changed!") + isIngressCRChanged = true + for _, pool := range unchangedPools { + if err := yurtapputil.ScaleNginxIngressControllerDeploymment(r.Client, pool, desiredReplicas); err != nil { + return ctrl.Result{}, err + } + } + } + } + r.updateStatus(instance, isIngressCRChanged) + return ctrl.Result{}, nil +} + +func getPools(desired, current []string) (added, removed, unchanged []string) { + swap := false + for i := 0; i < 2; i++ { + 
for _, s1 := range desired { + found := false + for _, s2 := range current { + if s1 == s2 { + found = true + if !swap { + unchanged = append(unchanged, s1) + } + break + } + } + if !found { + if !swap { + added = append(added, s1) + } else { + removed = append(removed, s1) + } + } + } + if i == 0 { + swap = true + desired, current = current, desired + } + } + return added, removed, unchanged +} + +func getDesiredPoolNames(ying *appsv1alpha1.YurtIngress) []string { + var desiredPoolNames []string + for _, pool := range ying.Spec.Pools { + desiredPoolNames = append(desiredPoolNames, pool.Name) + } + return desiredPoolNames +} + +func getCurrentPoolNames(ying *appsv1alpha1.YurtIngress) []string { + var currentPoolNames []string + currentPoolNames = ying.Status.Conditions.IngressReadyPools + for _, pool := range ying.Status.Conditions.IngressNotReadyPools { + currentPoolNames = append(currentPoolNames, pool.Name) + } + return currentPoolNames +} + +func removePoolfromCondition(ying *appsv1alpha1.YurtIngress, poolname string) bool { + for i, pool := range ying.Status.Conditions.IngressReadyPools { + if pool == poolname { + length := len(ying.Status.Conditions.IngressReadyPools) + if i == length-1 { + if length == 1 { + ying.Status.Conditions.IngressReadyPools = nil + } else { + ying.Status.Conditions.IngressReadyPools = ying.Status.Conditions.IngressReadyPools[:i-1] + } + } else { + ying.Status.Conditions.IngressReadyPools[i] = ying.Status.Conditions.IngressReadyPools[i+1] + } + if ying.Status.ReadyNum >= 1 { + ying.Status.ReadyNum -= 1 + } + return true + } + } + for i, pool := range ying.Status.Conditions.IngressNotReadyPools { + if pool.Name == poolname { + length := len(ying.Status.Conditions.IngressNotReadyPools) + if i == length-1 { + if length == 1 { + ying.Status.Conditions.IngressNotReadyPools = nil + } else { + ying.Status.Conditions.IngressNotReadyPools = ying.Status.Conditions.IngressNotReadyPools[:i-1] + } + } else { + ying.Status.Conditions.IngressNotReadyPools[i] = ying.Status.Conditions.IngressNotReadyPools[i+1] + } + if ying.Status.UnreadyNum >= 1 { + ying.Status.UnreadyNum -= 1 + } + return true + } + } + return false +} + +func (r *YurtIngressReconciler) updateStatus(ying *appsv1alpha1.YurtIngress, ingressCRChanged bool) error { + ying.Status.Replicas = ying.Spec.Replicas + if !ingressCRChanged { + deployments, err := r.getAllDeployments(ying) + if err != nil { + klog.V(4).Infof("Get all the ingress controller deployments err: %v", err) + return err + } + ying.Status.Conditions.IngressReadyPools = nil + ying.Status.Conditions.IngressNotReadyPools = nil + ying.Status.ReadyNum = 0 + for _, dply := range deployments { + pool := dply.ObjectMeta.GetLabels()[ingressDeploymentLabel] + if dply.Status.ReadyReplicas == ying.Spec.Replicas { + klog.V(4).Infof("Ingress on pool %s is ready!", pool) + ying.Status.ReadyNum += 1 + ying.Status.Conditions.IngressReadyPools = append(ying.Status.Conditions.IngressReadyPools, pool) + } else { + klog.V(4).Infof("Ingress on pool %s is NOT ready!", pool) + condition := getUnreadyDeploymentCondition(dply) + if condition == nil { + klog.V(4).Infof("Get deployment/%s conditions nil!", dply.GetName()) + } else { + notReadyPool := appsv1alpha1.IngressNotReadyPool{Name: pool, Info: condition} + ying.Status.Conditions.IngressNotReadyPools = append(ying.Status.Conditions.IngressNotReadyPools, notReadyPool) + } + } + } + ying.Status.UnreadyNum = int32(len(ying.Spec.Pools)) - ying.Status.ReadyNum + } + var updateErr error + for i, obj := 0, ying; i < 
updateRetries; i++ { + updateErr = r.Status().Update(context.TODO(), obj) + if updateErr == nil { + klog.V(4).Infof("%s status is updated!", obj.Name) + return nil + } + } + klog.Errorf("Fail to update YurtIngress %s status: %v", ying.Name, updateErr) + return updateErr +} + +func (r *YurtIngressReconciler) cleanupIngressResources(instance *appsv1alpha1.YurtIngress) (ctrl.Result, error) { + pools := getDesiredPoolNames(instance) + if pools != nil { + for _, pool := range pools { + if err := yurtapputil.DeleteNginxIngressSpecificResource(r.Client, pool, true); err != nil { + return ctrl.Result{}, err + } + } + if err := yurtapputil.DeleteNginxIngressCommonResource(r.Client); err != nil { + return ctrl.Result{}, err + } + } + if controllerutil.ContainsFinalizer(instance, appsv1alpha1.YurtIngressFinalizer) { + controllerutil.RemoveFinalizer(instance, appsv1alpha1.YurtIngressFinalizer) + if err := r.Update(context.TODO(), instance); err != nil { + return ctrl.Result{}, err + } + } + return ctrl.Result{}, nil +} + +func prepareDeploymentOwnerReferences(instance *appsv1alpha1.YurtIngress) *metav1.OwnerReference { + isController := true + isBlockOwnerDeletion := true + ownerRef := metav1.OwnerReference{ + //TODO: optimze the APIVersion/Kind with instance values + APIVersion: "apps.openyurt.io/v1alpha1", + Kind: "YurtIngress", + Name: instance.Name, + UID: instance.UID, + Controller: &isController, + BlockOwnerDeletion: &isBlockOwnerDeletion, + } + return &ownerRef +} + +// getAllDeployments returns all of deployments owned by YurtIngress +func (r *YurtIngressReconciler) getAllDeployments(ying *appsv1alpha1.YurtIngress) ([]*appsv1.Deployment, error) { + labelSelector := metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: ingressDeploymentLabel, + Operator: metav1.LabelSelectorOpExists, + }, + }, + } + selector, err := metav1.LabelSelectorAsSelector(&labelSelector) + if err != nil { + return nil, err + } + + dplyList := &appsv1.DeploymentList{} + err = r.Client.List(context.TODO(), dplyList, &client.ListOptions{LabelSelector: selector}) + if err != nil { + return nil, err + } + + manager, err := refmanager.New(r.Client, &labelSelector, ying, r.Scheme) + if err != nil { + return nil, err + } + + selected := make([]metav1.Object, len(dplyList.Items)) + for i, dply := range dplyList.Items { + selected[i] = dply.DeepCopy() + } + claimed, err := manager.ClaimOwnedObjects(selected) + if err != nil { + return nil, err + } + + claimedDplys := make([]*appsv1.Deployment, len(claimed)) + for i, dply := range claimed { + claimedDplys[i] = dply.(*appsv1.Deployment) + } + return claimedDplys, nil +} + +func getUnreadyDeploymentCondition(dply *appsv1.Deployment) (info *appsv1alpha1.IngressNotReadyConditionInfo) { + len := len(dply.Status.Conditions) + if len == 0 { + return nil + } + var conditionInfo appsv1alpha1.IngressNotReadyConditionInfo + condition := dply.Status.Conditions[len-1] + if condition.Type == appsv1.DeploymentReplicaFailure { + conditionInfo.Type = appsv1alpha1.IngressFailure + } else { + conditionInfo.Type = appsv1alpha1.IngressPending + } + conditionInfo.LastTransitionTime = condition.LastTransitionTime + conditionInfo.Message = condition.Message + conditionInfo.Reason = condition.Reason + return &conditionInfo +} diff --git a/pkg/yurtappmanager/util/kubernetes/apply_addons.go b/pkg/yurtappmanager/util/kubernetes/apply_addons.go new file mode 100644 index 0000000..8f6b061 --- /dev/null +++ b/pkg/yurtappmanager/util/kubernetes/apply_addons.go @@ -0,0 +1,294 @@ 
+/* +Copyright 2021 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubernetes + +import ( + "github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/constant" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func CreateNginxIngressCommonResource(client client.Client) error { + // 1. Create Namespace + if err := CreateNamespaceFromYaml(client, constant.NginxIngressControllerNamespace); err != nil { + klog.Errorf("%v", err) + return err + } + // 2. Create ClusterRole + if err := CreateClusterRoleFromYaml(client, constant.NginxIngressControllerClusterRole); err != nil { + klog.Errorf("%v", err) + return err + } + if err := CreateClusterRoleFromYaml(client, constant.NginxIngressAdmissionWebhookClusterRole); err != nil { + klog.Errorf("%v", err) + return err + } + // 3. Create ClusterRoleBinding + if err := CreateClusterRoleBindingFromYaml(client, + constant.NginxIngressControllerClusterRoleBinding); err != nil { + klog.Errorf("%v", err) + return err + } + if err := CreateClusterRoleBindingFromYaml(client, + constant.NginxIngressAdmissionWebhookClusterRoleBinding); err != nil { + klog.Errorf("%v", err) + return err + } + // 4. Create Role + if err := CreateRoleFromYaml(client, + constant.NginxIngressControllerRole); err != nil { + klog.Errorf("%v", err) + return err + } + if err := CreateRoleFromYaml(client, + constant.NginxIngressAdmissionWebhookRole); err != nil { + klog.Errorf("%v", err) + return err + } + // 5. Create RoleBinding + if err := CreateRoleBindingFromYaml(client, + constant.NginxIngressControllerRoleBinding); err != nil { + klog.Errorf("%v", err) + return err + } + if err := CreateRoleBindingFromYaml(client, + constant.NginxIngressAdmissionWebhookRoleBinding); err != nil { + klog.Errorf("%v", err) + return err + } + // 6. Create ServiceAccount + if err := CreateServiceAccountFromYaml(client, + constant.NginxIngressControllerServiceAccount); err != nil { + klog.Errorf("%v", err) + return err + } + if err := CreateServiceAccountFromYaml(client, + constant.NginxIngressAdmissionWebhookServiceAccount); err != nil { + klog.Errorf("%v", err) + return err + } + // 7. Create Configmap + if err := CreateConfigMapFromYaml(client, + constant.NginxIngressControllerConfigMap); err != nil { + klog.Errorf("%v", err) + return err + } + return nil +} + +func DeleteNginxIngressCommonResource(client client.Client) error { + // 1. Delete Configmap + if err := DeleteConfigMapFromYaml(client, + constant.NginxIngressControllerConfigMap); err != nil { + klog.Errorf("%v", err) + return err + } + // 2. Delete ServiceAccount + if err := DeleteServiceAccountFromYaml(client, + constant.NginxIngressControllerServiceAccount); err != nil { + klog.Errorf("%v", err) + return err + } + if err := DeleteServiceAccountFromYaml(client, + constant.NginxIngressAdmissionWebhookServiceAccount); err != nil { + klog.Errorf("%v", err) + return err + } + // 3. 
Delete RoleBinding + if err := DeleteRoleBindingFromYaml(client, + constant.NginxIngressControllerRoleBinding); err != nil { + klog.Errorf("%v", err) + return err + } + if err := DeleteRoleBindingFromYaml(client, + constant.NginxIngressAdmissionWebhookRoleBinding); err != nil { + klog.Errorf("%v", err) + return err + } + // 4. Delete Role + if err := DeleteRoleFromYaml(client, + constant.NginxIngressControllerRole); err != nil { + klog.Errorf("%v", err) + return err + } + if err := DeleteRoleFromYaml(client, + constant.NginxIngressAdmissionWebhookRole); err != nil { + klog.Errorf("%v", err) + return err + } + // 5. Delete ClusterRoleBinding + if err := DeleteClusterRoleBindingFromYaml(client, + constant.NginxIngressControllerClusterRoleBinding); err != nil { + klog.Errorf("%v", err) + return err + } + if err := DeleteClusterRoleBindingFromYaml(client, + constant.NginxIngressAdmissionWebhookClusterRoleBinding); err != nil { + klog.Errorf("%v", err) + return err + } + // 6. Delete ClusterRole + if err := DeleteClusterRoleFromYaml(client, constant.NginxIngressControllerClusterRole); err != nil { + klog.Errorf("%v", err) + return err + } + if err := DeleteClusterRoleFromYaml(client, constant.NginxIngressAdmissionWebhookClusterRole); err != nil { + klog.Errorf("%v", err) + return err + } + // 7. Delete Namespace + if err := DeleteNamespaceFromYaml(client, constant.NginxIngressControllerNamespace); err != nil { + klog.Errorf("%v", err) + return err + } + return nil +} + +func CreateNginxIngressSpecificResource(client client.Client, poolname string, replicas int32, ownerRef *metav1.OwnerReference) error { + // 1. Create Deployment + if err := CreateDeployFromYaml(client, + constant.NginxIngressControllerNodePoolDeployment, + replicas, + ownerRef, + map[string]string{ + "nodepool_name": poolname}); err != nil { + klog.Errorf("%v", err) + return err + } + if err := CreateDeployFromYaml(client, + constant.NginxIngressAdmissionWebhookDeployment, + 1, + nil, + map[string]string{ + "nodepool_name": poolname}); err != nil { + klog.Errorf("%v", err) + return err + } + // 2. Create Service + if err := CreateServiceFromYaml(client, + constant.NginxIngressControllerService, + map[string]string{ + "nodepool_name": poolname}); err != nil { + klog.Errorf("%v", err) + return err + } + if err := CreateServiceFromYaml(client, + constant.NginxIngressAdmissionWebhookService, + map[string]string{ + "nodepool_name": poolname}); err != nil { + klog.Errorf("%v", err) + return err + } + // 3. Create ValidatingWebhookConfiguration + if err := CreateValidatingWebhookConfigurationFromYaml(client, + constant.NginxIngressValidatingWebhookConfiguration, + map[string]string{ + "nodepool_name": poolname}); err != nil { + klog.Errorf("%v", err) + return err + } + // 4. Create Job + if err := CreateJobFromYaml(client, + constant.NginxIngressAdmissionWebhookJob, + map[string]string{ + "nodepool_name": poolname}); err != nil { + klog.Errorf("%v", err) + return err + } + // 5. Create Job Patch + if err := CreateJobFromYaml(client, + constant.NginxIngressAdmissionWebhookJobPatch, + map[string]string{ + "nodepool_name": poolname}); err != nil { + klog.Errorf("%v", err) + return err + } + return nil +} + +func DeleteNginxIngressSpecificResource(client client.Client, poolname string, cleanup bool) error { + // 1. 
Delete Deployment + if err := DeleteDeployFromYaml(client, + constant.NginxIngressControllerNodePoolDeployment, + map[string]string{ + "nodepool_name": poolname}); err != nil { + klog.Errorf("%v", err) + return err + } + if err := DeleteDeployFromYaml(client, + constant.NginxIngressAdmissionWebhookDeployment, + map[string]string{ + "nodepool_name": poolname}); err != nil { + klog.Errorf("%v", err) + return err + } + // 2. Delete Service + if err := DeleteServiceFromYaml(client, + constant.NginxIngressControllerService, + map[string]string{ + "nodepool_name": poolname}); err != nil { + klog.Errorf("%v", err) + return err + } + if err := DeleteServiceFromYaml(client, + constant.NginxIngressAdmissionWebhookService, + map[string]string{ + "nodepool_name": poolname}); err != nil { + klog.Errorf("%v", err) + return err + } + // 3. Delete ValidatingWebhookConfiguration + if err := DeleteValidatingWebhookConfigurationFromYaml(client, + constant.NginxIngressValidatingWebhookConfiguration, + map[string]string{ + "nodepool_name": poolname}); err != nil { + klog.Errorf("%v", err) + return err + } + // 4. Delete Job + if err := DeleteJobFromYaml(client, + constant.NginxIngressAdmissionWebhookJob, + cleanup, + map[string]string{ + "nodepool_name": poolname}); err != nil { + klog.Errorf("%v", err) + return err + } + // 5. Delete Job Patch + if err := DeleteJobFromYaml(client, + constant.NginxIngressAdmissionWebhookJobPatch, + cleanup, + map[string]string{ + "nodepool_name": poolname}); err != nil { + klog.Errorf("%v", err) + return err + } + return nil +} + +func ScaleNginxIngressControllerDeploymment(client client.Client, poolname string, replicas int32) error { + if err := UpdateDeployFromYaml(client, + constant.NginxIngressControllerNodePoolDeployment, + &replicas, + map[string]string{ + "nodepool_name": poolname}); err != nil { + klog.Errorf("%v", err) + return err + } + return nil +} diff --git a/pkg/yurtappmanager/util/kubernetes/util.go b/pkg/yurtappmanager/util/kubernetes/util.go new file mode 100644 index 0000000..8c747d9 --- /dev/null +++ b/pkg/yurtappmanager/util/kubernetes/util.go @@ -0,0 +1,576 @@ +/* +Copyright 2021 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubernetes + +import ( + "bytes" + "context" + "fmt" + "text/template" + "time" + + "k8s.io/api/admissionregistration/v1beta1" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8sruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/klog" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var deleteOptions *client.DeleteOptions + +func init() { + policy := metav1.DeletePropagationForeground + o := &client.DeleteOptions{PropagationPolicy: &policy} + deleteOptions = &client.DeleteOptions{} + o.ApplyToDelete(deleteOptions) +} + +// CreateNamespaceFromYaml creates the Namespace from the yaml template. +func CreateNamespaceFromYaml(client client.Client, crTmpl string) error { + obj, err := YamlToObject([]byte(crTmpl)) + if err != nil { + return err + } + ns, ok := obj.(*corev1.Namespace) + if !ok { + return fmt.Errorf("fail to assert namespace") + } + err = client.Create(context.Background(), ns) + if err != nil { + if !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("fail to create the namespace/%s: %v", ns.Name, err) + } + } + time.Sleep(1 * time.Second) + klog.V(4).Infof("namespace/%s is created", ns.Name) + return nil +} + +// DeleteNamespaceFromYaml deletes the Namespace from the yaml template. +func DeleteNamespaceFromYaml(client client.Client, crTmpl string) error { + obj, err := YamlToObject([]byte(crTmpl)) + if err != nil { + return err + } + ns, ok := obj.(*corev1.Namespace) + if !ok { + return fmt.Errorf("fail to assert namespace") + } + err = client.Delete(context.Background(), ns) + if err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("fail to delete the namespace/%s: %v", ns.Name, err) + } + } + klog.V(4).Infof("namespace/%s is deleted", ns.Name) + return nil +} + +// CreateClusterRoleFromYaml creates the ClusterRole from the yaml template. +func CreateClusterRoleFromYaml(client client.Client, crTmpl string) error { + obj, err := YamlToObject([]byte(crTmpl)) + if err != nil { + return err + } + cr, ok := obj.(*rbacv1.ClusterRole) + if !ok { + return fmt.Errorf("fail to assert clusterrole") + } + err = client.Create(context.Background(), cr) + if err != nil { + if !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("fail to create the clusterrole/%s: %v", cr.Name, err) + } + } + klog.V(4).Infof("clusterrole/%s is created", cr.Name) + return nil +} + +// DeleteClusterRoleFromYaml deletes the ClusterRole from the yaml template. +func DeleteClusterRoleFromYaml(client client.Client, crTmpl string) error { + obj, err := YamlToObject([]byte(crTmpl)) + if err != nil { + return err + } + cr, ok := obj.(*rbacv1.ClusterRole) + if !ok { + return fmt.Errorf("fail to assert clusterrole") + } + err = client.Delete(context.Background(), cr) + if err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("fail to delete the clusterrole/%s: %v", cr.Name, err) + } + } + klog.V(4).Infof("clusterrole/%s is deleted", cr.Name) + return nil +} + +// CreateClusterRoleBindingFromYaml creates the ClusterRoleBinding from the yaml template. 
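+// Like the other Create*FromYaml helpers, AlreadyExists errors are tolerated so
+// the call is idempotent.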
+func CreateClusterRoleBindingFromYaml(client client.Client, crbTmpl string) error { + obj, err := YamlToObject([]byte(crbTmpl)) + if err != nil { + return err + } + crb, ok := obj.(*rbacv1.ClusterRoleBinding) + if !ok { + return fmt.Errorf("fail to assert clusterrolebinding") + } + err = client.Create(context.Background(), crb) + if err != nil { + if !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("fail to create the clusterrolebinding/%s: %v", crb.Name, err) + } + } + klog.V(4).Infof("clusterrolebinding/%s is created", crb.Name) + return nil +} + +// DeleteClusterRoleBindingFromYaml deletes the ClusterRoleBinding from the yaml template. +func DeleteClusterRoleBindingFromYaml(client client.Client, crbTmpl string) error { + obj, err := YamlToObject([]byte(crbTmpl)) + if err != nil { + return err + } + crb, ok := obj.(*rbacv1.ClusterRoleBinding) + if !ok { + return fmt.Errorf("fail to assert clusterrolebinding") + } + err = client.Delete(context.Background(), crb) + if err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("fail to delete the clusterrolebinding/%s: %v", crb.Name, err) + } + } + klog.V(4).Infof("clusterrolebinding/%s is deleted", crb.Name) + return nil +} + +// CreateRoleFromYaml creates the Role from the yaml template. +func CreateRoleFromYaml(client client.Client, rTmpl string) error { + obj, err := YamlToObject([]byte(rTmpl)) + if err != nil { + return err + } + r, ok := obj.(*rbacv1.Role) + if !ok { + return fmt.Errorf("fail to assert role") + } + err = client.Create(context.Background(), r) + if err != nil { + if !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("fail to create the role/%s: %v", r.Name, err) + } + } + klog.V(4).Infof("role/%s is created", r.Name) + return nil +} + +// DeleteRoleFromYaml deletes the Role from the yaml template. +func DeleteRoleFromYaml(client client.Client, rTmpl string) error { + obj, err := YamlToObject([]byte(rTmpl)) + if err != nil { + return err + } + r, ok := obj.(*rbacv1.Role) + if !ok { + return fmt.Errorf("fail to assert role") + } + err = client.Delete(context.Background(), r) + if err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("fail to delete the role/%s: %v", r.Name, err) + } + } + klog.V(4).Infof("role/%s is deleted", r.Name) + return nil +} + +// CreateRoleBindingFromYaml creates the RoleBinding from the yaml template. +func CreateRoleBindingFromYaml(client client.Client, rbTmpl string) error { + obj, err := YamlToObject([]byte(rbTmpl)) + if err != nil { + return err + } + rb, ok := obj.(*rbacv1.RoleBinding) + if !ok { + return fmt.Errorf("fail to assert rolebinding") + } + err = client.Create(context.Background(), rb) + if err != nil { + if !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("fail to create the rolebinding/%s: %v", rb.Name, err) + } + } + klog.V(4).Infof("rolebinding/%s is created", rb.Name) + return nil +} + +// DeleteRoleBindingFromYaml delete the RoleBinding from the yaml template. 
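+// NotFound errors are ignored, so the deletion is idempotent.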
+func DeleteRoleBindingFromYaml(client client.Client, rbTmpl string) error { + obj, err := YamlToObject([]byte(rbTmpl)) + if err != nil { + return err + } + rb, ok := obj.(*rbacv1.RoleBinding) + if !ok { + return fmt.Errorf("fail to assert rolebinding") + } + err = client.Delete(context.Background(), rb) + if err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("fail to delete the rolebinding/%s: %v", rb.Name, err) + } + } + klog.V(4).Infof("rolebinding/%s is deleted", rb.Name) + return nil +} + +// CreateServiceAccountFromYaml creates the ServiceAccount from the yaml template. +func CreateServiceAccountFromYaml(client client.Client, saTmpl string) error { + obj, err := YamlToObject([]byte(saTmpl)) + if err != nil { + return err + } + sa, ok := obj.(*corev1.ServiceAccount) + if !ok { + return fmt.Errorf("fail to assert serviceaccount") + } + err = client.Create(context.Background(), sa) + if err != nil { + if !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("fail to create the serviceaccount/%s: %v", sa.Name, err) + } + } + klog.V(4).Infof("serviceaccount/%s is created", sa.Name) + return nil +} + +// DeleteServiceAccountFromYaml deletes the ServiceAccount from the yaml template. +func DeleteServiceAccountFromYaml(client client.Client, saTmpl string) error { + obj, err := YamlToObject([]byte(saTmpl)) + if err != nil { + return err + } + sa, ok := obj.(*corev1.ServiceAccount) + if !ok { + return fmt.Errorf("fail to assert serviceaccount") + } + err = client.Delete(context.Background(), sa) + if err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("fail to delete the serviceaccount/%s: %v", sa.Name, err) + } + } + klog.V(4).Infof("serviceaccount/%s is deleted", sa.Name) + return nil +} + +// CreateConfigMapFromYaml creates the ConfigMap from the yaml template. +func CreateConfigMapFromYaml(client client.Client, cmTmpl string) error { + obj, err := YamlToObject([]byte(cmTmpl)) + if err != nil { + return err + } + cm, ok := obj.(*corev1.ConfigMap) + if !ok { + return fmt.Errorf("fail to assert configmap") + } + err = client.Create(context.Background(), cm) + if err != nil { + if !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("fail to create the configmap/%s: %v", cm.Name, err) + } + } + klog.V(4).Infof("configmap/%s is created", cm.Name) + return nil +} + +// DeleteConfigMapFromYaml deletes the ConfigMap from the yaml template. +func DeleteConfigMapFromYaml(client client.Client, cmTmpl string) error { + obj, err := YamlToObject([]byte(cmTmpl)) + if err != nil { + return err + } + cm, ok := obj.(*corev1.ConfigMap) + if !ok { + return fmt.Errorf("fail to assert configmap") + } + err = client.Delete(context.Background(), cm) + if err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("fail to delete the configmap/%s: %v", cm.Name, err) + } + } + klog.V(4).Infof("configmap/%s is deleted", cm.Name) + return nil +} + +// CreateDeployFromYaml creates the Deployment from the yaml template. 
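+// The template is rendered first (ctx supplies values such as nodepool_name), then
+// the replica count and the optional owner reference are applied before the object
+// is created.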
+func CreateDeployFromYaml(client client.Client, dplyTmpl string, replicas int32, ownerRef *metav1.OwnerReference, ctx interface{}) error { + dp, err := SubsituteTemplate(dplyTmpl, ctx) + if err != nil { + return err + } + dpObj, err := YamlToObject([]byte(dp)) + if err != nil { + return err + } + dply, ok := dpObj.(*appsv1.Deployment) + if !ok { + return fmt.Errorf("fail to assert deployment") + } + if ownerRef != nil { + ownerRefs := dply.ObjectMeta.GetOwnerReferences() + ownerRefs = append(ownerRefs, *ownerRef) + dply.ObjectMeta.SetOwnerReferences(ownerRefs) + } + dply.Spec.Replicas = &replicas + err = client.Create(context.Background(), dply) + if err != nil { + if !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("fail to create the deployment/%s: %v", dply.Name, err) + } + } + klog.V(4).Infof("deployment/%s is created", dply.Name) + return nil +} + +// DeleteDeployFromYaml delete the Deployment from the yaml template. +func DeleteDeployFromYaml(client client.Client, dplyTmpl string, ctx interface{}) error { + dp, err := SubsituteTemplate(dplyTmpl, ctx) + if err != nil { + return err + } + dpObj, err := YamlToObject([]byte(dp)) + if err != nil { + return err + } + dply, ok := dpObj.(*appsv1.Deployment) + if !ok { + return fmt.Errorf("fail to assert deployment") + } + err = client.Delete(context.Background(), dply) + if err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("fail to delete the deployment/%s: %v", dply.Name, err) + } + } + klog.V(4).Infof("deployment/%s is deleted", dply.Name) + return nil +} + +// UpdateDeployFromYaml updates the Deployment from the yaml template. +func UpdateDeployFromYaml(client client.Client, dplyTmpl string, replicas *int32, ctx interface{}) error { + dp, err := SubsituteTemplate(dplyTmpl, ctx) + if err != nil { + return err + } + dpObj, err := YamlToObject([]byte(dp)) + if err != nil { + return err + } + dply, ok := dpObj.(*appsv1.Deployment) + if !ok { + return fmt.Errorf("fail to assert deployment") + } + dply.Spec.Replicas = replicas + err = client.Update(context.Background(), dply) + if err != nil { + return fmt.Errorf("fail to update the deployment/%s: %v", dply.Name, err) + } + klog.V(4).Infof("deployment/%s is updated", dply.Name) + return nil +} + +// CreateServiceFromYaml creates the Service from the yaml template. +func CreateServiceFromYaml(client client.Client, svcTmpl string, ctx interface{}) error { + sv, err := SubsituteTemplate(svcTmpl, ctx) + if err != nil { + return err + } + svcObj, err := YamlToObject([]byte(sv)) + if err != nil { + return err + } + svc, ok := svcObj.(*corev1.Service) + if !ok { + return fmt.Errorf("fail to assert service") + } + err = client.Create(context.Background(), svc) + if err != nil { + if !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("fail to create the service/%s: %v", svc.Name, err) + } + } + klog.V(4).Infof("service/%s is created", svc.Name) + return nil +} + +// DeleteServiceFromYaml deletes the Service from the yaml template. 
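+// The template is rendered with ctx, so the deletion targets the same object name
+// that was used at creation time.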
+func DeleteServiceFromYaml(client client.Client, svcTmpl string, ctx interface{}) error { + sv, err := SubsituteTemplate(svcTmpl, ctx) + if err != nil { + return err + } + svcObj, err := YamlToObject([]byte(sv)) + if err != nil { + return err + } + svc, ok := svcObj.(*corev1.Service) + if !ok { + return fmt.Errorf("fail to assert service") + } + err = client.Delete(context.Background(), svc) + if err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("fail to delete the service/%s: %v", svc.Name, err) + } + } + klog.V(4).Infof("service/%s is deleted", svc.Name) + return nil +} + +// CreateValidatingWebhookConfigurationFromYaml creates the validatingwebhookconfiguration from the yaml template. +func CreateValidatingWebhookConfigurationFromYaml(client client.Client, vwcTmpl string, ctx interface{}) error { + vw, err := SubsituteTemplate(vwcTmpl, ctx) + if err != nil { + return err + } + vwcObj, err := YamlToObject([]byte(vw)) + if err != nil { + return err + } + vwc, ok := vwcObj.(*v1beta1.ValidatingWebhookConfiguration) + if !ok { + return fmt.Errorf("fail to assert validatingwebhookconfiguration") + } + err = client.Create(context.Background(), vwc) + if err != nil { + if !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("fail to create the validatingwebhookconfiguration/%s: %v", vwc.Name, err) + } + } + klog.V(4).Infof("validatingwebhookconfiguration/%s is created", vwc.Name) + return nil +} + +// DeleteValidatingWebhookConfigurationFromYaml delete the validatingwebhookconfiguration from the yaml template. +func DeleteValidatingWebhookConfigurationFromYaml(client client.Client, vwcTmpl string, ctx interface{}) error { + vw, err := SubsituteTemplate(vwcTmpl, ctx) + if err != nil { + return err + } + vwcObj, err := YamlToObject([]byte(vw)) + if err != nil { + return err + } + vwc, ok := vwcObj.(*v1beta1.ValidatingWebhookConfiguration) + if !ok { + return fmt.Errorf("fail to assert validatingwebhookconfiguration") + } + err = client.Delete(context.Background(), vwc) + if err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("fail to delete the validatingwebhookconfiguration/%s: %s", vwc.Name, err) + } + } + klog.V(4).Infof("validatingwebhookconfiguration/%s is deleted", vwc.Name) + return nil +} + +// CreateJobFromYaml creates the Job from the yaml template. +func CreateJobFromYaml(client client.Client, jobTmpl string, ctx interface{}) error { + jb, err := SubsituteTemplate(jobTmpl, ctx) + if err != nil { + return err + } + jbObj, err := YamlToObject([]byte(jb)) + if err != nil { + return err + } + job, ok := jbObj.(*batchv1.Job) + if !ok { + return fmt.Errorf("fail to assert job") + } + err = client.Create(context.Background(), job) + if err != nil { + if !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("fail to create the job/%s: %v", job.Name, err) + } + } + klog.V(4).Infof("job/%s is created", job.Name) + return nil +} + +// DeleteJobFromYaml deletes the Job from the yaml template. 
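+// When cleanup is false the package-level deleteOptions (foreground propagation)
+// are used, so the job's pods are removed together with the job; when cleanup is
+// true the default delete options apply.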
+func DeleteJobFromYaml(client client.Client, jobTmpl string, cleanup bool, ctx interface{}) error { + jb, err := SubsituteTemplate(jobTmpl, ctx) + if err != nil { + return err + } + jbObj, err := YamlToObject([]byte(jb)) + if err != nil { + return err + } + job, ok := jbObj.(*batchv1.Job) + if !ok { + return fmt.Errorf("fail to assert job") + } + if cleanup { + err = client.Delete(context.Background(), job) + } else { + err = client.Delete(context.Background(), job, deleteOptions) + } + if err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("fail to delete the job/%s: %v", job.Name, err) + } + } + klog.V(4).Infof("job/%s is deleted", job.Name) + return nil +} + +// YamlToObject deserializes object in yaml format to a runtime.Object +func YamlToObject(yamlContent []byte) (k8sruntime.Object, error) { + decode := serializer.NewCodecFactory(scheme.Scheme).UniversalDeserializer().Decode + obj, _, err := decode(yamlContent, nil, nil) + if err != nil { + return nil, err + } + return obj, nil +} + +// SubsituteTemplate fills out the template based on the context +func SubsituteTemplate(tmpl string, context interface{}) (string, error) { + t, tmplPrsErr := template.New("test").Option("missingkey=zero").Parse(tmpl) + if tmplPrsErr != nil { + return "", tmplPrsErr + } + writer := bytes.NewBuffer([]byte{}) + if err := t.Execute(writer, context); nil != err { + return "", err + } + return writer.String(), nil +} From ff57388037a08d19c76b820f57ea4d7f59eb7de4 Mon Sep 17 00:00:00 2001 From: Linda Yu Date: Mon, 27 Dec 2021 05:56:18 +0800 Subject: [PATCH 2/2] ingress: add admission webhook for YurtIngress operator Signed-off-by: Linda Yu --- .../yurt-app-manager/webhook/manifests.yaml | 19 +++ pkg/yurtappmanager/webhook/add_yurtingress.go | 32 +++++ .../webhook/yurtingress/mutating/webhooks.go | 30 +++++ .../yurtingress_create_update_handler.go | 69 +++++++++++ .../yurtingress/validating/webhooks.go | 30 +++++ .../yurtingress_create_update_handler.go | 111 ++++++++++++++++++ .../validating/yurtingress_validating.go | 77 ++++++++++++ 7 files changed, 368 insertions(+) create mode 100644 pkg/yurtappmanager/webhook/add_yurtingress.go create mode 100644 pkg/yurtappmanager/webhook/yurtingress/mutating/webhooks.go create mode 100644 pkg/yurtappmanager/webhook/yurtingress/mutating/yurtingress_create_update_handler.go create mode 100644 pkg/yurtappmanager/webhook/yurtingress/validating/webhooks.go create mode 100644 pkg/yurtappmanager/webhook/yurtingress/validating/yurtingress_create_update_handler.go create mode 100644 pkg/yurtappmanager/webhook/yurtingress/validating/yurtingress_validating.go diff --git a/config/yurt-app-manager/webhook/manifests.yaml b/config/yurt-app-manager/webhook/manifests.yaml index 85f85c2..211ecb8 100644 --- a/config/yurt-app-manager/webhook/manifests.yaml +++ b/config/yurt-app-manager/webhook/manifests.yaml @@ -123,3 +123,22 @@ webhooks: - UPDATE resources: - yurtappdaemons +- clientConfig: + caBundle: Cg== + service: + name: webhook-service + namespace: system + path: /validate-apps-openyurt-io-v1alpha1-yurtingress + failurePolicy: Fail + name: vyurtingress.kb.io + rules: + - apiGroups: + - apps.openyurt.io + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + - DELETE + resources: + - yurtingresses diff --git a/pkg/yurtappmanager/webhook/add_yurtingress.go b/pkg/yurtappmanager/webhook/add_yurtingress.go new file mode 100644 index 0000000..4194431 --- /dev/null +++ b/pkg/yurtappmanager/webhook/add_yurtingress.go @@ -0,0 +1,32 @@ +/* +Copyright 2021 The 
OpenYurt Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	appsv1alpha1 "github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/apis/apps/v1alpha1"
+	"github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/util/gate"
+	"github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/webhook/yurtingress/mutating"
+	"github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/webhook/yurtingress/validating"
+)
+
+func init() {
+	if !gate.ResourceEnabled(&appsv1alpha1.YurtIngress{}) {
+		return
+	}
+	addHandlers(mutating.HandlerMap)
+	addHandlers(validating.HandlerMap)
+}
diff --git a/pkg/yurtappmanager/webhook/yurtingress/mutating/webhooks.go b/pkg/yurtappmanager/webhook/yurtingress/mutating/webhooks.go
new file mode 100644
index 0000000..4174adc
--- /dev/null
+++ b/pkg/yurtappmanager/webhook/yurtingress/mutating/webhooks.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2021 The OpenYurt Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mutating
+
+import (
+	webhookutil "github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/webhook/util"
+)
+
+// +kubebuilder:webhook:path=/mutate-apps-openyurt-io-v1alpha1-yurtingress,mutating=true,failurePolicy=fail,groups=apps.openyurt.io,resources=yurtingresses,verbs=create;update,versions=v1alpha1,name=myurtingress.kb.io
+
+var (
+	// HandlerMap contains admission webhook handlers
+	HandlerMap = map[string]webhookutil.Handler{
+		"mutate-apps-openyurt-io-v1alpha1-yurtingress": &YurtIngressCreateUpdateHandler{},
+	}
+)
diff --git a/pkg/yurtappmanager/webhook/yurtingress/mutating/yurtingress_create_update_handler.go b/pkg/yurtappmanager/webhook/yurtingress/mutating/yurtingress_create_update_handler.go
new file mode 100644
index 0000000..4db54e8
--- /dev/null
+++ b/pkg/yurtappmanager/webhook/yurtingress/mutating/yurtingress_create_update_handler.go
@@ -0,0 +1,69 @@
+/*
+Copyright 2021 The OpenYurt Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mutating
+
+import (
+	"context"
+	"encoding/json"
+	"net/http"
+
+	"k8s.io/klog"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+	appsv1alpha1 "github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/apis/apps/v1alpha1"
+	"github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/util"
+	webhookutil "github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/webhook/util"
+)
+
+// YurtIngressCreateUpdateHandler handles YurtIngress
+type YurtIngressCreateUpdateHandler struct {
+	// Decoder decodes objects
+	Decoder *admission.Decoder
+}
+
+var _ webhookutil.Handler = &YurtIngressCreateUpdateHandler{}
+
+func (h *YurtIngressCreateUpdateHandler) SetOptions(options webhookutil.Options) {
+	// no options are needed for the mutating handler
+}
+
+// Handle handles admission requests.
+func (h *YurtIngressCreateUpdateHandler) Handle(ctx context.Context, req admission.Request) admission.Response {
+	ingress := appsv1alpha1.YurtIngress{}
+	err := h.Decoder.Decode(req, &ingress)
+	if err != nil {
+		return admission.Errored(http.StatusBadRequest, err)
+	}
+
+	marshalled, err := json.Marshal(&ingress)
+	if err != nil {
+		return admission.Errored(http.StatusInternalServerError, err)
+	}
+	resp := admission.PatchResponseFromRaw(req.AdmissionRequest.Object.Raw,
+		marshalled)
+	if len(resp.Patches) > 0 {
+		klog.V(5).Infof("Admit YurtIngress %s patches: %v", ingress.Name, util.DumpJSON(resp.Patches))
+	}
+	return resp
+}
+
+var _ admission.DecoderInjector = &YurtIngressCreateUpdateHandler{}
+
+func (h *YurtIngressCreateUpdateHandler) InjectDecoder(d *admission.Decoder) error {
+	h.Decoder = d
+	return nil
+}
diff --git a/pkg/yurtappmanager/webhook/yurtingress/validating/webhooks.go b/pkg/yurtappmanager/webhook/yurtingress/validating/webhooks.go
new file mode 100644
index 0000000..160d559
--- /dev/null
+++ b/pkg/yurtappmanager/webhook/yurtingress/validating/webhooks.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2021 The OpenYurt Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validating
+
+import (
+	webhookutil "github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/webhook/util"
+)
+
+// +kubebuilder:webhook:path=/validate-apps-openyurt-io-v1alpha1-yurtingress,mutating=false,failurePolicy=fail,groups=apps.openyurt.io,resources=yurtingresses,verbs=create;update;delete,versions=v1alpha1,name=vyurtingress.kb.io
+
+var (
+	// HandlerMap contains admission webhook handlers
+	HandlerMap = map[string]webhookutil.Handler{
+		"validate-apps-openyurt-io-v1alpha1-yurtingress": &YurtIngressCreateUpdateHandler{},
+	}
+)
diff --git a/pkg/yurtappmanager/webhook/yurtingress/validating/yurtingress_create_update_handler.go b/pkg/yurtappmanager/webhook/yurtingress/validating/yurtingress_create_update_handler.go
new file mode 100644
index 0000000..08156be
--- /dev/null
+++ b/pkg/yurtappmanager/webhook/yurtingress/validating/yurtingress_create_update_handler.go
@@ -0,0 +1,111 @@
+/*
+Copyright 2021 The OpenYurt Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validating
+
+import (
+	"context"
+	"net/http"
+
+	admissionv1 "k8s.io/api/admission/v1"
+	"k8s.io/klog"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+	appsv1alpha1 "github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/apis/apps/v1alpha1"
+	webhookutil "github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/webhook/util"
+)
+
+// YurtIngressCreateUpdateHandler handles YurtIngress
+type YurtIngressCreateUpdateHandler struct {
+	Client client.Client
+
+	// Decoder decodes objects
+	Decoder *admission.Decoder
+}
+
+var _ webhookutil.Handler = &YurtIngressCreateUpdateHandler{}
+
+func (h *YurtIngressCreateUpdateHandler) SetOptions(options webhookutil.Options) {
+	// no options are needed for the validating handler
+}
+
+// Handle handles admission requests.
+func (h *YurtIngressCreateUpdateHandler) Handle(ctx context.Context, req admission.Request) admission.Response {
+	ingress := appsv1alpha1.YurtIngress{}
+
+	// enforce the singleton YurtIngress instance name
+	if req.Name != appsv1alpha1.SingletonYurtIngressInstanceName {
+		msg := "please name the YurtIngress instance " + appsv1alpha1.SingletonYurtIngressInstanceName + " instead of " + req.Name
+		klog.Error(msg)
+		return admission.ValidationResponse(false, msg)
+	}
+
+	switch req.AdmissionRequest.Operation {
+	case admissionv1.Create:
+		klog.V(4).Info("capture the yurtingress creation request")
+
+		if err := h.Decoder.Decode(req, &ingress); err != nil {
+			return admission.Errored(http.StatusBadRequest, err)
+		}
+		if allErrs := validateYurtIngressSpec(h.Client, &ingress.Spec); len(allErrs) > 0 {
+			return admission.Errored(http.StatusUnprocessableEntity,
+				allErrs.ToAggregate())
+		}
+	case admissionv1.Update:
+		klog.V(4).Info("capture the yurtingress update request")
+		if err := h.Decoder.Decode(req, &ingress); err != nil {
+			return admission.Errored(http.StatusBadRequest, err)
+		}
+		oingress := appsv1alpha1.YurtIngress{}
+		if err := h.Decoder.DecodeRaw(req.OldObject, &oingress); err != nil {
+			return admission.Errored(http.StatusBadRequest, err)
+		}
+
+		if allErrs := validateYurtIngressSpecUpdate(h.Client, &ingress.Spec, &oingress.Spec); len(allErrs) > 0 {
+			return admission.Errored(http.StatusUnprocessableEntity,
+				allErrs.ToAggregate())
+		}
+	case admissionv1.Delete:
+		klog.V(4).Info("capture the yurtingress deletion request")
+		if err := h.Decoder.DecodeRaw(req.OldObject, &ingress); err != nil {
+			return admission.Errored(http.StatusBadRequest, err)
+		}
+		if allErrs := validateYurtIngressSpecDeletion(h.Client, &ingress.Spec); len(allErrs) > 0 {
+			return admission.Errored(http.StatusUnprocessableEntity,
+				allErrs.ToAggregate())
+		}
+	}
+
+	return admission.ValidationResponse(true, "")
+}
+
+var _ admission.DecoderInjector = &YurtIngressCreateUpdateHandler{}
+
+// InjectDecoder injects the decoder into the YurtIngressCreateUpdateHandler
+func (h *YurtIngressCreateUpdateHandler) InjectDecoder(d *admission.Decoder) error {
+	h.Decoder = d
+	return nil
+}
+
+var _ inject.Client = &YurtIngressCreateUpdateHandler{}
+
+// InjectClient injects the client into the YurtIngressCreateUpdateHandler
+func (h *YurtIngressCreateUpdateHandler) InjectClient(c client.Client) error {
+	h.Client = c
+	return nil
+}
diff --git a/pkg/yurtappmanager/webhook/yurtingress/validating/yurtingress_validating.go b/pkg/yurtappmanager/webhook/yurtingress/validating/yurtingress_validating.go
new file mode 100644
index 0000000..ff39be1
--- /dev/null
+++ b/pkg/yurtappmanager/webhook/yurtingress/validating/yurtingress_validating.go
@@ -0,0 +1,77 @@
+/*
+Copyright 2021 The OpenYurt Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validating
+
+import (
+	"context"
+
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	"k8s.io/klog"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	appsv1alpha1 "github.com/openyurtio/yurt-app-manager/pkg/yurtappmanager/apis/apps/v1alpha1"
+)
+
+// validateYurtIngressSpec validates the YurtIngress spec.
+func validateYurtIngressSpec(c client.Client, spec *appsv1alpha1.YurtIngressSpec) field.ErrorList {
+	if len(spec.Pools) > 0 {
+		var err error
+		var errmsg string
+		nps := appsv1alpha1.NodePoolList{}
+		if err = c.List(context.TODO(), &nps, &client.ListOptions{}); err != nil {
+			errmsg = "failed to list the nodepools in the cluster"
+			klog.Error(errmsg)
+			return field.ErrorList([]*field.Error{
+				field.Forbidden(field.NewPath("spec").Child("pools"), errmsg)})
+		}
+
+		// validate that every specified nodepool exists in the cluster
+		if len(nps.Items) == 0 {
+			errmsg = "no nodepool is created in the cluster"
+			klog.Error(errmsg)
+			return field.ErrorList([]*field.Error{
+				field.Forbidden(field.NewPath("spec").Child("pools"), errmsg)})
+		} else {
+			found := false
+			for _, snp := range spec.Pools { // iterate over the nodepools specified in the spec
+				for _, cnp := range nps.Items { // iterate over the nodepools in the cluster
+					if snp.Name == cnp.ObjectMeta.Name {
+						found = true
+						break
+					}
+				}
+				if !found {
+					errmsg = snp.Name + " does not exist in the cluster"
+					klog.Error(errmsg)
+					return field.ErrorList([]*field.Error{
+						field.Forbidden(field.NewPath("spec").Child("pools"), errmsg)})
+				}
+				found = false
+			}
+
+		}
+	}
+	return nil
+}
+
+func validateYurtIngressSpecUpdate(c client.Client, spec, oldSpec *appsv1alpha1.YurtIngressSpec) field.ErrorList {
+	return validateYurtIngressSpec(c, spec)
+}
+
+func validateYurtIngressSpecDeletion(c client.Client, spec *appsv1alpha1.YurtIngressSpec) field.ErrorList {
+	return validateYurtIngressSpec(c, spec)
+}
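
A minimal sketch of a YurtIngress CR that this webhook would admit, assuming the placeholder name below is replaced with the actual value of appsv1alpha1.SingletonYurtIngressInstanceName and that the referenced NodePools (pool01 and pool02 are hypothetical names) already exist in the cluster:

apiVersion: apps.openyurt.io/v1alpha1
kind: YurtIngress
metadata:
  # must equal the SingletonYurtIngressInstanceName constant; placeholder shown here
  name: <singleton-yurtingress-instance-name>
spec:
  # nginx ingress controller replicas deployed in each listed nodepool
  ingress_controller_replicas_per_pool: 2
  pools:
  - name: pool01   # hypothetical NodePool; must already exist in the cluster
  - name: pool02   # hypothetical NodePool; must already exist in the cluster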