From c2bdffbd4e8ebac58f4877fe65f163faf7c24853 Mon Sep 17 00:00:00 2001 From: Miguel Angel Ajo Pelayo Date: Wed, 2 Feb 2022 16:34:03 +0100 Subject: [PATCH 01/28] Remove the microshift-containerized subpackage (#595) Our docs explain how to download the .service file [1] instead, and it has proven problematic to package this. Fix the microshift.service being overwritten by microshift-containerized, even when the non-containerized package only is installed. Resolves-Issue: #591 Signed-off-by: Miguel Angel Ajo --- packaging/rpm/microshift.spec | 54 +++++------------------------------ 1 file changed, 7 insertions(+), 47 deletions(-) diff --git a/packaging/rpm/microshift.spec b/packaging/rpm/microshift.spec index 39dc364f6a8..097924c8b63 100644 --- a/packaging/rpm/microshift.spec +++ b/packaging/rpm/microshift.spec @@ -78,44 +78,6 @@ systems, scale testing, and provisioning of lightweight Kubernetes control plane Note: MicroShift is still early days and moving fast. Features are missing. Things break. But you can still help shape it, too. -%package containerized -Summary: Containerized systemd files for MicroShift -BuildArch: noarch -Requires: crio -Requires: cri-tools -Requires: microshift-selinux -Requires: podman -%{?selinux_requires} - -%description containerized -This is the containerized version of MicroShift. - -MicroShift is a research project that is exploring how OpenShift Kubernetes -can be optimized for small form factor and edge computing. - -Edge devices deployed out in the field pose very different operational, -environmental, and business challenges from those of cloud computing. -These motivate different engineering -trade-offs for Kubernetes at the far edge than for cloud or near-edge -scenarios. MicroShift's design goals cater to this: - -make frugal use of system resources (CPU, memory, network, storage, etc.), -tolerate severe networking constraints, update (resp. roll back) securely, -safely, speedily, and seamlessly (without disrupting workloads), and build on -and integrate cleanly with edge-optimized OSes like Fedora IoT and RHEL for Edge, -while providing a consistent development and management experience with standard -OpenShift. - -We believe these properties should also make MicroShift a great tool for other -use cases such as Kubernetes applications development on resource-constrained -systems, scale testing, and provisioning of lightweight Kubernetes control planes. - -Note: MicroShift is still early days and moving fast. Features are missing. -Things break. But you can still help shape it, too. 
- -%define microshift_relabel_files() \ -restorecon -R /var/hpvolumes - %package selinux Summary: SELinux policies for MicroShift BuildRequires: selinux-policy @@ -186,7 +148,6 @@ install -p -m644 packaging/crio.conf.d/microshift.conf %{buildroot}%{_sysconfdir install -d -m755 %{buildroot}/%{_unitdir} install -p -m644 packaging/systemd/microshift.service %{buildroot}%{_unitdir}/microshift.service -install -p -m644 packaging/systemd/microshift-containerized.service %{buildroot}%{_unitdir}/microshift-containerized.service mkdir -p -m755 %{buildroot}/var/run/flannel mkdir -p -m755 %{buildroot}/var/run/kubelet @@ -220,9 +181,6 @@ if [ $1 -eq 0 ]; then %selinux_modules_uninstall -s %{selinuxtype} microshift fi -%post containerized -mv /usr/lib/systemd/system/microshift-containerized.service /usr/lib/systemd/system/microshift.service - %posttrans selinux %selinux_relabel_post -s %{selinuxtype} @@ -250,11 +208,13 @@ mv /usr/lib/systemd/system/microshift-containerized.service /usr/lib/systemd/sys %{_datadir}/selinux/packages/%{selinuxtype}/microshift.pp.bz2 %ghost %{_sharedstatedir}/selinux/%{selinuxtype}/active/modules/200/microshift -%files containerized - -%{_unitdir}/microshift-containerized.service - %changelog +* Wed Feb 2 2022 Miguel Angel Ajo . 4.8.0-0.microshift-2022-01-06-210147-20 +- Remove the microshift-containerized subpackage, our docs explain how to download the .service file, + and it has proven problematic to package this. +- Fix the microshift.service being overwritten by microshift-containerized, even when the non-containerized + package only is installed. + * Thu Nov 4 2021 Miguel angel Ajo . 4.8.0-nightly-14-g973b9c78 - Add microshift-containerized subpackage which contains the microshift-containerized systemd definition. @@ -269,4 +229,4 @@ mv /usr/lib/systemd/system/microshift-containerized.service /usr/lib/systemd/sys - add missing BuildRequires on systemd and policycoreutils * Mon Sep 20 2021 Miguel Angel Ajo . 4.7.0-2021_08_31_224727 -- Initial packaging \ No newline at end of file +- Initial packaging From 0481d4e27ce3bd7a871589c767874039ed426a98 Mon Sep 17 00:00:00 2001 From: Ryan Cook Date: Wed, 2 Feb 2022 12:45:59 -0500 Subject: [PATCH 02/28] force packaging versions (#594) * force packaging versions Signed-off-by: Ryan Cook * new packaging message Signed-off-by: Ryan Cook --- packaging/rpm/microshift.spec | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/packaging/rpm/microshift.spec b/packaging/rpm/microshift.spec index 097924c8b63..0adfb7bf0ab 100644 --- a/packaging/rpm/microshift.spec +++ b/packaging/rpm/microshift.spec @@ -14,6 +14,10 @@ # SELinux specifics %global selinuxtype targeted +%define selinux_policyver 3.14.3-67 +%define container_policyver 2.167.0-1 +%define container_policy_epoch 2 + # Git related details %global shortcommit %(c=%{git_commit}; echo ${c:0:7}) @@ -80,9 +84,9 @@ Things break. But you can still help shape it, too. %package selinux Summary: SELinux policies for MicroShift -BuildRequires: selinux-policy -BuildRequires: selinux-policy-devel -Requires: container-selinux +BuildRequires: selinux-policy >= %{selinux_policyver} +BuildRequires: selinux-policy-devel >= %{selinux_policyver} +Requires: container-selinux >= %{container_policy_epoch}:%{container_policyver} BuildArch: noarch %{?selinux_requires} @@ -209,6 +213,9 @@ fi %ghost %{_sharedstatedir}/selinux/%{selinuxtype}/active/modules/200/microshift %changelog +* Wed Feb 2 2022 Ryan Cook . 
4.8.0-0.microshift-2022_01_04_175420_25 +- Define specific selinux policy version to help manage selinux package + * Wed Feb 2 2022 Miguel Angel Ajo . 4.8.0-0.microshift-2022-01-06-210147-20 - Remove the microshift-containerized subpackage, our docs explain how to download the .service file, and it has proven problematic to package this. From a7deed1fb3b4815e76c458d0d0eb8171ef447357 Mon Sep 17 00:00:00 2001 From: Miguel Angel Ajo Pelayo Date: Wed, 2 Feb 2022 21:53:50 +0100 Subject: [PATCH 03/28] OpenShift CRD Manager service must signal stop when completed (#581) Otherwise the service manager can't track completion of the service when a stop is requested. Signed-off-by: Miguel Angel Ajo --- pkg/controllers/openshift-crd-manager.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pkg/controllers/openshift-crd-manager.go b/pkg/controllers/openshift-crd-manager.go index 4b0ebf3722d..a61ccf5b270 100644 --- a/pkg/controllers/openshift-crd-manager.go +++ b/pkg/controllers/openshift-crd-manager.go @@ -39,10 +39,11 @@ func (s *OpenShiftCRDManager) Dependencies() []string { } func (s *OpenShiftCRDManager) Run(ctx context.Context, ready chan<- struct{}, stopped chan<- struct{}) error { - defer close(ready) - // To-DO add readiness check + defer close(stopped) + if err := assets.ApplyCRDs(s.cfg); err != nil { klog.Errorf("%s unable to apply default CRDs: %v", s.Name(), err) + return err } klog.Infof("%s applied default CRDs", s.Name()) @@ -51,5 +52,7 @@ func (s *OpenShiftCRDManager) Run(ctx context.Context, ready chan<- struct{}, st klog.Errorf("%s unable to confirm all CRDs are ready: %v", s.Name(), err) } klog.Infof("%s all CRDs are ready", s.Name()) + close(ready) + return ctx.Err() } From 497cb6b4a21f279b558f0ca1132a8d1c1b3db480 Mon Sep 17 00:00:00 2001 From: Miguel Angel Ajo Pelayo Date: Thu, 3 Feb 2022 11:37:37 +0100 Subject: [PATCH 04/28] Make kube-scheduler Service return errors for healthcheck (#584) Signed-off-by: Miguel Angel Ajo --- pkg/controllers/kube-scheduler.go | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/pkg/controllers/kube-scheduler.go b/pkg/controllers/kube-scheduler.go index 8144d4375a4..eccbb011735 100644 --- a/pkg/controllers/kube-scheduler.go +++ b/pkg/controllers/kube-scheduler.go @@ -17,6 +17,7 @@ package controllers import ( "context" + "errors" "fmt" "io/ioutil" "os" @@ -98,12 +99,14 @@ leaderElection: func (s *KubeScheduler) Run(ctx context.Context, ready chan<- struct{}, stopped chan<- struct{}) error { defer close(stopped) + errorChannel := make(chan error, 1) // run readiness check go func() { healthcheckStatus := util.RetryInsecureHttpsGet("https://127.0.0.1:10259/healthz") if healthcheckStatus != 200 { - klog.Fatalf("%s healthcheck failed", s.Name(), fmt.Errorf("kube-scheduler failed to start")) + klog.Errorf("%s healthcheck failed", s.Name(), fmt.Errorf("kube-scheduler failed to start")) + errorChannel <- errors.New("kube-scheduler healthcheck failed") } klog.Infof("%s is ready", s.Name()) @@ -115,9 +118,9 @@ func (s *KubeScheduler) Run(ctx context.Context, ready chan<- struct{}, stopped return err } - if err := kubescheduler.Run(ctx, cc, sched); err != nil { - return err - } + go func() { + errorChannel <- kubescheduler.Run(ctx, cc, sched) + }() - return ctx.Err() + return <-errorChannel } From 310d41a2d5ab172c18d23ff2a4ef6fedec96df68 Mon Sep 17 00:00:00 2001 From: Miguel Angel Ajo Pelayo Date: Thu, 3 Feb 2022 11:38:01 +0100 Subject: [PATCH 05/28] Make kube-api-server to return errors from healthcheck 
(#585) Signed-off-by: Miguel Angel Ajo --- pkg/controllers/kube-apiserver.go | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/pkg/controllers/kube-apiserver.go b/pkg/controllers/kube-apiserver.go index 399680f9ffc..fa61480858f 100644 --- a/pkg/controllers/kube-apiserver.go +++ b/pkg/controllers/kube-apiserver.go @@ -215,24 +215,25 @@ func (s *KubeAPIServer) configureOAuth(cfg *config.MicroshiftConfig) error { func (s *KubeAPIServer) Run(ctx context.Context, ready chan<- struct{}, stopped chan<- struct{}) error { defer close(stopped) + errorChannel := make(chan error, 1) // run readiness check go func() { restConfig, err := clientcmd.BuildConfigFromFlags("", s.kubeconfig) if err != nil { - klog.Warningf("%s readiness check: %v", s.Name(), err) - return + klog.Errorf("%s readiness check: %v", s.Name(), err) + errorChannel <- err } versionedClient, err := kubernetes.NewForConfig(restConfig) if err != nil { - klog.Warningf("%s readiness check: %v", s.Name(), err) - return + klog.Errorf("%s readiness check: %v", s.Name(), err) + errorChannel <- err } if genericcontrollermanager.WaitForAPIServer(versionedClient, kubeAPIStartupTimeout*time.Second) != nil { - klog.Warningf("%s readiness check timed out: %v", s.Name(), err) - return + klog.Errorf("%s readiness check timed out: %v", s.Name(), err) + errorChannel <- err } klog.Infof("%s is ready", s.Name()) @@ -249,8 +250,9 @@ func (s *KubeAPIServer) Run(ctx context.Context, ready chan<- struct{}, stopped return fmt.Errorf("%s configuration error: %v", s.Name(), utilerrors.NewAggregate(errs)) } - if err := kubeapiserver.Run(completedOptions, ctx.Done()); err != nil { - return err - } - return ctx.Err() + go func() { + errorChannel <- kubeapiserver.Run(completedOptions, ctx.Done()) + }() + + return <-errorChannel } From c5c2c9d6de5e80ad7c0ecff7a9d3aafdfde5c3ff Mon Sep 17 00:00:00 2001 From: Ryan Cook Date: Thu, 3 Feb 2022 09:56:06 -0500 Subject: [PATCH 06/28] Bot token (#597) If we decide to go the bot route this should accept the new secret to create the release --- .github/workflows/release.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index c8f138bc078..38fe82af3b6 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -29,4 +29,4 @@ jobs: shell: bash run: make release TOKEN=${GITHUB_TOKEN} env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GITHUB_TOKEN: ${{ secrets.BOT_GITHUB_TOKEN }} From 05e616f355905918d577a99402f40af7056e1202 Mon Sep 17 00:00:00 2001 From: Miguel Angel Ajo Pelayo Date: Mon, 7 Feb 2022 12:17:59 +0100 Subject: [PATCH 07/28] Make kube-controller-manager service return errors (#583) Signed-off-by: Miguel Angel Ajo --- pkg/controllers/kube-controller-manager.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/pkg/controllers/kube-controller-manager.go b/pkg/controllers/kube-controller-manager.go index 32ecfb783a3..bec3099d537 100644 --- a/pkg/controllers/kube-controller-manager.go +++ b/pkg/controllers/kube-controller-manager.go @@ -17,7 +17,7 @@ package controllers import ( "context" - "fmt" + "errors" "path/filepath" "github.com/spf13/cobra" @@ -95,12 +95,14 @@ func (s *KubeControllerManager) configure(cfg *config.MicroshiftConfig) { func (s *KubeControllerManager) Run(ctx context.Context, ready chan<- struct{}, stopped chan<- struct{}) error { defer close(stopped) + errorChannel := make(chan error, 1) // run readiness check go func() { 
healthcheckStatus := util.RetryInsecureHttpsGet("https://127.0.0.1:10257/healthz") if healthcheckStatus != 200 { - klog.Fatalf("", fmt.Errorf("kube-controller-manager failed to start")) + klog.Errorf("kube-controller-manager failed to start") + errorChannel <- errors.New("kube-controller-manager failed to start") } klog.Infof("%s is ready", s.Name()) @@ -117,10 +119,9 @@ func (s *KubeControllerManager) Run(ctx context.Context, ready chan<- struct{}, // return err //} - // Run runs the KubeControllerManagerOptions. This should never exit. - if err := kubecm.Run(c.Complete(), ctx.Done()); err != nil { - return err - } + go func() { + errorChannel <- kubecm.Run(c.Complete(), ctx.Done()) + }() - return ctx.Err() + return <-errorChannel } From 077be17e2b307f11bb20778126c671972592445c Mon Sep 17 00:00:00 2001 From: Ryan Cook Date: Wed, 9 Feb 2022 07:40:56 -0500 Subject: [PATCH 08/28] Fix of selinux directories (#600) * fix of selinux directories include selinux labeling only if enabled Signed-off-by: Ryan Cook --- packaging/rpm/microshift.spec | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/packaging/rpm/microshift.spec b/packaging/rpm/microshift.spec index 0adfb7bf0ab..e44640cef0c 100644 --- a/packaging/rpm/microshift.spec +++ b/packaging/rpm/microshift.spec @@ -17,6 +17,17 @@ %define selinux_policyver 3.14.3-67 %define container_policyver 2.167.0-1 %define container_policy_epoch 2 +%define microshift_relabel_files() \ + mkdir -p /var/hpvolumes; \ + mkdir -p /var/run/flannel; \ + mkdir -p /var/run/kubelet; \ + mkdir -p /var/lib/kubelet/pods; \ + mkdir -p /var/run/secrets/kubernetes.io/serviceaccount; \ + restorecon -R /var/hpvolumes; \ + restorecon -R /var/run/kubelet; \ + restorecon -R /var/run/flannel; \ + restorecon -R /var/lib/kubelet/pods; \ + restorecon -R /var/run/secrets/kubernetes.io/serviceaccount # Git related details @@ -157,7 +168,6 @@ mkdir -p -m755 %{buildroot}/var/run/flannel mkdir -p -m755 %{buildroot}/var/run/kubelet mkdir -p -m755 %{buildroot}/var/lib/kubelet/pods mkdir -p -m755 %{buildroot}/var/run/secrets/kubernetes.io/serviceaccount -mkdir -p -m755 %{buildroot}/var/hpvolumes install -d %{buildroot}%{_datadir}/selinux/packages/%{selinuxtype} install -m644 packaging/selinux/microshift.pp.bz2 %{buildroot}%{_datadir}/selinux/packages/%{selinuxtype} @@ -177,7 +187,7 @@ fi %selinux_modules_install -s %{selinuxtype} %{_datadir}/selinux/packages/%{selinuxtype}/microshift.pp.bz2 if /usr/sbin/selinuxenabled ; then %microshift_relabel_files -fi; +fi %postun selinux @@ -208,11 +218,13 @@ fi /var/run/kubelet /var/lib/kubelet/pods /var/run/secrets/kubernetes.io/serviceaccount -/var/hpvolumes %{_datadir}/selinux/packages/%{selinuxtype}/microshift.pp.bz2 %ghost %{_sharedstatedir}/selinux/%{selinuxtype}/active/modules/200/microshift %changelog +* Mon Feb 7 2022 Ryan Cook . 4.8.0-0.microshiftr-2022_02_02_194009_3 +- Selinux directory creation and labeling + * Wed Feb 2 2022 Ryan Cook . 4.8.0-0.microshift-2022_01_04_175420_25 - Define specific selinux policy version to help manage selinux package From 98f63029ea174b2dab56b89151985185f9936557 Mon Sep 17 00:00:00 2001 From: Miguel Angel Ajo Pelayo Date: Tue, 1 Mar 2022 19:52:33 +0100 Subject: [PATCH 09/28] Install /etc/crio/crio.conf.d for containerized MicroShift (#608) Otherwise some containers start before the flannel setup is ready. This configuration guarantees that MicroShift. 
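For orientation: the drop-in being copied is not part of this diff. A minimal sketch of the kind of file CRI-O merges from /etc/crio/crio.conf.d — assumed content only, not the actual packaging/crio.conf.d/microshift.conf:

    # illustrative CRI-O drop-in; the keys are real CRI-O options, the values are assumptions
    [crio.network]
    network_dir = "/etc/cni/net.d"
    plugin_dirs = ["/opt/cni/bin", "/usr/libexec/cni"]

Files in this directory override the matching keys of the main crio.conf the next time CRI-O loads its configuration.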
Fixes-Issue: #605 Signed-off-by: Miguel Angel Ajo Pelayo --- packaging/images/microshift/Dockerfile | 7 ++++++- packaging/images/microshift/entrypoint.sh | 7 +++++++ packaging/systemd/microshift-containerized.service | 2 +- 3 files changed, 14 insertions(+), 2 deletions(-) create mode 100755 packaging/images/microshift/entrypoint.sh diff --git a/packaging/images/microshift/Dockerfile b/packaging/images/microshift/Dockerfile index 4b4d153c396..a450ade96fb 100644 --- a/packaging/images/microshift/Dockerfile +++ b/packaging/images/microshift/Dockerfile @@ -29,7 +29,12 @@ RUN microdnf install -y \ && microdnf clean all COPY --from=builder /opt/app-root/src/github.com/redhat-et/microshift/_output/bin/linux_$ARCH/microshift /usr/bin/microshift -ENTRYPOINT ["/usr/bin/microshift"] +RUN mkdir -p /root/crio.conf.d + +COPY packaging/crio.conf.d/microshift.conf /root/crio.conf.d/microshift.conf +COPY packaging/images/microshift/entrypoint.sh /root/entrypoint.sh + +ENTRYPOINT ["/root/entrypoint.sh"] CMD ["run"] # To start: diff --git a/packaging/images/microshift/entrypoint.sh b/packaging/images/microshift/entrypoint.sh new file mode 100755 index 00000000000..dcd935e0384 --- /dev/null +++ b/packaging/images/microshift/entrypoint.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +mkdir -p /etc/crio/crio.conf.d +cp /root/crio.conf.d/microshift.conf /etc/crio/crio.conf.d/microshift.conf + +# switch to microshift process +exec /usr/bin/microshift run diff --git a/packaging/systemd/microshift-containerized.service b/packaging/systemd/microshift-containerized.service index d74af64f5c3..b6e82a29d8a 100644 --- a/packaging/systemd/microshift-containerized.service +++ b/packaging/systemd/microshift-containerized.service @@ -15,7 +15,7 @@ Restart=on-failure TimeoutStopSec=70 ExecStartPre=/usr/bin/mkdir -p /var/lib/kubelet ; /usr/bin/mkdir -p /var/hpvolumes ExecStartPre=/bin/rm -f %t/%n.ctr-id -ExecStart=/usr/bin/podman run --cidfile=%t/%n.ctr-id --cgroups=no-conmon --rm --replace --sdnotify=container --label io.containers.autoupdate=registry --network=host --privileged -d --name microshift -v /var/hpvolumes:/var/hpvolumes:z,rw,rshared -v /var/run/crio/crio.sock:/var/run/crio/crio.sock:rw,rshared -v microshift-data:/var/lib/microshift:rw,rshared -v /var/lib/kubelet:/var/lib/kubelet:z,rw,rshared -v /var/log:/var/log quay.io/microshift/microshift:latest +ExecStart=/usr/bin/podman run --cidfile=%t/%n.ctr-id --cgroups=no-conmon --rm --replace --sdnotify=container --label io.containers.autoupdate=registry --network=host --privileged -d --name microshift -v /var/hpvolumes:/var/hpvolumes:z,rw,rshared -v /var/run/crio/crio.sock:/var/run/crio/crio.sock:rw,rshared -v microshift-data:/var/lib/microshift:rw,rshared -v /var/lib/kubelet:/var/lib/kubelet:z,rw,rshared -v /var/log:/var/log -v /etc:/etc quay.io/microshift/microshift:latest ExecStop=/usr/bin/podman stop --ignore --cidfile=%t/%n.ctr-id ExecStopPost=/usr/bin/podman rm -f --ignore --cidfile=%t/%n.ctr-id Type=notify From 975561da5be919c3196e58b91eaa572d1d8239b2 Mon Sep 17 00:00:00 2001 From: Parul Singh Date: Fri, 4 Mar 2022 05:00:32 -0500 Subject: [PATCH 10/28] Pre-loading container images into CRI-O (#568) microshift-images.spec for additional r/o container image stores. 
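For context, the read-only store created below under /opt/microshift/images is made visible to CRI-O and Podman through the additionalimagestores option of /etc/containers/storage.conf. After the %post scriptlet has run, the relevant stanza looks roughly like this (illustrative excerpt; the surrounding options vary by distribution):

    # /etc/containers/storage.conf (excerpt)
    [storage.options]
    additionalimagestores = [
    "/opt/microshift/images",
    ]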
Signed-off-by: Parul Co-authored-by: Miguel Angel Ajo --- .../rpm/make-microshift-app-images-rpm.sh | 83 +++++++++++++ packaging/rpm/make-microshift-images-rpm.sh | 48 ++++++++ packaging/rpm/microshift-images.spec | 116 ++++++++++++++++++ pkg/release/get.sh | 41 +++++++ 4 files changed, 288 insertions(+) create mode 100755 packaging/rpm/make-microshift-app-images-rpm.sh create mode 100755 packaging/rpm/make-microshift-images-rpm.sh create mode 100644 packaging/rpm/microshift-images.spec create mode 100755 pkg/release/get.sh diff --git a/packaging/rpm/make-microshift-app-images-rpm.sh b/packaging/rpm/make-microshift-app-images-rpm.sh new file mode 100755 index 00000000000..1c52ce1d3ca --- /dev/null +++ b/packaging/rpm/make-microshift-app-images-rpm.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash + +# First arg: file path containing user images per line +# Second arg: container storage dir path +# Third arg: RPMBUILD_DIR + +RPMBUILD_DIR=$3 +_img_dir_=$2 + +declare -a ARRAY + +#link filedescriptor 10 with stdin (standard input) +exec 10<&0 + +#stdin replaced with a file supplied as a first argument +exec < $1 +let count=0 + +#read user images into ARRAY +while read LINE; do + ARRAY[$count]=$LINE + ((count++)) +done + +#restore stdin from file descriptor 10 then close filedescriptor 10 +exec 0<&10 10<&- + +#Generate microshift-app-images.spec +touch ./microshift-app-images.spec +cat >./microshift-app-images.spec < /dev/null && pwd )" +BASE_VERSION="$(${SCRIPT_DIR}/../../pkg/release/get.sh base)" +TARBALL_FILE="microshift-pkg-release-${BASE_VERSION}.tar.gz" +RPMBUILD_DIR="${SCRIPT_DIR}/_rpmbuild/" +BUILD=${BUILD:-$2} +BUILD=${BUILD:-all} +TARGET=${TARGET:-$3} +TARGET=${TARGET:-x86_64} + + +case $BUILD in + all) RPMBUILD_OPT=-ba ;; + rpm) RPMBUILD_OPT=-bb ;; + srpm) RPMBUILD_OPT=-bs ;; +esac + +ARCHITECTURES=${ARCHITECTURES:-"x86_64 arm64 arm ppc64le riscv64"} + +build() { + cat >"${RPMBUILD_DIR}"SPECS/microshift-images.spec <> "${RPMBUILD_DIR}"SPECS/microshift-images.spec + echo "" >> "${RPMBUILD_DIR}"SPECS/microshift-images.spec + done + + cat "${SCRIPT_DIR}/microshift-images.spec" >> "${RPMBUILD_DIR}SPECS/microshift-images.spec" + + sudo rpmbuild "${RPMBUILD_OPT}" --target $TARGET --define "_topdir ${RPMBUILD_DIR}" "${RPMBUILD_DIR}SPECS/microshift-images.spec" +} + +# prepare the rpmbuild env +mkdir -p "${RPMBUILD_DIR}"/{BUILD,RPMS,SOURCES,SPECS,SRPMS} + +case $1 in + local) + build + ;; + *) + echo "Usage: $0 local [all|rpm|srpm]" + exit 1 +esac diff --git a/packaging/rpm/microshift-images.spec b/packaging/rpm/microshift-images.spec new file mode 100644 index 00000000000..6d2abcabbec --- /dev/null +++ b/packaging/rpm/microshift-images.spec @@ -0,0 +1,116 @@ +Name: microshift-images + +# disable dynamic rpmbuild checks +%global __os_install_post /bin/true +%global __arch_install_post /bin/true +AutoReqProv: no + +# where do we want the images to be stored on the final system +%global imageStore /opt/microshift/images +%global imageStoreSed %(echo %{imageStore} | sed 's/\//\\\//g') + +%define version %(echo %{baseVersion} | sed s/-/_/g) + +# to-be-improved: +# avoid warnings for container layers: +# - warning: absolute symlink: +# - warning: Duplicate build-ids + +Version: %{version} +Release: 2 + +Summary: MicroShift related container images +License: Apache License 2.0 +URL: https://github.com/redhat-et/microshift + +BuildRequires: podman +Requires: crio + + +%description +This rpm creates a custom RO container storage for the MicroShift container images +and pull images and add path to additional 
container image stores. + +%prep + + +if [ -d %{buildroot}%{imageStore} ] +then + sudo rm -rf %{buildroot}%{imageStore} +fi + +%build + + +%install + +mkdir -p %{buildroot}%{imageStore} + +%define arch %{_arch} + +# aarch64 is arm64 for container regisitries + +%ifarch %{arm} aarch64 +%define arch arm64 +%endif + +pull_arch="--arch %{arch}" + +# for x86_64 we don't want to specify the arch otherwise quay gets grumpy + +%ifarch x86_64 +pull_arch="" +images=%{images_x86_64} +%endif + +%ifarch %{arm} +images=%{images_arm} +%endif + +%ifarch %{arm} aarch64 +images=%{images_arm64} +%endif + +%ifarch ppc64le +images=%{images_ppc64le} +%endif + +%ifarch riscv64 +images=%{images_riscv64} +%endif + + +for val in ${images}; do + podman pull ${pull_arch} --root %{buildroot}%{imageStore} $val +done + +# check, why do we need this? +# sudo chmod -R a+rx %{imageStore} + +%post + +# only on install (1), not on upgrades (2) +if [ $1 -eq 1 ]; then + sed -i '/^additionalimagestores =*/a "%{imageStore}",' /etc/containers/storage.conf + # if crio was already started, restart it so it read from new imagestore + systemctl is-active --quiet crio && systemctl restart --quiet crio +fi + +%postun + +# only on uninstall (0), not on upgrades(1) +if [ $1 -eq 0 ]; + sed -i '/"${imageStoreSed}",/d" /etc/containers/storage.conf + systemctl is-active --quiet crio && systemctl restart --quiet crio + +fi + +%files +%{imageStore}/* + +%changelog +* Wed Mar 2 2022 Miguel Angel Ajo . 4.8.0_0.okd_2021_10_10_030117-2 +- Automatically get architecture images and OKD base version + +* Wed Feb 16 2022 Parul Singh . 4.8.0-0.microshiftr-2022_02_02_194009_3 +- Initial packaging of additional RO container storage. diff --git a/pkg/release/get.sh b/pkg/release/get.sh new file mode 100755 index 00000000000..2b3ad3ddffd --- /dev/null +++ b/pkg/release/get.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +function get_base { + grep "var Base" "${SCRIPT_DIR}/release.go" | cut -d\" -f 2 +} + +function add_bases { + base=$(get_base) + sed "s/:$/:${base}/g" # some lines have "xxxxx:" + Base like flannel +} + +function get_image_list { + + cat $1 | grep "Image = map\[string\]string" -A 100 | grep '":' | cut -d\" -f4 | \ + add_bases +} + +function get_images { + arch=$1 + case $arch in + x86_64|amd64) get_image_list "${SCRIPT_DIR}/release_amd64.go" ;; + *) get_image_list "${SCRIPT_DIR}/release.go" ;; + esac +} + +function usage { + echo "usage:" + echo " get.sh base : prints the OKD base version for this MicroShift codebase" + echo " get.sh images : prints image list used by this MicroShift codebase and architecture" + exit 1 +} + +case $1 in + base) get_base ;; + images) get_images $2 ;; + *) usage +esac + + From 2c2dd657dddc6eb4372432c8bb343c80fbd79839 Mon Sep 17 00:00:00 2001 From: Miguel Angel Ajo Pelayo Date: Fri, 4 Mar 2022 11:01:36 +0100 Subject: [PATCH 11/28] Specify the right pause image used by cri-o (#611) Otherwise the later container rpm-packaging patches will grab the wrong image, and the pause image is missing for offline systems. 
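For context: CRI-O resolves its pause (infra) image from its own configuration, so the tag pinned here has to agree with what the image-packaging patches pre-load into the read-only store. In crio.conf terms the relevant setting is the following (shown for illustration; not taken from this series):

    # illustrative crio.conf excerpt; pause_image is a real CRI-O key
    [crio.image]
    pause_image = "k8s.gcr.io/pause:3.2"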
Signed-off-by: Miguel Angel Ajo --- pkg/release/release_amd64.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/release/release_amd64.go b/pkg/release/release_amd64.go index 2727358a2b1..1dac5193121 100644 --- a/pkg/release/release_amd64.go +++ b/pkg/release/release_amd64.go @@ -28,7 +28,7 @@ func init() { "kube_flannel_cni": "quay.io/microshift/flannel-cni:" + Base, "kube_rbac_proxy": "quay.io/openshift/okd-content@sha256:459f15f0e457edaf04fa1a44be6858044d9af4de276620df46dc91a565ddb4ec", "kubevirt_hostpath_provisioner": "quay.io/kubevirt/hostpath-provisioner:v0.8.0", - "pause": "k8s.gcr.io/pause", + "pause": "k8s.gcr.io/pause:3.2", "service_ca_operator": "quay.io/openshift/okd-content@sha256:dd1cd4d7b1f2d097eaa965bc5e2fe7ebfe333d6cbaeabc7879283af1a88dbf4e", } } From 27241be8ff7b9f0b36d42b3b1fd3b8a09ca80e3b Mon Sep 17 00:00:00 2001 From: Adam Kaplan Date: Mon, 7 Mar 2022 09:30:48 -0500 Subject: [PATCH 12/28] Set Default Audit Log Retention (#616) If an audit log directory is provided, set the default audit log retention age to 7 days. When `--audit-log-maxage` is not specified, kube-apiserver audit logs are retained indefinitely, taking up signficant space on the host node. --- pkg/controllers/kube-apiserver.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/controllers/kube-apiserver.go b/pkg/controllers/kube-apiserver.go index fa61480858f..6fe70a17e13 100644 --- a/pkg/controllers/kube-apiserver.go +++ b/pkg/controllers/kube-apiserver.go @@ -118,6 +118,7 @@ func (s *KubeAPIServer) configure(cfg *config.MicroshiftConfig) { if cfg.AuditLogDir != "" { args = append(args, "--audit-log-path="+filepath.Join(cfg.AuditLogDir, "kube-apiserver-audit.log")) + args = append(args, "--audit-log-maxage=7") } // fake the kube-apiserver cobra command to parse args into serverOptions From 58acf59a6e027f62bf033b75d5619dc5db369af3 Mon Sep 17 00:00:00 2001 From: Miguel Angel Ajo Pelayo Date: Mon, 7 Mar 2022 20:44:15 +0100 Subject: [PATCH 13/28] Pack containers into GitHub CI (#615) * Pack containers during GitHub CI release.sh These .tar.gzs can be later used by common non-podman capable builders (copr) to create rpms with the container images. 
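A hypothetical consumer of these archives: a builder with no podman available can unpack a per-architecture tarball straight into the read-only store path used by microshift-images.spec. The file name and destination below are placeholders derived from the scripts in this series, not commands the series itself ships:

    # unpack a pre-built per-arch container archive into the additional image store
    mkdir -p /opt/microshift/images
    tar xjf microshift-containers-<base-version>-amd64.tar.bz2 -C /opt/microshift/images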
Signed-off-by: Miguel Angel Ajo * Address comments for Makefile --- .github/workflows/release.yaml | 2 +- Makefile | 8 +++++ packaging/images/components/archive.sh | 43 ++++++++++++++++++++++++++ scripts/release.sh | 3 ++ 4 files changed, 55 insertions(+), 1 deletion(-) create mode 100755 packaging/images/components/archive.sh diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 38fe82af3b6..0718c1e447c 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -20,7 +20,7 @@ jobs: password: ${{ secrets.REGISTRY_PASSWORD }} - name: Install required packages - run: sudo apt-get update && sudo apt install build-essential qemu-user qemu-user-static + run: sudo apt-get update && sudo apt install build-essential qemu-user qemu-user-static podman - name: Checkout source uses: actions/checkout@v2 diff --git a/Makefile b/Makefile index 9d541fed822..0edff7d2985 100644 --- a/Makefile +++ b/Makefile @@ -218,6 +218,14 @@ build-containerized-all-in-one-iptables-arm64: +$(MAKE) _build_containerized_aio ARCH=arm64 IPTABLES=iptables .PHONY: build-containerized-all-in-one-iptables-arm64 +############################### +# container image packaging # +############################### + +tar-ocp-containers: + sudo ./packaging/images/components/archive.sh +.PHONY: image-tars + ############################### # dev targets # ############################### diff --git a/packaging/images/components/archive.sh b/packaging/images/components/archive.sh new file mode 100755 index 00000000000..91f106c3392 --- /dev/null +++ b/packaging/images/components/archive.sh @@ -0,0 +1,43 @@ +#!/bin/bash +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +get="${SCRIPT_DIR}/../../../pkg/release/get.sh" + +ARCHITECTURES=${ARCHITECTURES:-"arm64 amd64"} +BASE_VERSION=${BASE_VERSION:-$("${get}" base)} +OUTPUT_DIR=${OUTPUT_DIR:-$(pwd)/archive} + +TMP_DIR=$(mktemp -d) + +mkdir -p "${OUTPUT_DIR}" +chmod a+rwx "${OUTPUT_DIR}" + +for arch in $ARCHITECTURES; do + images=$("${get}" images $arch) + storage="${TMP_DIR}/${arch}/containers" + mkdir -p "${storage}" + echo "Pulling images for architecture ${arch} ===================" + for image in $images; do + echo pulling $image @$arch + # some imported images are armhfp instead of arm + podman pull --arch $arch --root "${storage}" "${image}" + if [ $? -ne 0 ]; then + if [ "${arch}" == "arm" ]; then + echo "Fallback arm -> armhfp" + podman pull --arch armhfp --root "${TMP_DIR}/${arch}" "${image}" || exit 1 + else + echo "Couldn't pull image ${image} for ${arch}" + exit 1 + fi + fi + done + + echo "" + echo "Packing tarball for architecture ${arch} ==================" + pushd ${storage} + output_file="${OUTPUT_DIR}/microshift-containers-${BASE_VERSION}-${arch}.tar.bz2" + echo " > ${output_file}" + tar cfj "${OUTPUT_DIR}/microshift-containers-${BASE_VERSION}-${arch}.tar.bz2" . 
+ chmod a+rw "${OUTPUT_DIR}/microshift-containers-${BASE_VERSION}-${arch}.tar.bz2" + popd + rm -rf ${storage} +done diff --git a/scripts/release.sh b/scripts/release.sh index ab53f3ee3d6..f93560b5116 100755 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -304,6 +304,9 @@ if [ $NIGHTLY -eq 1 ]; then exit 0 fi +# create container tar.gzs for the consumed container images for non-nightly releases +ARCHITECTURES="amd64 arm64" OUTPUT_DIR="${STAGE_DIR}" sudo -E "${ROOT}/packaging/image/components/archive.sh" || exit 1 + # publish binaries UPLOAD_URL="$(git_create_release "$API_DATA" "$TOKEN")" || exit 1 git_post_artifacts "$STAGE_DIR" "$UPLOAD_URL" "$TOKEN" || exit 1 From 05a57a5e75cf74199d98d55769f454fdd2ed71a9 Mon Sep 17 00:00:00 2001 From: Derek Carr Date: Mon, 7 Mar 2022 17:36:20 -0500 Subject: [PATCH 14/28] Use default GOMAXPROCS behavior (#621) --- cmd/microshift/main.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/cmd/microshift/main.go b/cmd/microshift/main.go index f01aeceddcd..0b2ddbf2c1a 100644 --- a/cmd/microshift/main.go +++ b/cmd/microshift/main.go @@ -4,7 +4,6 @@ import ( "fmt" "math/rand" "os" - "runtime" "time" "github.com/spf13/cobra" @@ -24,10 +23,6 @@ func main() { logs.InitLogs() defer logs.FlushLogs() - if len(os.Getenv("GOMAXPROCS")) == 0 { - runtime.GOMAXPROCS(runtime.NumCPU()) - } - command := newCommand() if err := command.Execute(); err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) From e7cd34fda08c90a1863c496584bf78815ae1a069 Mon Sep 17 00:00:00 2001 From: Miguel Angel Ajo Pelayo Date: Tue, 8 Mar 2022 10:33:20 +0100 Subject: [PATCH 15/28] fix typo in reference (#623) Signed-off-by: Miguel Angel Ajo --- scripts/release.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/release.sh b/scripts/release.sh index f93560b5116..b067a4d5a22 100755 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -305,7 +305,7 @@ if [ $NIGHTLY -eq 1 ]; then fi # create container tar.gzs for the consumed container images for non-nightly releases -ARCHITECTURES="amd64 arm64" OUTPUT_DIR="${STAGE_DIR}" sudo -E "${ROOT}/packaging/image/components/archive.sh" || exit 1 +ARCHITECTURES="amd64 arm64" OUTPUT_DIR="${STAGE_DIR}" sudo -E "${ROOT}/packaging/images/components/archive.sh" || exit 1 # publish binaries UPLOAD_URL="$(git_create_release "$API_DATA" "$TOKEN")" || exit 1 From 06ca50db6434d945ee8036f5c3a919209239b823 Mon Sep 17 00:00:00 2001 From: Miguel Angel Ajo Pelayo Date: Fri, 11 Mar 2022 13:03:27 +0100 Subject: [PATCH 16/28] Fix AIO image to properly configure crio for MicroShift networking (#628) Fixes-Issue: #606 Signed-off-by: Miguel Angel Ajo Pelayo --- packaging/images/microshift-aio/Dockerfile | 2 +- .../images/microshift-aio/crio-bridge.conf | 18 ------------------ 2 files changed, 1 insertion(+), 19 deletions(-) delete mode 100644 packaging/images/microshift-aio/crio-bridge.conf diff --git a/packaging/images/microshift-aio/Dockerfile b/packaging/images/microshift-aio/Dockerfile index d7391aea7f2..2d9199efd7c 100644 --- a/packaging/images/microshift-aio/Dockerfile +++ b/packaging/images/microshift-aio/Dockerfile @@ -40,7 +40,7 @@ ENV BUILD_PATH=packaging/images/microshift-aio COPY --from=builder /opt/app-root/src/github.com/redhat-et/microshift/microshift /usr/local/bin/microshift COPY $BUILD_PATH/unit /usr/lib/systemd/system/microshift.service COPY $BUILD_PATH/kubelet-cgroups.conf /etc/systemd/system.conf.d/kubelet-cgroups.conf -COPY $BUILD_PATH/crio-bridge.conf /etc/cni/net.d/100-crio-bridge.conf +COPY packaging/crio.conf.d/microshift.conf 
/etc/crio/crio.conf.d/microshift.conf # OCP_VERSION pushed ahead to 4.9.11 because aarch64 is now available, and it is backwards compatible RUN export OCP_VERSION=4.9.11 && \ diff --git a/packaging/images/microshift-aio/crio-bridge.conf b/packaging/images/microshift-aio/crio-bridge.conf deleted file mode 100644 index 540fc11aae9..00000000000 --- a/packaging/images/microshift-aio/crio-bridge.conf +++ /dev/null @@ -1,18 +0,0 @@ -{ - "cniVersion": "0.4.0", - "name": "crio", - "type": "bridge", - "bridge": "cni0", - "isGateway": true, - "ipMasq": true, - "hairpinMode": true, - "ipam": { - "type": "host-local", - "routes": [ - { "dst": "0.0.0.0/0" } - ], - "ranges": [ - [{ "subnet": "10.42.0.0/24" }] - ] - } -} \ No newline at end of file From c7afae3d4b529942d40fd989f342dcb555db627a Mon Sep 17 00:00:00 2001 From: Parul Singh Date: Tue, 15 Mar 2022 06:53:32 -0400 Subject: [PATCH 17/28] update LICENSE (#625) * update LICENSE Signed-off-by: Parul * update date Signed-off-by: Parul --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index d6456956733..492c9d0ddfa 100644 --- a/LICENSE +++ b/LICENSE @@ -187,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2022 MicroShift Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. From 9b7e3f82936a3279b4606946ff716f98a924d931 Mon Sep 17 00:00:00 2001 From: Frank Zdarsky <13062727+fzdarsky@users.noreply.github.com> Date: Tue, 15 Mar 2022 11:58:19 +0100 Subject: [PATCH 18/28] Reorganize assets (#619) * Reorganize assets Regroups assets by component rather than kind to simplify rebasing. Signed-off-by: Frank A. Zdarsky * Regenerate bindata Signed-off-by: Frank A. 
Zdarsky --- .../flannel/clusterrole.yaml} | 0 .../flannel/clusterrolebinding.yaml} | 0 .../flannel/configmap.yaml} | 0 .../flannel/daemonset.yaml} | 0 .../flannel/podsecuritypolicy.yaml} | 0 .../flannel/service-account.yaml} | 0 .../hostpath-provisioner/clusterrole.yaml} | 0 .../clusterrolebinding.yaml} | 0 .../hostpath-provisioner/daemonset.yaml} | 0 .../hostpath-provisioner/namespace.yaml} | 0 .../hostpath-provisioner/scc.yaml} | 0 .../service-account.yaml} | 0 .../hostpath-provisioner/storageclass.yaml} | 0 .../dns/cluster-role-binding.yaml} | 0 .../openshift-dns/dns/cluster-role.yaml} | 0 .../openshift-dns/dns/configmap.yaml} | 0 .../openshift-dns/dns/daemonset.yaml} | 0 .../openshift-dns/dns/namespace.yaml} | 0 .../openshift-dns/dns/service-account.yaml} | 0 .../openshift-dns/dns/service.yaml} | 0 .../node-resolver/daemonset.yaml} | 0 .../node-resolver/service-account.yaml} | 0 .../cluster-role-binding.yaml} | 0 .../openshift-router/cluster-role.yaml} | 0 .../openshift-router/configmap.yaml} | 0 .../openshift-router/deployment.yaml} | 0 .../openshift-router/namespace.yaml} | 0 .../openshift-router/service-account.yaml} | 0 .../openshift-router/service-cloud.yaml} | 0 .../openshift-router/service-internal.yaml} | 0 .../service-ca/clusterrole.yaml} | 0 .../service-ca/clusterrolebinding.yaml} | 0 .../service-ca/deployment.yaml} | 0 .../service-ca/ns.yaml} | 0 .../service-ca/role.yaml} | 0 .../service-ca/rolebinding.yaml} | 0 .../service-ca/sa.yaml} | 0 .../service-ca/signing-cabundle.yaml} | 0 .../service-ca/signing-secret.yaml} | 0 pkg/assets/applier.go | 0 pkg/assets/apps.go | 4 +- pkg/assets/apps/bindata.go | 904 ------ pkg/assets/{crd => }/bindata.go | 2525 ++++++++++++++++- pkg/assets/core.go | 4 +- pkg/assets/core/bindata.go | 825 ------ pkg/assets/crd.go | 5 +- pkg/assets/rbac.go | 4 +- pkg/assets/rbac/bindata.go | 835 ------ pkg/assets/scc.go | 4 +- pkg/assets/scc/bindata.go | 705 ----- pkg/assets/storage.go | 4 +- pkg/assets/storage/bindata.go | 228 -- pkg/components/controllers.go | 52 +- pkg/components/networking.go | 12 +- pkg/components/storage.go | 14 +- scripts/bindata.sh | 11 +- 56 files changed, 2568 insertions(+), 3568 deletions(-) rename assets/{rbac/0000_00_flannel-clusterrole.yaml => components/flannel/clusterrole.yaml} (100%) rename assets/{rbac/0000_00_flannel-clusterrolebinding.yaml => components/flannel/clusterrolebinding.yaml} (100%) rename assets/{core/0000_00_flannel-configmap.yaml => components/flannel/configmap.yaml} (100%) rename assets/{apps/0000_00_flannel-daemonset.yaml => components/flannel/daemonset.yaml} (100%) rename assets/{rbac/0000_00_podsecuritypolicy-flannel.yaml => components/flannel/podsecuritypolicy.yaml} (100%) rename assets/{core/0000_00_flannel-service-account.yaml => components/flannel/service-account.yaml} (100%) rename assets/{rbac/0000_80_hostpath-provisioner-clusterrole.yaml => components/hostpath-provisioner/clusterrole.yaml} (100%) rename assets/{rbac/0000_80_hostpath-provisioner-clusterrolebinding.yaml => components/hostpath-provisioner/clusterrolebinding.yaml} (100%) rename assets/{apps/000_80_hostpath-provisioner-daemonset.yaml => components/hostpath-provisioner/daemonset.yaml} (100%) rename assets/{core/0000_80_hostpath-provisioner-namespace.yaml => components/hostpath-provisioner/namespace.yaml} (100%) rename assets/{scc/0000_80_hostpath-provisioner-securitycontextconstraints.yaml => components/hostpath-provisioner/scc.yaml} (100%) rename assets/{core/0000_80_hostpath-provisioner-serviceaccount.yaml => 
components/hostpath-provisioner/service-account.yaml} (100%) rename assets/{storage/0000_80_hostpath-provisioner-storageclass.yaml => components/hostpath-provisioner/storageclass.yaml} (100%) rename assets/{rbac/0000_70_dns_01-cluster-role-binding.yaml => components/openshift-dns/dns/cluster-role-binding.yaml} (100%) rename assets/{rbac/0000_70_dns_01-cluster-role.yaml => components/openshift-dns/dns/cluster-role.yaml} (100%) rename assets/{core/0000_70_dns_01-configmap.yaml => components/openshift-dns/dns/configmap.yaml} (100%) rename assets/{apps/0000_70_dns_01-dns-daemonset.yaml => components/openshift-dns/dns/daemonset.yaml} (100%) rename assets/{core/0000_70_dns_00-namespace.yaml => components/openshift-dns/dns/namespace.yaml} (100%) rename assets/{core/0000_70_dns_01-dns-service-account.yaml => components/openshift-dns/dns/service-account.yaml} (100%) rename assets/{core/0000_70_dns_01-service.yaml => components/openshift-dns/dns/service.yaml} (100%) rename assets/{apps/0000_70_dns_01-node-resolver-daemonset.yaml => components/openshift-dns/node-resolver/daemonset.yaml} (100%) rename assets/{core/0000_70_dns_01-node-resolver-service-account.yaml => components/openshift-dns/node-resolver/service-account.yaml} (100%) rename assets/{rbac/0000_80_openshift-router-cluster-role-binding.yaml => components/openshift-router/cluster-role-binding.yaml} (100%) rename assets/{rbac/0000_80_openshift-router-cluster-role.yaml => components/openshift-router/cluster-role.yaml} (100%) rename assets/{core/0000_80_openshift-router-cm.yaml => components/openshift-router/configmap.yaml} (100%) rename assets/{apps/0000_80_openshift-router-deployment.yaml => components/openshift-router/deployment.yaml} (100%) rename assets/{core/0000_80_openshift-router-namespace.yaml => components/openshift-router/namespace.yaml} (100%) rename assets/{core/0000_80_openshift-router-service-account.yaml => components/openshift-router/service-account.yaml} (100%) rename assets/{core/0000_80_openshift-router-external-service.yaml => components/openshift-router/service-cloud.yaml} (100%) rename assets/{core/0000_80_openshift-router-service.yaml => components/openshift-router/service-internal.yaml} (100%) rename assets/{rbac/0000_60_service-ca_00_clusterrole.yaml => components/service-ca/clusterrole.yaml} (100%) rename assets/{rbac/0000_60_service-ca_00_clusterrolebinding.yaml => components/service-ca/clusterrolebinding.yaml} (100%) rename assets/{apps/0000_60_service-ca_05_deploy.yaml => components/service-ca/deployment.yaml} (100%) rename assets/{core/0000_60_service-ca_01_namespace.yaml => components/service-ca/ns.yaml} (100%) rename assets/{rbac/0000_60_service-ca_00_role.yaml => components/service-ca/role.yaml} (100%) rename assets/{rbac/0000_60_service-ca_00_rolebinding.yaml => components/service-ca/rolebinding.yaml} (100%) rename assets/{core/0000_60_service-ca_04_sa.yaml => components/service-ca/sa.yaml} (100%) rename assets/{core/0000_60_service-ca_04_configmap.yaml => components/service-ca/signing-cabundle.yaml} (100%) rename assets/{core/0000_60_service-ca_04_secret.yaml => components/service-ca/signing-secret.yaml} (100%) mode change 100755 => 100644 pkg/assets/applier.go mode change 100755 => 100644 pkg/assets/apps.go delete mode 100644 pkg/assets/apps/bindata.go rename pkg/assets/{crd => }/bindata.go (69%) mode change 100755 => 100644 pkg/assets/core.go delete mode 100644 pkg/assets/core/bindata.go mode change 100755 => 100644 pkg/assets/crd.go mode change 100755 => 100644 pkg/assets/rbac.go delete mode 100644 
pkg/assets/rbac/bindata.go delete mode 100644 pkg/assets/scc/bindata.go delete mode 100644 pkg/assets/storage/bindata.go diff --git a/assets/rbac/0000_00_flannel-clusterrole.yaml b/assets/components/flannel/clusterrole.yaml similarity index 100% rename from assets/rbac/0000_00_flannel-clusterrole.yaml rename to assets/components/flannel/clusterrole.yaml diff --git a/assets/rbac/0000_00_flannel-clusterrolebinding.yaml b/assets/components/flannel/clusterrolebinding.yaml similarity index 100% rename from assets/rbac/0000_00_flannel-clusterrolebinding.yaml rename to assets/components/flannel/clusterrolebinding.yaml diff --git a/assets/core/0000_00_flannel-configmap.yaml b/assets/components/flannel/configmap.yaml similarity index 100% rename from assets/core/0000_00_flannel-configmap.yaml rename to assets/components/flannel/configmap.yaml diff --git a/assets/apps/0000_00_flannel-daemonset.yaml b/assets/components/flannel/daemonset.yaml similarity index 100% rename from assets/apps/0000_00_flannel-daemonset.yaml rename to assets/components/flannel/daemonset.yaml diff --git a/assets/rbac/0000_00_podsecuritypolicy-flannel.yaml b/assets/components/flannel/podsecuritypolicy.yaml similarity index 100% rename from assets/rbac/0000_00_podsecuritypolicy-flannel.yaml rename to assets/components/flannel/podsecuritypolicy.yaml diff --git a/assets/core/0000_00_flannel-service-account.yaml b/assets/components/flannel/service-account.yaml similarity index 100% rename from assets/core/0000_00_flannel-service-account.yaml rename to assets/components/flannel/service-account.yaml diff --git a/assets/rbac/0000_80_hostpath-provisioner-clusterrole.yaml b/assets/components/hostpath-provisioner/clusterrole.yaml similarity index 100% rename from assets/rbac/0000_80_hostpath-provisioner-clusterrole.yaml rename to assets/components/hostpath-provisioner/clusterrole.yaml diff --git a/assets/rbac/0000_80_hostpath-provisioner-clusterrolebinding.yaml b/assets/components/hostpath-provisioner/clusterrolebinding.yaml similarity index 100% rename from assets/rbac/0000_80_hostpath-provisioner-clusterrolebinding.yaml rename to assets/components/hostpath-provisioner/clusterrolebinding.yaml diff --git a/assets/apps/000_80_hostpath-provisioner-daemonset.yaml b/assets/components/hostpath-provisioner/daemonset.yaml similarity index 100% rename from assets/apps/000_80_hostpath-provisioner-daemonset.yaml rename to assets/components/hostpath-provisioner/daemonset.yaml diff --git a/assets/core/0000_80_hostpath-provisioner-namespace.yaml b/assets/components/hostpath-provisioner/namespace.yaml similarity index 100% rename from assets/core/0000_80_hostpath-provisioner-namespace.yaml rename to assets/components/hostpath-provisioner/namespace.yaml diff --git a/assets/scc/0000_80_hostpath-provisioner-securitycontextconstraints.yaml b/assets/components/hostpath-provisioner/scc.yaml similarity index 100% rename from assets/scc/0000_80_hostpath-provisioner-securitycontextconstraints.yaml rename to assets/components/hostpath-provisioner/scc.yaml diff --git a/assets/core/0000_80_hostpath-provisioner-serviceaccount.yaml b/assets/components/hostpath-provisioner/service-account.yaml similarity index 100% rename from assets/core/0000_80_hostpath-provisioner-serviceaccount.yaml rename to assets/components/hostpath-provisioner/service-account.yaml diff --git a/assets/storage/0000_80_hostpath-provisioner-storageclass.yaml b/assets/components/hostpath-provisioner/storageclass.yaml similarity index 100% rename from 
assets/storage/0000_80_hostpath-provisioner-storageclass.yaml rename to assets/components/hostpath-provisioner/storageclass.yaml diff --git a/assets/rbac/0000_70_dns_01-cluster-role-binding.yaml b/assets/components/openshift-dns/dns/cluster-role-binding.yaml similarity index 100% rename from assets/rbac/0000_70_dns_01-cluster-role-binding.yaml rename to assets/components/openshift-dns/dns/cluster-role-binding.yaml diff --git a/assets/rbac/0000_70_dns_01-cluster-role.yaml b/assets/components/openshift-dns/dns/cluster-role.yaml similarity index 100% rename from assets/rbac/0000_70_dns_01-cluster-role.yaml rename to assets/components/openshift-dns/dns/cluster-role.yaml diff --git a/assets/core/0000_70_dns_01-configmap.yaml b/assets/components/openshift-dns/dns/configmap.yaml similarity index 100% rename from assets/core/0000_70_dns_01-configmap.yaml rename to assets/components/openshift-dns/dns/configmap.yaml diff --git a/assets/apps/0000_70_dns_01-dns-daemonset.yaml b/assets/components/openshift-dns/dns/daemonset.yaml similarity index 100% rename from assets/apps/0000_70_dns_01-dns-daemonset.yaml rename to assets/components/openshift-dns/dns/daemonset.yaml diff --git a/assets/core/0000_70_dns_00-namespace.yaml b/assets/components/openshift-dns/dns/namespace.yaml similarity index 100% rename from assets/core/0000_70_dns_00-namespace.yaml rename to assets/components/openshift-dns/dns/namespace.yaml diff --git a/assets/core/0000_70_dns_01-dns-service-account.yaml b/assets/components/openshift-dns/dns/service-account.yaml similarity index 100% rename from assets/core/0000_70_dns_01-dns-service-account.yaml rename to assets/components/openshift-dns/dns/service-account.yaml diff --git a/assets/core/0000_70_dns_01-service.yaml b/assets/components/openshift-dns/dns/service.yaml similarity index 100% rename from assets/core/0000_70_dns_01-service.yaml rename to assets/components/openshift-dns/dns/service.yaml diff --git a/assets/apps/0000_70_dns_01-node-resolver-daemonset.yaml b/assets/components/openshift-dns/node-resolver/daemonset.yaml similarity index 100% rename from assets/apps/0000_70_dns_01-node-resolver-daemonset.yaml rename to assets/components/openshift-dns/node-resolver/daemonset.yaml diff --git a/assets/core/0000_70_dns_01-node-resolver-service-account.yaml b/assets/components/openshift-dns/node-resolver/service-account.yaml similarity index 100% rename from assets/core/0000_70_dns_01-node-resolver-service-account.yaml rename to assets/components/openshift-dns/node-resolver/service-account.yaml diff --git a/assets/rbac/0000_80_openshift-router-cluster-role-binding.yaml b/assets/components/openshift-router/cluster-role-binding.yaml similarity index 100% rename from assets/rbac/0000_80_openshift-router-cluster-role-binding.yaml rename to assets/components/openshift-router/cluster-role-binding.yaml diff --git a/assets/rbac/0000_80_openshift-router-cluster-role.yaml b/assets/components/openshift-router/cluster-role.yaml similarity index 100% rename from assets/rbac/0000_80_openshift-router-cluster-role.yaml rename to assets/components/openshift-router/cluster-role.yaml diff --git a/assets/core/0000_80_openshift-router-cm.yaml b/assets/components/openshift-router/configmap.yaml similarity index 100% rename from assets/core/0000_80_openshift-router-cm.yaml rename to assets/components/openshift-router/configmap.yaml diff --git a/assets/apps/0000_80_openshift-router-deployment.yaml b/assets/components/openshift-router/deployment.yaml similarity index 100% rename from 
assets/apps/0000_80_openshift-router-deployment.yaml rename to assets/components/openshift-router/deployment.yaml diff --git a/assets/core/0000_80_openshift-router-namespace.yaml b/assets/components/openshift-router/namespace.yaml similarity index 100% rename from assets/core/0000_80_openshift-router-namespace.yaml rename to assets/components/openshift-router/namespace.yaml diff --git a/assets/core/0000_80_openshift-router-service-account.yaml b/assets/components/openshift-router/service-account.yaml similarity index 100% rename from assets/core/0000_80_openshift-router-service-account.yaml rename to assets/components/openshift-router/service-account.yaml diff --git a/assets/core/0000_80_openshift-router-external-service.yaml b/assets/components/openshift-router/service-cloud.yaml similarity index 100% rename from assets/core/0000_80_openshift-router-external-service.yaml rename to assets/components/openshift-router/service-cloud.yaml diff --git a/assets/core/0000_80_openshift-router-service.yaml b/assets/components/openshift-router/service-internal.yaml similarity index 100% rename from assets/core/0000_80_openshift-router-service.yaml rename to assets/components/openshift-router/service-internal.yaml diff --git a/assets/rbac/0000_60_service-ca_00_clusterrole.yaml b/assets/components/service-ca/clusterrole.yaml similarity index 100% rename from assets/rbac/0000_60_service-ca_00_clusterrole.yaml rename to assets/components/service-ca/clusterrole.yaml diff --git a/assets/rbac/0000_60_service-ca_00_clusterrolebinding.yaml b/assets/components/service-ca/clusterrolebinding.yaml similarity index 100% rename from assets/rbac/0000_60_service-ca_00_clusterrolebinding.yaml rename to assets/components/service-ca/clusterrolebinding.yaml diff --git a/assets/apps/0000_60_service-ca_05_deploy.yaml b/assets/components/service-ca/deployment.yaml similarity index 100% rename from assets/apps/0000_60_service-ca_05_deploy.yaml rename to assets/components/service-ca/deployment.yaml diff --git a/assets/core/0000_60_service-ca_01_namespace.yaml b/assets/components/service-ca/ns.yaml similarity index 100% rename from assets/core/0000_60_service-ca_01_namespace.yaml rename to assets/components/service-ca/ns.yaml diff --git a/assets/rbac/0000_60_service-ca_00_role.yaml b/assets/components/service-ca/role.yaml similarity index 100% rename from assets/rbac/0000_60_service-ca_00_role.yaml rename to assets/components/service-ca/role.yaml diff --git a/assets/rbac/0000_60_service-ca_00_rolebinding.yaml b/assets/components/service-ca/rolebinding.yaml similarity index 100% rename from assets/rbac/0000_60_service-ca_00_rolebinding.yaml rename to assets/components/service-ca/rolebinding.yaml diff --git a/assets/core/0000_60_service-ca_04_sa.yaml b/assets/components/service-ca/sa.yaml similarity index 100% rename from assets/core/0000_60_service-ca_04_sa.yaml rename to assets/components/service-ca/sa.yaml diff --git a/assets/core/0000_60_service-ca_04_configmap.yaml b/assets/components/service-ca/signing-cabundle.yaml similarity index 100% rename from assets/core/0000_60_service-ca_04_configmap.yaml rename to assets/components/service-ca/signing-cabundle.yaml diff --git a/assets/core/0000_60_service-ca_04_secret.yaml b/assets/components/service-ca/signing-secret.yaml similarity index 100% rename from assets/core/0000_60_service-ca_04_secret.yaml rename to assets/components/service-ca/signing-secret.yaml diff --git a/pkg/assets/applier.go b/pkg/assets/applier.go old mode 100755 new mode 100644 diff --git a/pkg/assets/apps.go 
b/pkg/assets/apps.go old mode 100755 new mode 100644 index 152688e7727..93c1dbcdda3 --- a/pkg/assets/apps.go +++ b/pkg/assets/apps.go @@ -6,8 +6,6 @@ import ( "k8s.io/klog/v2" - appsassets "github.com/openshift/microshift/pkg/assets/apps" - "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" @@ -101,7 +99,7 @@ func applyApps(apps []string, applier readerApplier, render RenderFunc, params R for _, app := range apps { klog.Infof("Applying apps api %s", app) - objBytes, err := appsassets.Asset(app) + objBytes, err := Asset(app) if err != nil { return fmt.Errorf("error getting asset %s: %v", app, err) } diff --git a/pkg/assets/apps/bindata.go b/pkg/assets/apps/bindata.go deleted file mode 100644 index 87a822ae543..00000000000 --- a/pkg/assets/apps/bindata.go +++ /dev/null @@ -1,904 +0,0 @@ -// Package assets Code generated by go-bindata. (@generated) DO NOT EDIT. -// sources: -// assets/apps/0000_00_flannel-daemonset.yaml -// assets/apps/0000_60_service-ca_05_deploy.yaml -// assets/apps/0000_70_dns_01-dns-daemonset.yaml -// assets/apps/0000_70_dns_01-node-resolver-daemonset.yaml -// assets/apps/0000_80_openshift-router-deployment.yaml -// assets/apps/000_80_hostpath-provisioner-daemonset.yaml -package assets - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" -) - -type asset struct { - bytes []byte - info os.FileInfo -} - -type bindataFileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time -} - -// Name return file name -func (fi bindataFileInfo) Name() string { - return fi.name -} - -// Size return file size -func (fi bindataFileInfo) Size() int64 { - return fi.size -} - -// Mode return file mode -func (fi bindataFileInfo) Mode() os.FileMode { - return fi.mode -} - -// Mode return file modify time -func (fi bindataFileInfo) ModTime() time.Time { - return fi.modTime -} - -// IsDir return file whether a directory -func (fi bindataFileInfo) IsDir() bool { - return fi.mode&os.ModeDir != 0 -} - -// Sys return file is sys mode -func (fi bindataFileInfo) Sys() interface{} { - return nil -} - -var _assetsApps0000_00_flannelDaemonsetYaml = []byte(`apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: kube-flannel-ds - namespace: kube-system - labels: - tier: node - app: flannel -spec: - selector: - matchLabels: - app: flannel - template: - metadata: - labels: - tier: node - app: flannel - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: In - values: - - linux - hostNetwork: true - priorityClassName: system-node-critical - tolerations: - - operator: Exists - effect: NoSchedule - serviceAccountName: flannel - initContainers: - - name: install-cni-bin - image: {{ .ReleaseImage.kube_flannel_cni }} - command: - - cp - args: - - -f - - /flannel - - /opt/cni/bin/flannel - volumeMounts: - - name: cni-plugin - mountPath: /opt/cni/bin - - name: install-cni - image: {{ .ReleaseImage.kube_flannel }} - command: - - cp - args: - - -f - - /etc/kube-flannel/cni-conf.json - - /etc/cni/net.d/10-flannel.conflist - volumeMounts: - - name: cni - mountPath: /etc/cni/net.d - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - containers: - - name: kube-flannel - image: {{ .ReleaseImage.kube_flannel }} - command: - - /opt/bin/flanneld - args: - - --ip-masq - - --kube-subnet-mgr - resources: - requests: - cpu: "100m" - memory: "50Mi" - limits: - cpu: "100m" - memory: "50Mi" - securityContext: - privileged: false - capabilities: - add: 
["NET_ADMIN", "NET_RAW"] - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - volumeMounts: - - name: run - mountPath: /run/flannel - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - volumes: - - name: run - hostPath: - path: /run/flannel - - name: cni - hostPath: - path: /etc/cni/net.d - - name: flannel-cfg - configMap: - name: kube-flannel-cfg - - name: cni-plugin - hostPath: - path: /opt/cni/bin`) - -func assetsApps0000_00_flannelDaemonsetYamlBytes() ([]byte, error) { - return _assetsApps0000_00_flannelDaemonsetYaml, nil -} - -func assetsApps0000_00_flannelDaemonsetYaml() (*asset, error) { - bytes, err := assetsApps0000_00_flannelDaemonsetYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/apps/0000_00_flannel-daemonset.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsApps0000_60_serviceCa_05_deployYaml = []byte(`apiVersion: apps/v1 -kind: Deployment -metadata: - namespace: openshift-service-ca - name: service-ca - labels: - app: service-ca - service-ca: "true" -spec: - replicas: 1 - strategy: - type: Recreate - selector: - matchLabels: - app: service-ca - service-ca: "true" - template: - metadata: - name: service-ca - annotations: - target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' - labels: - app: service-ca - service-ca: "true" - spec: - securityContext: {} - serviceAccount: service-ca - serviceAccountName: service-ca - containers: - - name: service-ca-controller - image: {{ .ReleaseImage.service_ca_operator }} - imagePullPolicy: IfNotPresent - command: ["service-ca-operator", "controller"] - ports: - - containerPort: 8443 - # securityContext: - # runAsNonRoot: true - resources: - requests: - memory: 120Mi - cpu: 10m - volumeMounts: - - mountPath: /var/run/secrets/signing-key - name: signing-key - - mountPath: /var/run/configmaps/signing-cabundle - name: signing-cabundle - volumes: - - name: signing-key - secret: - secretName: {{.TLSSecret}} - - name: signing-cabundle - configMap: - name: {{.CAConfigMap}} - # nodeSelector: - # node-role.kubernetes.io/master: "" - priorityClassName: "system-cluster-critical" - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: "NoSchedule" - - key: "node.kubernetes.io/unreachable" - operator: "Exists" - effect: "NoExecute" - tolerationSeconds: 120 - - key: "node.kubernetes.io/not-ready" - operator: "Exists" - effect: "NoExecute" - tolerationSeconds: 120 -`) - -func assetsApps0000_60_serviceCa_05_deployYamlBytes() ([]byte, error) { - return _assetsApps0000_60_serviceCa_05_deployYaml, nil -} - -func assetsApps0000_60_serviceCa_05_deployYaml() (*asset, error) { - bytes, err := assetsApps0000_60_serviceCa_05_deployYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/apps/0000_60_service-ca_05_deploy.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsApps0000_70_dns_01DnsDaemonsetYaml = []byte(`kind: DaemonSet -apiVersion: apps/v1 -metadata: - labels: - dns.operator.openshift.io/owning-dns: default - name: dns-default - namespace: openshift-dns -spec: - selector: - matchLabels: - dns.operator.openshift.io/daemonset-dns: default - template: - metadata: - labels: - dns.operator.openshift.io/daemonset-dns: default - annotations: - 
target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' - spec: - serviceAccountName: dns - priorityClassName: system-node-critical - containers: - - name: dns - image: {{ .ReleaseImage.coredns }} - imagePullPolicy: IfNotPresent - terminationMessagePolicy: FallbackToLogsOnError - command: [ "coredns" ] - args: [ "-conf", "/etc/coredns/Corefile" ] - volumeMounts: - - name: config-volume - mountPath: /etc/coredns - readOnly: true - ports: - - containerPort: 5353 - name: dns - protocol: UDP - - containerPort: 5353 - name: dns-tcp - protocol: TCP - readinessProbe: - httpGet: - path: /ready - port: 8181 - scheme: HTTP - initialDelaySeconds: 10 - periodSeconds: 3 - successThreshold: 1 - failureThreshold: 3 - timeoutSeconds: 3 - livenessProbe: - httpGet: - path: /health - port: 8080 - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - resources: - requests: - cpu: 50m - memory: 70Mi - - name: kube-rbac-proxy - image: {{ .ReleaseImage.kube_rbac_proxy }} - args: - - --secure-listen-address=:9154 - - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 - - --upstream=http://127.0.0.1:9153/ - - --tls-cert-file=/etc/tls/private/tls.crt - - --tls-private-key-file=/etc/tls/private/tls.key - ports: - - containerPort: 9154 - name: metrics - resources: - requests: - cpu: 10m - memory: 40Mi - volumeMounts: - - mountPath: /etc/tls/private - name: metrics-tls - readOnly: true - dnsPolicy: Default - nodeSelector: - kubernetes.io/os: linux - volumes: - - name: config-volume - configMap: - items: - - key: Corefile - path: Corefile - name: dns-default - - name: metrics-tls - secret: - defaultMode: 420 - secretName: dns-default-metrics-tls - tolerations: - # DNS needs to run everywhere. Tolerate all taints - - operator: Exists - updateStrategy: - type: RollingUpdate - rollingUpdate: - # TODO: Consider setting maxSurge to a positive value. - maxSurge: 0 - # Note: The daemon controller rounds the percentage up - # (unlike the deployment controller, which rounds down). 
- maxUnavailable: 10% -`) - -func assetsApps0000_70_dns_01DnsDaemonsetYamlBytes() ([]byte, error) { - return _assetsApps0000_70_dns_01DnsDaemonsetYaml, nil -} - -func assetsApps0000_70_dns_01DnsDaemonsetYaml() (*asset, error) { - bytes, err := assetsApps0000_70_dns_01DnsDaemonsetYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/apps/0000_70_dns_01-dns-daemonset.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsApps0000_70_dns_01NodeResolverDaemonsetYaml = []byte(`apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: node-resolver - namespace: openshift-dns -spec: - revisionHistoryLimit: 10 - selector: - matchLabels: - dns.operator.openshift.io/daemonset-node-resolver: "" - template: - metadata: - annotations: - target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' - labels: - dns.operator.openshift.io/daemonset-node-resolver: "" - spec: - containers: - - command: - - /bin/bash - - -c - - | - #!/bin/bash - set -uo pipefail - - trap 'jobs -p | xargs kill || true; wait; exit 0' TERM - - NAMESERVER=${DNS_DEFAULT_SERVICE_HOST} - OPENSHIFT_MARKER="openshift-generated-node-resolver" - HOSTS_FILE="/etc/hosts" - TEMP_FILE="/etc/hosts.tmp" - - IFS=', ' read -r -a services <<< "${SERVICES}" - - # Make a temporary file with the old hosts file's attributes. - cp -f --attributes-only "${HOSTS_FILE}" "${TEMP_FILE}" - - while true; do - declare -A svc_ips - for svc in "${services[@]}"; do - # Fetch service IP from cluster dns if present. We make several tries - # to do it: IPv4, IPv6, IPv4 over TCP and IPv6 over TCP. The two last ones - # are for deployments with Kuryr on older OpenStack (OSP13) - those do not - # support UDP loadbalancers and require reaching DNS through TCP. - cmds=('dig -t A @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"' - 'dig -t AAAA @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"' - 'dig -t A +tcp +retry=0 @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"' - 'dig -t AAAA +tcp +retry=0 @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"') - for i in ${!cmds[*]} - do - ips=($(eval "${cmds[i]}")) - if [[ "$?" -eq 0 && "${#ips[@]}" -ne 0 ]]; then - svc_ips["${svc}"]="${ips[@]}" - break - fi - done - done - - # Update /etc/hosts only if we get valid service IPs - # We will not update /etc/hosts when there is coredns service outage or api unavailability - # Stale entries could exist in /etc/hosts if the service is deleted - if [[ -n "${svc_ips[*]-}" ]]; then - # Build a new hosts file from /etc/hosts with our custom entries filtered out - grep -v "# ${OPENSHIFT_MARKER}" "${HOSTS_FILE}" > "${TEMP_FILE}" - - # Append resolver entries for services - for svc in "${!svc_ips[@]}"; do - for ip in ${svc_ips[${svc}]}; do - echo "${ip} ${svc} ${svc}.${CLUSTER_DOMAIN} # ${OPENSHIFT_MARKER}" >> "${TEMP_FILE}" - done - done - - # TODO: Update /etc/hosts atomically to avoid any inconsistent behavior - # Replace /etc/hosts with our modified version if needed - cmp "${TEMP_FILE}" "${HOSTS_FILE}" || cp -f "${TEMP_FILE}" "${HOSTS_FILE}" - # TEMP_FILE is not removed to avoid file create/delete and attributes copy churn - fi - sleep 60 & wait - unset svc_ips - done - env: - - name: SERVICES - # Comma or space separated list of services - # NOTE: For now, ensure these are relative names; for each relative name, - # an alias with the CLUSTER_DOMAIN suffix will also be added. 
- value: "image-registry.openshift-image-registry.svc" - - name: NAMESERVER - value: 172.30.0.10 - - name: CLUSTER_DOMAIN - value: cluster.local - image: {{ .ReleaseImage.cli }} - imagePullPolicy: IfNotPresent - name: dns-node-resolver - resources: - requests: - cpu: 5m - memory: 21Mi - securityContext: - privileged: true - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /etc/hosts - name: hosts-file - dnsPolicy: ClusterFirst - hostNetwork: true - nodeSelector: - kubernetes.io/os: linux - priorityClassName: system-node-critical - restartPolicy: Always - schedulerName: default-scheduler - securityContext: {} - serviceAccount: node-resolver - serviceAccountName: node-resolver - terminationGracePeriodSeconds: 30 - tolerations: - - operator: Exists - volumes: - - hostPath: - path: /etc/hosts - type: File - name: hosts-file - updateStrategy: - rollingUpdate: - maxSurge: 0 - maxUnavailable: 33% - type: RollingUpdate -`) - -func assetsApps0000_70_dns_01NodeResolverDaemonsetYamlBytes() ([]byte, error) { - return _assetsApps0000_70_dns_01NodeResolverDaemonsetYaml, nil -} - -func assetsApps0000_70_dns_01NodeResolverDaemonsetYaml() (*asset, error) { - bytes, err := assetsApps0000_70_dns_01NodeResolverDaemonsetYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/apps/0000_70_dns_01-node-resolver-daemonset.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsApps0000_80_openshiftRouterDeploymentYaml = []byte(`# Deployment with default values -# Ingress Controller specific values are applied at runtime. -kind: Deployment -apiVersion: apps/v1 -metadata: - name: router-default - namespace: openshift-ingress - labels: - ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default -spec: - progressDeadlineSeconds: 600 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default - strategy: - rollingUpdate: - maxSurge: 0 - maxUnavailable: 25% - type: RollingUpdate - template: - metadata: - annotations: - "unsupported.do-not-use.openshift.io/override-liveness-grace-period-seconds": "10" - target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' - labels: - ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default - spec: - serviceAccountName: router - # nodeSelector is set at runtime. - priorityClassName: system-cluster-critical - containers: - - name: router - image: {{ .ReleaseImage.haproxy_router }} - imagePullPolicy: IfNotPresent - terminationMessagePolicy: FallbackToLogsOnError - ports: - - name: http - containerPort: 80 - hostPort: 80 - protocol: TCP - - name: https - containerPort: 443 - hostPort: 443 - protocol: TCP - - name: metrics - containerPort: 1936 - hostPort: 1936 - protocol: TCP - # Merged at runtime. 
- env: - # stats username and password are generated at runtime - - name: STATS_PORT - value: "1936" - - name: ROUTER_SERVICE_NAMESPACE - value: openshift-ingress - - name: DEFAULT_CERTIFICATE_DIR - value: /etc/pki/tls/private - - name: DEFAULT_DESTINATION_CA_PATH - value: /var/run/configmaps/service-ca/service-ca.crt - - name: ROUTER_CIPHERS - value: TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384 - - name: ROUTER_DISABLE_HTTP2 - value: "true" - - name: ROUTER_DISABLE_NAMESPACE_OWNERSHIP_CHECK - value: "false" - #FIXME: use metrics tls - - name: ROUTER_METRICS_TLS_CERT_FILE - value: /etc/pki/tls/private/tls.crt - - name: ROUTER_METRICS_TLS_KEY_FILE - value: /etc/pki/tls/private/tls.key - - name: ROUTER_METRICS_TYPE - value: haproxy - - name: ROUTER_SERVICE_NAME - value: default - - name: ROUTER_SET_FORWARDED_HEADERS - value: append - - name: ROUTER_THREADS - value: "4" - - name: SSL_MIN_VERSION - value: TLSv1.2 - livenessProbe: - failureThreshold: 3 - httpGet: - host: localhost - path: /healthz - port: 1936 - scheme: HTTP - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - readinessProbe: - failureThreshold: 3 - httpGet: - host: localhost - path: /healthz/ready - port: 1936 - scheme: HTTP - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - startupProbe: - failureThreshold: 120 - httpGet: - path: /healthz/ready - port: 1936 - periodSeconds: 1 - resources: - requests: - cpu: 100m - memory: 256Mi - volumeMounts: - - mountPath: /etc/pki/tls/private - name: default-certificate - readOnly: true - - mountPath: /var/run/configmaps/service-ca - name: service-ca-bundle - readOnly: true - dnsPolicy: ClusterFirstWithHostNet - hostNetwork: true - restartPolicy: Always - schedulerName: default-scheduler - securityContext: {} - serviceAccount: router - volumes: - - name: default-certificate - secret: - defaultMode: 420 - secretName: router-certs-default - - name: service-ca-bundle - configMap: - items: - - key: service-ca.crt - path: service-ca.crt - name: service-ca-bundle - optional: false - defaultMode: 420 -`) - -func assetsApps0000_80_openshiftRouterDeploymentYamlBytes() ([]byte, error) { - return _assetsApps0000_80_openshiftRouterDeploymentYaml, nil -} - -func assetsApps0000_80_openshiftRouterDeploymentYaml() (*asset, error) { - bytes, err := assetsApps0000_80_openshiftRouterDeploymentYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/apps/0000_80_openshift-router-deployment.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsApps000_80_hostpathProvisionerDaemonsetYaml = []byte(`apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: kubevirt-hostpath-provisioner - labels: - k8s-app: kubevirt-hostpath-provisioner - namespace: kubevirt-hostpath-provisioner -spec: - selector: - matchLabels: - k8s-app: kubevirt-hostpath-provisioner - template: - metadata: - labels: - k8s-app: kubevirt-hostpath-provisioner - spec: - serviceAccountName: kubevirt-hostpath-provisioner-admin - containers: - - name: kubevirt-hostpath-provisioner - image: {{ .ReleaseImage.kubevirt_hostpath_provisioner }} - imagePullPolicy: Always - env: - - name: USE_NAMING_PREFIX - value: 
"false" # change to true, to have the name of the pvc be part of the directory - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: PV_DIR - value: /var/hpvolumes - volumeMounts: - - name: pv-volume # root dir where your bind mounts will be on the node - mountPath: /var/hpvolumes - #nodeSelector: - #- name: xxxxxx - volumes: - - name: pv-volume - hostPath: - path: /var/hpvolumes -`) - -func assetsApps000_80_hostpathProvisionerDaemonsetYamlBytes() ([]byte, error) { - return _assetsApps000_80_hostpathProvisionerDaemonsetYaml, nil -} - -func assetsApps000_80_hostpathProvisionerDaemonsetYaml() (*asset, error) { - bytes, err := assetsApps000_80_hostpathProvisionerDaemonsetYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/apps/000_80_hostpath-provisioner-daemonset.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -// Asset loads and returns the asset for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func Asset(name string) ([]byte, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) - } - return a.bytes, nil - } - return nil, fmt.Errorf("Asset %s not found", name) -} - -// MustAsset is like Asset but panics when Asset would return an error. -// It simplifies safe initialization of global variables. -func MustAsset(name string) []byte { - a, err := Asset(name) - if err != nil { - panic("asset: Asset(" + name + "): " + err.Error()) - } - - return a -} - -// AssetInfo loads and returns the asset info for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func AssetInfo(name string) (os.FileInfo, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) - } - return a.info, nil - } - return nil, fmt.Errorf("AssetInfo %s not found", name) -} - -// AssetNames returns the names of the assets. -func AssetNames() []string { - names := make([]string, 0, len(_bindata)) - for name := range _bindata { - names = append(names, name) - } - return names -} - -// _bindata is a table, holding each asset generator, mapped to its name. -var _bindata = map[string]func() (*asset, error){ - "assets/apps/0000_00_flannel-daemonset.yaml": assetsApps0000_00_flannelDaemonsetYaml, - "assets/apps/0000_60_service-ca_05_deploy.yaml": assetsApps0000_60_serviceCa_05_deployYaml, - "assets/apps/0000_70_dns_01-dns-daemonset.yaml": assetsApps0000_70_dns_01DnsDaemonsetYaml, - "assets/apps/0000_70_dns_01-node-resolver-daemonset.yaml": assetsApps0000_70_dns_01NodeResolverDaemonsetYaml, - "assets/apps/0000_80_openshift-router-deployment.yaml": assetsApps0000_80_openshiftRouterDeploymentYaml, - "assets/apps/000_80_hostpath-provisioner-daemonset.yaml": assetsApps000_80_hostpathProvisionerDaemonsetYaml, -} - -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... 
and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"} -// AssetDir("data/img") would return []string{"a.png", "b.png"} -// AssetDir("foo.txt") and AssetDir("notexist") would return an error -// AssetDir("") will return []string{"data"}. -func AssetDir(name string) ([]string, error) { - node := _bintree - if len(name) != 0 { - cannonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(cannonicalName, "/") - for _, p := range pathList { - node = node.Children[p] - if node == nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - } - } - if node.Func != nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - rv := make([]string, 0, len(node.Children)) - for childName := range node.Children { - rv = append(rv, childName) - } - return rv, nil -} - -type bintree struct { - Func func() (*asset, error) - Children map[string]*bintree -} - -var _bintree = &bintree{nil, map[string]*bintree{ - "assets": {nil, map[string]*bintree{ - "apps": {nil, map[string]*bintree{ - "0000_00_flannel-daemonset.yaml": {assetsApps0000_00_flannelDaemonsetYaml, map[string]*bintree{}}, - "0000_60_service-ca_05_deploy.yaml": {assetsApps0000_60_serviceCa_05_deployYaml, map[string]*bintree{}}, - "0000_70_dns_01-dns-daemonset.yaml": {assetsApps0000_70_dns_01DnsDaemonsetYaml, map[string]*bintree{}}, - "0000_70_dns_01-node-resolver-daemonset.yaml": {assetsApps0000_70_dns_01NodeResolverDaemonsetYaml, map[string]*bintree{}}, - "0000_80_openshift-router-deployment.yaml": {assetsApps0000_80_openshiftRouterDeploymentYaml, map[string]*bintree{}}, - "000_80_hostpath-provisioner-daemonset.yaml": {assetsApps000_80_hostpathProvisionerDaemonsetYaml, map[string]*bintree{}}, - }}, - }}, -}} - -// RestoreAsset restores an asset under the given directory -func RestoreAsset(dir, name string) error { - data, err := Asset(name) - if err != nil { - return err - } - info, err := AssetInfo(name) - if err != nil { - return err - } - err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) - if err != nil { - return err - } - err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) - if err != nil { - return err - } - err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) - if err != nil { - return err - } - return nil -} - -// RestoreAssets restores an asset under the given directory recursively -func RestoreAssets(dir, name string) error { - children, err := AssetDir(name) - // File - if err != nil { - return RestoreAsset(dir, name) - } - // Dir - for _, child := range children { - err = RestoreAssets(dir, filepath.Join(name, child)) - if err != nil { - return err - } - } - return nil -} - -func _filePath(dir, name string) string { - cannonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) -} diff --git a/pkg/assets/crd/bindata.go b/pkg/assets/bindata.go similarity index 69% rename from pkg/assets/crd/bindata.go rename to pkg/assets/bindata.go index 7c6bd0dbe3a..5af716fc417 100644 --- a/pkg/assets/crd/bindata.go +++ b/pkg/assets/bindata.go @@ -1,5 +1,45 @@ // Package assets Code generated by go-bindata. (@generated) DO NOT EDIT. 
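With the per-group bindata package above removed, every embedded manifest is served from this single generated pkg/assets/bindata.go, and callers such as applyApps now resolve manifests through the one Asset accessor (see the `objBytes, err := Asset(app)` hunk earlier in this patch). A minimal sketch of that lookup, compiling alongside the generated accessor in this package; the loadManifest helper itself is illustrative and not part of the change:

package assets

import "fmt"

// loadManifest is an illustrative helper showing how callers resolve an
// embedded manifest by its asset path from the consolidated bindata table.
func loadManifest(path string) ([]byte, error) {
	// Asset is the go-bindata accessor generated in this file; it returns
	// the embedded bytes or an error when the path is not in _bindata.
	objBytes, err := Asset(path)
	if err != nil {
		return nil, fmt.Errorf("error getting asset %s: %v", path, err)
	}
	return objBytes, nil
}

Callers would pass paths matching the new layout, for example "assets/components/openshift-dns/dns/daemonset.yaml".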
// sources: +// assets/components/flannel/clusterrole.yaml +// assets/components/flannel/clusterrolebinding.yaml +// assets/components/flannel/configmap.yaml +// assets/components/flannel/daemonset.yaml +// assets/components/flannel/podsecuritypolicy.yaml +// assets/components/flannel/service-account.yaml +// assets/components/hostpath-provisioner/clusterrole.yaml +// assets/components/hostpath-provisioner/clusterrolebinding.yaml +// assets/components/hostpath-provisioner/daemonset.yaml +// assets/components/hostpath-provisioner/namespace.yaml +// assets/components/hostpath-provisioner/scc.yaml +// assets/components/hostpath-provisioner/service-account.yaml +// assets/components/hostpath-provisioner/storageclass.yaml +// assets/components/openshift-dns/dns/cluster-role-binding.yaml +// assets/components/openshift-dns/dns/cluster-role.yaml +// assets/components/openshift-dns/dns/configmap.yaml +// assets/components/openshift-dns/dns/daemonset.yaml +// assets/components/openshift-dns/dns/namespace.yaml +// assets/components/openshift-dns/dns/service-account.yaml +// assets/components/openshift-dns/dns/service.yaml +// assets/components/openshift-dns/node-resolver/daemonset.yaml +// assets/components/openshift-dns/node-resolver/service-account.yaml +// assets/components/openshift-router/cluster-role-binding.yaml +// assets/components/openshift-router/cluster-role.yaml +// assets/components/openshift-router/configmap.yaml +// assets/components/openshift-router/deployment.yaml +// assets/components/openshift-router/namespace.yaml +// assets/components/openshift-router/service-account.yaml +// assets/components/openshift-router/service-cloud.yaml +// assets/components/openshift-router/service-internal.yaml +// assets/components/service-ca/clusterrole.yaml +// assets/components/service-ca/clusterrolebinding.yaml +// assets/components/service-ca/deployment.yaml +// assets/components/service-ca/ns.yaml +// assets/components/service-ca/role.yaml +// assets/components/service-ca/rolebinding.yaml +// assets/components/service-ca/sa.yaml +// assets/components/service-ca/signing-cabundle.yaml +// assets/components/service-ca/signing-secret.yaml +// assets/core/0000_50_cluster-openshift-controller-manager_00_namespace.yaml // assets/crd/0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml // assets/crd/0000_03_config-operator_01_proxy.crd.yaml // assets/crd/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml @@ -9,6 +49,13 @@ // assets/crd/0000_10_config-operator_01_image.crd.yaml // assets/crd/0000_10_config-operator_01_imagecontentsourcepolicy.crd.yaml // assets/crd/0000_11_imageregistry-configs.crd.yaml +// assets/scc/0000_20_kube-apiserver-operator_00_scc-anyuid.yaml +// assets/scc/0000_20_kube-apiserver-operator_00_scc-hostaccess.yaml +// assets/scc/0000_20_kube-apiserver-operator_00_scc-hostmount-anyuid.yaml +// assets/scc/0000_20_kube-apiserver-operator_00_scc-hostnetwork.yaml +// assets/scc/0000_20_kube-apiserver-operator_00_scc-nonroot.yaml +// assets/scc/0000_20_kube-apiserver-operator_00_scc-privileged.yaml +// assets/scc/0000_20_kube-apiserver-operator_00_scc-restricted.yaml package assets import ( @@ -62,6 +109,1914 @@ func (fi bindataFileInfo) Sys() interface{} { return nil } +var _assetsComponentsFlannelClusterroleYaml = []byte(`kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: flannel +rules: +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: ['psp.flannel.unprivileged'] +- apiGroups: + - "" + 
resources: + - pods + verbs: + - get +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch`) + +func assetsComponentsFlannelClusterroleYamlBytes() ([]byte, error) { + return _assetsComponentsFlannelClusterroleYaml, nil +} + +func assetsComponentsFlannelClusterroleYaml() (*asset, error) { + bytes, err := assetsComponentsFlannelClusterroleYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/flannel/clusterrole.yaml", size: 418, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsFlannelClusterrolebindingYaml = []byte(`kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: flannel + namespace: kube-system`) + +func assetsComponentsFlannelClusterrolebindingYamlBytes() ([]byte, error) { + return _assetsComponentsFlannelClusterrolebindingYaml, nil +} + +func assetsComponentsFlannelClusterrolebindingYaml() (*asset, error) { + bytes, err := assetsComponentsFlannelClusterrolebindingYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/flannel/clusterrolebinding.yaml", size: 248, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsFlannelConfigmapYaml = []byte(`kind: ConfigMap +apiVersion: v1 +metadata: + name: kube-flannel-cfg + namespace: kube-system + labels: + tier: node + app: flannel +data: + cni-conf.json: | + { + "name": "cbr0", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "forceAddress": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + net-conf.json: | + { + "Network": "10.42.0.0/16", + "Backend": { + "Type": "vxlan" + } + }`) + +func assetsComponentsFlannelConfigmapYamlBytes() ([]byte, error) { + return _assetsComponentsFlannelConfigmapYaml, nil +} + +func assetsComponentsFlannelConfigmapYaml() (*asset, error) { + bytes, err := assetsComponentsFlannelConfigmapYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/flannel/configmap.yaml", size: 674, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsFlannelDaemonsetYaml = []byte(`apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-flannel-ds + namespace: kube-system + labels: + tier: node + app: flannel +spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + hostNetwork: true + priorityClassName: system-node-critical + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni-bin + image: {{ .ReleaseImage.kube_flannel_cni }} + command: + - cp + args: + - -f + - /flannel + - /opt/cni/bin/flannel + volumeMounts: + - name: cni-plugin + mountPath: /opt/cni/bin + - name: install-cni + image: {{ .ReleaseImage.kube_flannel }} + 
command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: {{ .ReleaseImage.kube_flannel }} + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN", "NET_RAW"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + - name: cni-plugin + hostPath: + path: /opt/cni/bin`) + +func assetsComponentsFlannelDaemonsetYamlBytes() ([]byte, error) { + return _assetsComponentsFlannelDaemonsetYaml, nil +} + +func assetsComponentsFlannelDaemonsetYaml() (*asset, error) { + bytes, err := assetsComponentsFlannelDaemonsetYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/flannel/daemonset.yaml", size: 2543, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsFlannelPodsecuritypolicyYaml = []byte(`apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: psp.flannel.unprivileged + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default + seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default + apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default + apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default +spec: + privileged: false + volumes: + - configMap + - secret + - emptyDir + - hostPath + allowedHostPaths: + - pathPrefix: "/etc/cni/net.d" + - pathPrefix: "/etc/kube-flannel" + - pathPrefix: "/run/flannel" + readOnlyRootFilesystem: false + # Users and groups + runAsUser: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + fsGroup: + rule: RunAsAny + # Privilege Escalation + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + # Capabilities + allowedCapabilities: ['NET_ADMIN', 'NET_RAW'] + defaultAddCapabilities: [] + requiredDropCapabilities: [] + # Host namespaces + hostPID: false + hostIPC: false + hostNetwork: true + hostPorts: + - min: 0 + max: 65535 + # SELinux + seLinux: + # SELinux is unused in CaaSP + rule: 'RunAsAny'`) + +func assetsComponentsFlannelPodsecuritypolicyYamlBytes() ([]byte, error) { + return _assetsComponentsFlannelPodsecuritypolicyYaml, nil +} + +func assetsComponentsFlannelPodsecuritypolicyYaml() (*asset, error) { + bytes, err := assetsComponentsFlannelPodsecuritypolicyYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/flannel/podsecuritypolicy.yaml", size: 1195, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsFlannelServiceAccountYaml = []byte(`apiVersion: v1 +kind: ServiceAccount +metadata: + name: flannel + namespace: kube-system`) + +func assetsComponentsFlannelServiceAccountYamlBytes() ([]byte, error) 
{ + return _assetsComponentsFlannelServiceAccountYaml, nil +} + +func assetsComponentsFlannelServiceAccountYaml() (*asset, error) { + bytes, err := assetsComponentsFlannelServiceAccountYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/flannel/service-account.yaml", size: 86, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsHostpathProvisionerClusterroleYaml = []byte(`kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kubevirt-hostpath-provisioner +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] +`) + +func assetsComponentsHostpathProvisionerClusterroleYamlBytes() ([]byte, error) { + return _assetsComponentsHostpathProvisionerClusterroleYaml, nil +} + +func assetsComponentsHostpathProvisionerClusterroleYaml() (*asset, error) { + bytes, err := assetsComponentsHostpathProvisionerClusterroleYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/hostpath-provisioner/clusterrole.yaml", size: 609, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsHostpathProvisionerClusterrolebindingYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubevirt-hostpath-provisioner +subjects: +- kind: ServiceAccount + name: kubevirt-hostpath-provisioner-admin + namespace: kubevirt-hostpath-provisioner +roleRef: + kind: ClusterRole + name: kubevirt-hostpath-provisioner + apiGroup: rbac.authorization.k8s.io`) + +func assetsComponentsHostpathProvisionerClusterrolebindingYamlBytes() ([]byte, error) { + return _assetsComponentsHostpathProvisionerClusterrolebindingYaml, nil +} + +func assetsComponentsHostpathProvisionerClusterrolebindingYaml() (*asset, error) { + bytes, err := assetsComponentsHostpathProvisionerClusterrolebindingYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/hostpath-provisioner/clusterrolebinding.yaml", size: 338, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsHostpathProvisionerDaemonsetYaml = []byte(`apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kubevirt-hostpath-provisioner + labels: + k8s-app: kubevirt-hostpath-provisioner + namespace: kubevirt-hostpath-provisioner +spec: + selector: + matchLabels: + k8s-app: kubevirt-hostpath-provisioner + template: + metadata: + labels: + k8s-app: kubevirt-hostpath-provisioner + spec: + serviceAccountName: kubevirt-hostpath-provisioner-admin + containers: + - name: kubevirt-hostpath-provisioner + image: {{ .ReleaseImage.kubevirt_hostpath_provisioner }} + imagePullPolicy: Always + env: + - name: USE_NAMING_PREFIX + value: "false" # change to true, to have the name of the pvc be part of the directory + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: PV_DIR + value: /var/hpvolumes + 
volumeMounts: + - name: pv-volume # root dir where your bind mounts will be on the node + mountPath: /var/hpvolumes + #nodeSelector: + #- name: xxxxxx + volumes: + - name: pv-volume + hostPath: + path: /var/hpvolumes +`) + +func assetsComponentsHostpathProvisionerDaemonsetYamlBytes() ([]byte, error) { + return _assetsComponentsHostpathProvisionerDaemonsetYaml, nil +} + +func assetsComponentsHostpathProvisionerDaemonsetYaml() (*asset, error) { + bytes, err := assetsComponentsHostpathProvisionerDaemonsetYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/hostpath-provisioner/daemonset.yaml", size: 1225, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsHostpathProvisionerNamespaceYaml = []byte(`apiVersion: v1 +kind: Namespace +metadata: + name: kubevirt-hostpath-provisioner`) + +func assetsComponentsHostpathProvisionerNamespaceYamlBytes() ([]byte, error) { + return _assetsComponentsHostpathProvisionerNamespaceYaml, nil +} + +func assetsComponentsHostpathProvisionerNamespaceYaml() (*asset, error) { + bytes, err := assetsComponentsHostpathProvisionerNamespaceYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/hostpath-provisioner/namespace.yaml", size: 78, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsHostpathProvisionerSccYaml = []byte(`kind: SecurityContextConstraints +apiVersion: security.openshift.io/v1 +metadata: + name: hostpath-provisioner +allowPrivilegedContainer: true +requiredDropCapabilities: +- KILL +- MKNOD +- SETUID +- SETGID +runAsUser: + type: RunAsAny +seLinuxContext: + type: RunAsAny +fsGroup: + type: RunAsAny +supplementalGroups: + type: RunAsAny +allowHostDirVolumePlugin: true +users: +- system:serviceaccount:kubevirt-hostpath-provisioner:kubevirt-hostpath-provisioner-admin +volumes: +- hostPath +- secret +`) + +func assetsComponentsHostpathProvisionerSccYamlBytes() ([]byte, error) { + return _assetsComponentsHostpathProvisionerSccYaml, nil +} + +func assetsComponentsHostpathProvisionerSccYaml() (*asset, error) { + bytes, err := assetsComponentsHostpathProvisionerSccYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/hostpath-provisioner/scc.yaml", size: 480, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsHostpathProvisionerServiceAccountYaml = []byte(`apiVersion: v1 +kind: ServiceAccount +metadata: + name: kubevirt-hostpath-provisioner-admin + namespace: kubevirt-hostpath-provisioner`) + +func assetsComponentsHostpathProvisionerServiceAccountYamlBytes() ([]byte, error) { + return _assetsComponentsHostpathProvisionerServiceAccountYaml, nil +} + +func assetsComponentsHostpathProvisionerServiceAccountYaml() (*asset, error) { + bytes, err := assetsComponentsHostpathProvisionerServiceAccountYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/hostpath-provisioner/service-account.yaml", size: 132, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsHostpathProvisionerStorageclassYaml = []byte(`apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: kubevirt-hostpath-provisioner +provisioner: 
kubevirt.io/hostpath-provisioner +reclaimPolicy: Delete +volumeBindingMode: WaitForFirstConsumer`) + +func assetsComponentsHostpathProvisionerStorageclassYamlBytes() ([]byte, error) { + return _assetsComponentsHostpathProvisionerStorageclassYaml, nil +} + +func assetsComponentsHostpathProvisionerStorageclassYaml() (*asset, error) { + bytes, err := assetsComponentsHostpathProvisionerStorageclassYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/hostpath-provisioner/storageclass.yaml", size: 204, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsOpenshiftDnsDnsClusterRoleBindingYaml = []byte(`kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: openshift-dns +subjects: +- kind: ServiceAccount + name: dns + namespace: openshift-dns +roleRef: + kind: ClusterRole + name: openshift-dns +`) + +func assetsComponentsOpenshiftDnsDnsClusterRoleBindingYamlBytes() ([]byte, error) { + return _assetsComponentsOpenshiftDnsDnsClusterRoleBindingYaml, nil +} + +func assetsComponentsOpenshiftDnsDnsClusterRoleBindingYaml() (*asset, error) { + bytes, err := assetsComponentsOpenshiftDnsDnsClusterRoleBindingYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/openshift-dns/dns/cluster-role-binding.yaml", size: 223, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsOpenshiftDnsDnsClusterRoleYaml = []byte(`kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: openshift-dns +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch + +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +`) + +func assetsComponentsOpenshiftDnsDnsClusterRoleYamlBytes() ([]byte, error) { + return _assetsComponentsOpenshiftDnsDnsClusterRoleYaml, nil +} + +func assetsComponentsOpenshiftDnsDnsClusterRoleYaml() (*asset, error) { + bytes, err := assetsComponentsOpenshiftDnsDnsClusterRoleYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/openshift-dns/dns/cluster-role.yaml", size: 492, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsOpenshiftDnsDnsConfigmapYaml = []byte(`apiVersion: v1 +data: + Corefile: | + .:5353 { + bufsize 512 + errors + health { + lameduck 20s + } + ready + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + } + prometheus 127.0.0.1:9153 + forward . 
/etc/resolv.conf { + policy sequential + } + cache 900 { + denial 9984 30 + } + reload + } +kind: ConfigMap +metadata: + labels: + dns.operator.openshift.io/owning-dns: default + name: dns-default + namespace: openshift-dns +`) + +func assetsComponentsOpenshiftDnsDnsConfigmapYamlBytes() ([]byte, error) { + return _assetsComponentsOpenshiftDnsDnsConfigmapYaml, nil +} + +func assetsComponentsOpenshiftDnsDnsConfigmapYaml() (*asset, error) { + bytes, err := assetsComponentsOpenshiftDnsDnsConfigmapYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/openshift-dns/dns/configmap.yaml", size: 610, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsOpenshiftDnsDnsDaemonsetYaml = []byte(`kind: DaemonSet +apiVersion: apps/v1 +metadata: + labels: + dns.operator.openshift.io/owning-dns: default + name: dns-default + namespace: openshift-dns +spec: + selector: + matchLabels: + dns.operator.openshift.io/daemonset-dns: default + template: + metadata: + labels: + dns.operator.openshift.io/daemonset-dns: default + annotations: + target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' + spec: + serviceAccountName: dns + priorityClassName: system-node-critical + containers: + - name: dns + image: {{ .ReleaseImage.coredns }} + imagePullPolicy: IfNotPresent + terminationMessagePolicy: FallbackToLogsOnError + command: [ "coredns" ] + args: [ "-conf", "/etc/coredns/Corefile" ] + volumeMounts: + - name: config-volume + mountPath: /etc/coredns + readOnly: true + ports: + - containerPort: 5353 + name: dns + protocol: UDP + - containerPort: 5353 + name: dns-tcp + protocol: TCP + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 3 + livenessProbe: + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + resources: + requests: + cpu: 50m + memory: 70Mi + - name: kube-rbac-proxy + image: {{ .ReleaseImage.kube_rbac_proxy }} + args: + - --secure-listen-address=:9154 + - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 + - --upstream=http://127.0.0.1:9153/ + - --tls-cert-file=/etc/tls/private/tls.crt + - --tls-private-key-file=/etc/tls/private/tls.key + ports: + - containerPort: 9154 + name: metrics + resources: + requests: + cpu: 10m + memory: 40Mi + volumeMounts: + - mountPath: /etc/tls/private + name: metrics-tls + readOnly: true + dnsPolicy: Default + nodeSelector: + kubernetes.io/os: linux + volumes: + - name: config-volume + configMap: + items: + - key: Corefile + path: Corefile + name: dns-default + - name: metrics-tls + secret: + defaultMode: 420 + secretName: dns-default-metrics-tls + tolerations: + # DNS needs to run everywhere. Tolerate all taints + - operator: Exists + updateStrategy: + type: RollingUpdate + rollingUpdate: + # TODO: Consider setting maxSurge to a positive value. + maxSurge: 0 + # Note: The daemon controller rounds the percentage up + # (unlike the deployment controller, which rounds down). 
+ maxUnavailable: 10% +`) + +func assetsComponentsOpenshiftDnsDnsDaemonsetYamlBytes() ([]byte, error) { + return _assetsComponentsOpenshiftDnsDnsDaemonsetYaml, nil +} + +func assetsComponentsOpenshiftDnsDnsDaemonsetYaml() (*asset, error) { + bytes, err := assetsComponentsOpenshiftDnsDnsDaemonsetYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/openshift-dns/dns/daemonset.yaml", size: 3179, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsOpenshiftDnsDnsNamespaceYaml = []byte(`kind: Namespace +apiVersion: v1 +metadata: + annotations: + openshift.io/node-selector: "" + workload.openshift.io/allowed: "management" + name: openshift-dns + labels: + # set value to avoid depending on kube admission that depends on openshift apis + openshift.io/run-level: "0" + # allow openshift-monitoring to look for ServiceMonitor objects in this namespace + openshift.io/cluster-monitoring: "true" +`) + +func assetsComponentsOpenshiftDnsDnsNamespaceYamlBytes() ([]byte, error) { + return _assetsComponentsOpenshiftDnsDnsNamespaceYaml, nil +} + +func assetsComponentsOpenshiftDnsDnsNamespaceYaml() (*asset, error) { + bytes, err := assetsComponentsOpenshiftDnsDnsNamespaceYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/openshift-dns/dns/namespace.yaml", size: 417, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsOpenshiftDnsDnsServiceAccountYaml = []byte(`kind: ServiceAccount +apiVersion: v1 +metadata: + name: dns + namespace: openshift-dns +`) + +func assetsComponentsOpenshiftDnsDnsServiceAccountYamlBytes() ([]byte, error) { + return _assetsComponentsOpenshiftDnsDnsServiceAccountYaml, nil +} + +func assetsComponentsOpenshiftDnsDnsServiceAccountYaml() (*asset, error) { + bytes, err := assetsComponentsOpenshiftDnsDnsServiceAccountYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/openshift-dns/dns/service-account.yaml", size: 85, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsOpenshiftDnsDnsServiceYaml = []byte(`kind: Service +apiVersion: v1 +metadata: + annotations: + service.beta.openshift.io/serving-cert-secret-name: dns-default-metrics-tls + labels: + dns.operator.openshift.io/owning-dns: default + name: dns-default + namespace: openshift-dns +spec: + clusterIP: {{.ClusterIP}} + selector: + dns.operator.openshift.io/daemonset-dns: default + ports: + - name: dns + port: 53 + targetPort: dns + protocol: UDP + - name: dns-tcp + port: 53 + targetPort: dns-tcp + protocol: TCP + - name: metrics + port: 9154 + targetPort: metrics + protocol: TCP + # TODO: Uncomment when service topology feature gate is enabled. 
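The manifests embedded above are not applied verbatim: placeholders such as {{ .ReleaseImage.coredns }} and {{.ClusterIP}} are filled in at apply time via the RenderFunc and RenderParams arguments seen in the applyApps hunk earlier in this patch. A minimal illustration of that kind of substitution, assuming Go's text/template and hypothetical names throughout; this is not the patch's actual RenderFunc:

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// renderManifest is an illustrative stand-in for a render function: it
// executes an embedded YAML manifest as a Go template against a parameter
// map. Names and structure here are assumptions, not the patch's API.
func renderManifest(objBytes []byte, params map[string]interface{}) ([]byte, error) {
	tmpl, err := template.New("asset").Parse(string(objBytes))
	if err != nil {
		return nil, fmt.Errorf("parsing asset template: %v", err)
	}
	var out bytes.Buffer
	if err := tmpl.Execute(&out, params); err != nil {
		return nil, fmt.Errorf("rendering asset template: %v", err)
	}
	return out.Bytes(), nil
}

func main() {
	manifest := []byte("clusterIP: {{.ClusterIP}}\n")
	rendered, _ := renderManifest(manifest, map[string]interface{}{"ClusterIP": "10.43.0.10"})
	fmt.Print(string(rendered)) // clusterIP: 10.43.0.10
}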
+ #topologyKeys: + # - "kubernetes.io/hostname" + # - "*" +`) + +func assetsComponentsOpenshiftDnsDnsServiceYamlBytes() ([]byte, error) { + return _assetsComponentsOpenshiftDnsDnsServiceYaml, nil +} + +func assetsComponentsOpenshiftDnsDnsServiceYaml() (*asset, error) { + bytes, err := assetsComponentsOpenshiftDnsDnsServiceYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/openshift-dns/dns/service.yaml", size: 691, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsOpenshiftDnsNodeResolverDaemonsetYaml = []byte(`apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: node-resolver + namespace: openshift-dns +spec: + revisionHistoryLimit: 10 + selector: + matchLabels: + dns.operator.openshift.io/daemonset-node-resolver: "" + template: + metadata: + annotations: + target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' + labels: + dns.operator.openshift.io/daemonset-node-resolver: "" + spec: + containers: + - command: + - /bin/bash + - -c + - | + #!/bin/bash + set -uo pipefail + + trap 'jobs -p | xargs kill || true; wait; exit 0' TERM + + NAMESERVER=${DNS_DEFAULT_SERVICE_HOST} + OPENSHIFT_MARKER="openshift-generated-node-resolver" + HOSTS_FILE="/etc/hosts" + TEMP_FILE="/etc/hosts.tmp" + + IFS=', ' read -r -a services <<< "${SERVICES}" + + # Make a temporary file with the old hosts file's attributes. + cp -f --attributes-only "${HOSTS_FILE}" "${TEMP_FILE}" + + while true; do + declare -A svc_ips + for svc in "${services[@]}"; do + # Fetch service IP from cluster dns if present. We make several tries + # to do it: IPv4, IPv6, IPv4 over TCP and IPv6 over TCP. The two last ones + # are for deployments with Kuryr on older OpenStack (OSP13) - those do not + # support UDP loadbalancers and require reaching DNS through TCP. + cmds=('dig -t A @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"' + 'dig -t AAAA @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"' + 'dig -t A +tcp +retry=0 @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"' + 'dig -t AAAA +tcp +retry=0 @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"') + for i in ${!cmds[*]} + do + ips=($(eval "${cmds[i]}")) + if [[ "$?" 
-eq 0 && "${#ips[@]}" -ne 0 ]]; then + svc_ips["${svc}"]="${ips[@]}" + break + fi + done + done + + # Update /etc/hosts only if we get valid service IPs + # We will not update /etc/hosts when there is coredns service outage or api unavailability + # Stale entries could exist in /etc/hosts if the service is deleted + if [[ -n "${svc_ips[*]-}" ]]; then + # Build a new hosts file from /etc/hosts with our custom entries filtered out + grep -v "# ${OPENSHIFT_MARKER}" "${HOSTS_FILE}" > "${TEMP_FILE}" + + # Append resolver entries for services + for svc in "${!svc_ips[@]}"; do + for ip in ${svc_ips[${svc}]}; do + echo "${ip} ${svc} ${svc}.${CLUSTER_DOMAIN} # ${OPENSHIFT_MARKER}" >> "${TEMP_FILE}" + done + done + + # TODO: Update /etc/hosts atomically to avoid any inconsistent behavior + # Replace /etc/hosts with our modified version if needed + cmp "${TEMP_FILE}" "${HOSTS_FILE}" || cp -f "${TEMP_FILE}" "${HOSTS_FILE}" + # TEMP_FILE is not removed to avoid file create/delete and attributes copy churn + fi + sleep 60 & wait + unset svc_ips + done + env: + - name: SERVICES + # Comma or space separated list of services + # NOTE: For now, ensure these are relative names; for each relative name, + # an alias with the CLUSTER_DOMAIN suffix will also be added. + value: "image-registry.openshift-image-registry.svc" + - name: NAMESERVER + value: 172.30.0.10 + - name: CLUSTER_DOMAIN + value: cluster.local + image: {{ .ReleaseImage.cli }} + imagePullPolicy: IfNotPresent + name: dns-node-resolver + resources: + requests: + cpu: 5m + memory: 21Mi + securityContext: + privileged: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /etc/hosts + name: hosts-file + dnsPolicy: ClusterFirst + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: node-resolver + serviceAccountName: node-resolver + terminationGracePeriodSeconds: 30 + tolerations: + - operator: Exists + volumes: + - hostPath: + path: /etc/hosts + type: File + name: hosts-file + updateStrategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 33% + type: RollingUpdate +`) + +func assetsComponentsOpenshiftDnsNodeResolverDaemonsetYamlBytes() ([]byte, error) { + return _assetsComponentsOpenshiftDnsNodeResolverDaemonsetYaml, nil +} + +func assetsComponentsOpenshiftDnsNodeResolverDaemonsetYaml() (*asset, error) { + bytes, err := assetsComponentsOpenshiftDnsNodeResolverDaemonsetYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/openshift-dns/node-resolver/daemonset.yaml", size: 4823, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsOpenshiftDnsNodeResolverServiceAccountYaml = []byte(`kind: ServiceAccount +apiVersion: v1 +metadata: + name: node-resolver + namespace: openshift-dns +`) + +func assetsComponentsOpenshiftDnsNodeResolverServiceAccountYamlBytes() ([]byte, error) { + return _assetsComponentsOpenshiftDnsNodeResolverServiceAccountYaml, nil +} + +func assetsComponentsOpenshiftDnsNodeResolverServiceAccountYaml() (*asset, error) { + bytes, err := assetsComponentsOpenshiftDnsNodeResolverServiceAccountYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/openshift-dns/node-resolver/service-account.yaml", size: 95, mode: os.FileMode(420), 
modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsOpenshiftRouterClusterRoleBindingYaml = []byte(`# Binds the router role to its Service Account. +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: openshift-ingress-router +subjects: +- kind: ServiceAccount + name: router + namespace: openshift-ingress +roleRef: + kind: ClusterRole + name: openshift-ingress-router + namespace: openshift-ingress +`) + +func assetsComponentsOpenshiftRouterClusterRoleBindingYamlBytes() ([]byte, error) { + return _assetsComponentsOpenshiftRouterClusterRoleBindingYaml, nil +} + +func assetsComponentsOpenshiftRouterClusterRoleBindingYaml() (*asset, error) { + bytes, err := assetsComponentsOpenshiftRouterClusterRoleBindingYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/openshift-router/cluster-role-binding.yaml", size: 329, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsOpenshiftRouterClusterRoleYaml = []byte(`# Cluster scoped role for routers. This should be as restrictive as possible. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: openshift-ingress-router +rules: +- apiGroups: + - "" + resources: + - endpoints + - namespaces + - services + verbs: + - list + - watch + +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create + +- apiGroups: + - route.openshift.io + resources: + - routes + verbs: + - list + - watch + +- apiGroups: + - route.openshift.io + resources: + - routes/status + verbs: + - update + +- apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + verbs: + - use + resourceNames: + - hostnetwork + +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch +`) + +func assetsComponentsOpenshiftRouterClusterRoleYamlBytes() ([]byte, error) { + return _assetsComponentsOpenshiftRouterClusterRoleYaml, nil +} + +func assetsComponentsOpenshiftRouterClusterRoleYaml() (*asset, error) { + bytes, err := assetsComponentsOpenshiftRouterClusterRoleYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/openshift-router/cluster-role.yaml", size: 883, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsOpenshiftRouterConfigmapYaml = []byte(`apiVersion: v1 +kind: ConfigMap +metadata: + namespace: openshift-ingress + name: service-ca-bundle + annotations: + service.beta.openshift.io/inject-cabundle: "true" +`) + +func assetsComponentsOpenshiftRouterConfigmapYamlBytes() ([]byte, error) { + return _assetsComponentsOpenshiftRouterConfigmapYaml, nil +} + +func assetsComponentsOpenshiftRouterConfigmapYaml() (*asset, error) { + bytes, err := assetsComponentsOpenshiftRouterConfigmapYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/openshift-router/configmap.yaml", size: 168, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsOpenshiftRouterDeploymentYaml = []byte(`# Deployment with default values +# Ingress Controller specific values are applied at runtime. 
+kind: Deployment +apiVersion: apps/v1 +metadata: + name: router-default + namespace: openshift-ingress + labels: + ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + annotations: + "unsupported.do-not-use.openshift.io/override-liveness-grace-period-seconds": "10" + target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' + labels: + ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default + spec: + serviceAccountName: router + # nodeSelector is set at runtime. + priorityClassName: system-cluster-critical + containers: + - name: router + image: {{ .ReleaseImage.haproxy_router }} + imagePullPolicy: IfNotPresent + terminationMessagePolicy: FallbackToLogsOnError + ports: + - name: http + containerPort: 80 + hostPort: 80 + protocol: TCP + - name: https + containerPort: 443 + hostPort: 443 + protocol: TCP + - name: metrics + containerPort: 1936 + hostPort: 1936 + protocol: TCP + # Merged at runtime. + env: + # stats username and password are generated at runtime + - name: STATS_PORT + value: "1936" + - name: ROUTER_SERVICE_NAMESPACE + value: openshift-ingress + - name: DEFAULT_CERTIFICATE_DIR + value: /etc/pki/tls/private + - name: DEFAULT_DESTINATION_CA_PATH + value: /var/run/configmaps/service-ca/service-ca.crt + - name: ROUTER_CIPHERS + value: TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384 + - name: ROUTER_DISABLE_HTTP2 + value: "true" + - name: ROUTER_DISABLE_NAMESPACE_OWNERSHIP_CHECK + value: "false" + #FIXME: use metrics tls + - name: ROUTER_METRICS_TLS_CERT_FILE + value: /etc/pki/tls/private/tls.crt + - name: ROUTER_METRICS_TLS_KEY_FILE + value: /etc/pki/tls/private/tls.key + - name: ROUTER_METRICS_TYPE + value: haproxy + - name: ROUTER_SERVICE_NAME + value: default + - name: ROUTER_SET_FORWARDED_HEADERS + value: append + - name: ROUTER_THREADS + value: "4" + - name: SSL_MIN_VERSION + value: TLSv1.2 + livenessProbe: + failureThreshold: 3 + httpGet: + host: localhost + path: /healthz + port: 1936 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + httpGet: + host: localhost + path: /healthz/ready + port: 1936 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + startupProbe: + failureThreshold: 120 + httpGet: + path: /healthz/ready + port: 1936 + periodSeconds: 1 + resources: + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - mountPath: /etc/pki/tls/private + name: default-certificate + readOnly: true + - mountPath: /var/run/configmaps/service-ca + name: service-ca-bundle + readOnly: true + dnsPolicy: ClusterFirstWithHostNet + hostNetwork: true + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: router + volumes: + - name: default-certificate + secret: + defaultMode: 420 + secretName: router-certs-default + - name: service-ca-bundle + configMap: + items: + - key: 
service-ca.crt + path: service-ca.crt + name: service-ca-bundle + optional: false + defaultMode: 420 +`) + +func assetsComponentsOpenshiftRouterDeploymentYamlBytes() ([]byte, error) { + return _assetsComponentsOpenshiftRouterDeploymentYaml, nil +} + +func assetsComponentsOpenshiftRouterDeploymentYaml() (*asset, error) { + bytes, err := assetsComponentsOpenshiftRouterDeploymentYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/openshift-router/deployment.yaml", size: 4746, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsOpenshiftRouterNamespaceYaml = []byte(`kind: Namespace +apiVersion: v1 +metadata: + name: openshift-ingress + annotations: + openshift.io/node-selector: "" + workload.openshift.io/allowed: "management" + labels: + # allow openshift-monitoring to look for ServiceMonitor objects in this namespace + openshift.io/cluster-monitoring: "true" + name: openshift-ingress + # old and new forms of the label for matching with NetworkPolicy + network.openshift.io/policy-group: ingress + policy-group.network.openshift.io/ingress: "" +`) + +func assetsComponentsOpenshiftRouterNamespaceYamlBytes() ([]byte, error) { + return _assetsComponentsOpenshiftRouterNamespaceYaml, nil +} + +func assetsComponentsOpenshiftRouterNamespaceYaml() (*asset, error) { + bytes, err := assetsComponentsOpenshiftRouterNamespaceYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/openshift-router/namespace.yaml", size: 499, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsOpenshiftRouterServiceAccountYaml = []byte(`# Account for routers created by the operator. It will require cluster scoped +# permissions related to Route processing. 
+kind: ServiceAccount +apiVersion: v1 +metadata: + name: router + namespace: openshift-ingress +`) + +func assetsComponentsOpenshiftRouterServiceAccountYamlBytes() ([]byte, error) { + return _assetsComponentsOpenshiftRouterServiceAccountYaml, nil +} + +func assetsComponentsOpenshiftRouterServiceAccountYaml() (*asset, error) { + bytes, err := assetsComponentsOpenshiftRouterServiceAccountYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/openshift-router/service-account.yaml", size: 213, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsOpenshiftRouterServiceCloudYaml = []byte(`kind: Service +apiVersion: v1 +metadata: + annotations: + service.alpha.openshift.io/serving-cert-secret-name: router-certs-default + labels: + ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default + name: router-external-default + namespace: openshift-ingress +spec: + selector: + ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default + type: NodePort + ports: + - name: http + port: 80 + targetPort: 80 + nodePort: 30001 + - name: https + port: 443 + targetPort: 443 + nodePort: 30002 +`) + +func assetsComponentsOpenshiftRouterServiceCloudYamlBytes() ([]byte, error) { + return _assetsComponentsOpenshiftRouterServiceCloudYaml, nil +} + +func assetsComponentsOpenshiftRouterServiceCloudYaml() (*asset, error) { + bytes, err := assetsComponentsOpenshiftRouterServiceCloudYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/openshift-router/service-cloud.yaml", size: 567, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsOpenshiftRouterServiceInternalYaml = []byte(`# Cluster Service with default values +# Ingress Controller specific annotations are applied at runtime. 
+kind: Service +apiVersion: v1 +metadata: + annotations: + service.alpha.openshift.io/serving-cert-secret-name: router-certs-default + labels: + ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default + name: router-internal-default + namespace: openshift-ingress +spec: + selector: + ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default + type: ClusterIP + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http + - name: https + port: 443 + protocol: TCP + targetPort: https + - name: metrics + port: 1936 + protocol: TCP + targetPort: 1936 +`) + +func assetsComponentsOpenshiftRouterServiceInternalYamlBytes() ([]byte, error) { + return _assetsComponentsOpenshiftRouterServiceInternalYaml, nil +} + +func assetsComponentsOpenshiftRouterServiceInternalYaml() (*asset, error) { + bytes, err := assetsComponentsOpenshiftRouterServiceInternalYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/openshift-router/service-internal.yaml", size: 727, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsServiceCaClusterroleYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:openshift:controller:service-ca +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - create + - update + - patch +- apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - update + - patch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - list + - watch + - update +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch + - update +- apiGroups: + - apiregistration.k8s.io + resources: + - apiservices + verbs: + - get + - list + - watch + - update + - patch +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - update +`) + +func assetsComponentsServiceCaClusterroleYamlBytes() ([]byte, error) { + return _assetsComponentsServiceCaClusterroleYaml, nil +} + +func assetsComponentsServiceCaClusterroleYaml() (*asset, error) { + bytes, err := assetsComponentsServiceCaClusterroleYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/service-ca/clusterrole.yaml", size: 864, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsServiceCaClusterrolebindingYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:openshift:controller:service-ca +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + namespace: openshift-service-ca + name: service-ca +`) + +func assetsComponentsServiceCaClusterrolebindingYamlBytes() ([]byte, error) { + return _assetsComponentsServiceCaClusterrolebindingYaml, nil +} + +func assetsComponentsServiceCaClusterrolebindingYaml() (*asset, error) { + bytes, err := assetsComponentsServiceCaClusterrolebindingYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/service-ca/clusterrolebinding.yaml", size: 298, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil 
+} + +var _assetsComponentsServiceCaDeploymentYaml = []byte(`apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: openshift-service-ca + name: service-ca + labels: + app: service-ca + service-ca: "true" +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: service-ca + service-ca: "true" + template: + metadata: + name: service-ca + annotations: + target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' + labels: + app: service-ca + service-ca: "true" + spec: + securityContext: {} + serviceAccount: service-ca + serviceAccountName: service-ca + containers: + - name: service-ca-controller + image: {{ .ReleaseImage.service_ca_operator }} + imagePullPolicy: IfNotPresent + command: ["service-ca-operator", "controller"] + ports: + - containerPort: 8443 + # securityContext: + # runAsNonRoot: true + resources: + requests: + memory: 120Mi + cpu: 10m + volumeMounts: + - mountPath: /var/run/secrets/signing-key + name: signing-key + - mountPath: /var/run/configmaps/signing-cabundle + name: signing-cabundle + volumes: + - name: signing-key + secret: + secretName: {{.TLSSecret}} + - name: signing-cabundle + configMap: + name: {{.CAConfigMap}} + # nodeSelector: + # node-role.kubernetes.io/master: "" + priorityClassName: "system-cluster-critical" + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: "NoSchedule" + - key: "node.kubernetes.io/unreachable" + operator: "Exists" + effect: "NoExecute" + tolerationSeconds: 120 + - key: "node.kubernetes.io/not-ready" + operator: "Exists" + effect: "NoExecute" + tolerationSeconds: 120 +`) + +func assetsComponentsServiceCaDeploymentYamlBytes() ([]byte, error) { + return _assetsComponentsServiceCaDeploymentYaml, nil +} + +func assetsComponentsServiceCaDeploymentYaml() (*asset, error) { + bytes, err := assetsComponentsServiceCaDeploymentYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/service-ca/deployment.yaml", size: 1866, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsServiceCaNsYaml = []byte(`apiVersion: v1 +kind: Namespace +metadata: + name: openshift-service-ca + annotations: + openshift.io/node-selector: "" + workload.openshift.io/allowed: "management" +`) + +func assetsComponentsServiceCaNsYamlBytes() ([]byte, error) { + return _assetsComponentsServiceCaNsYaml, nil +} + +func assetsComponentsServiceCaNsYaml() (*asset, error) { + bytes, err := assetsComponentsServiceCaNsYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/service-ca/ns.yaml", size: 168, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsServiceCaRoleYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: system:openshift:controller:service-ca + namespace: openshift-service-ca +rules: +- apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + resourceNames: + - restricted + verbs: + - use +- apiGroups: + - "" + resources: + - events + verbs: + - create +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - update + - create +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +- apiGroups: + - "apps" + resources: + - replicasets + - deployments + verbs: + - get + - list + - watch`) + +func 
assetsComponentsServiceCaRoleYamlBytes() ([]byte, error) { + return _assetsComponentsServiceCaRoleYaml, nil +} + +func assetsComponentsServiceCaRoleYaml() (*asset, error) { + bytes, err := assetsComponentsServiceCaRoleYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/service-ca/role.yaml", size: 634, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsServiceCaRolebindingYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: system:openshift:controller:service-ca + namespace: openshift-service-ca +roleRef: + kind: Role + name: system:openshift:controller:service-ca + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + namespace: openshift-service-ca + name: service-ca +`) + +func assetsComponentsServiceCaRolebindingYamlBytes() ([]byte, error) { + return _assetsComponentsServiceCaRolebindingYaml, nil +} + +func assetsComponentsServiceCaRolebindingYaml() (*asset, error) { + bytes, err := assetsComponentsServiceCaRolebindingYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/service-ca/rolebinding.yaml", size: 343, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsServiceCaSaYaml = []byte(`apiVersion: v1 +kind: ServiceAccount +metadata: + namespace: openshift-service-ca + name: service-ca +`) + +func assetsComponentsServiceCaSaYamlBytes() ([]byte, error) { + return _assetsComponentsServiceCaSaYaml, nil +} + +func assetsComponentsServiceCaSaYaml() (*asset, error) { + bytes, err := assetsComponentsServiceCaSaYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/service-ca/sa.yaml", size: 99, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsServiceCaSigningCabundleYaml = []byte(`apiVersion: v1 +kind: ConfigMap +metadata: + namespace: openshift-service-ca + name: signing-cabundle +data: + ca-bundle.crt: +`) + +func assetsComponentsServiceCaSigningCabundleYamlBytes() ([]byte, error) { + return _assetsComponentsServiceCaSigningCabundleYaml, nil +} + +func assetsComponentsServiceCaSigningCabundleYaml() (*asset, error) { + bytes, err := assetsComponentsServiceCaSigningCabundleYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/service-ca/signing-cabundle.yaml", size: 123, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsComponentsServiceCaSigningSecretYaml = []byte(`apiVersion: v1 +kind: Secret +metadata: + namespace: openshift-service-ca + name: signing-key +type: kubernetes.io/tls +data: + tls.crt: + tls.key: +`) + +func assetsComponentsServiceCaSigningSecretYamlBytes() ([]byte, error) { + return _assetsComponentsServiceCaSigningSecretYaml, nil +} + +func assetsComponentsServiceCaSigningSecretYaml() (*asset, error) { + bytes, err := assetsComponentsServiceCaSigningSecretYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/components/service-ca/signing-secret.yaml", size: 144, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var 
_assetsCore0000_50_clusterOpenshiftControllerManager_00_namespaceYaml = []byte(`apiVersion: v1 +kind: Namespace +metadata: + annotations: + include.release.openshift.io/self-managed-high-availability: "true" + openshift.io/node-selector: "" + labels: + openshift.io/cluster-monitoring: "true" + name: openshift-controller-manager +`) + +func assetsCore0000_50_clusterOpenshiftControllerManager_00_namespaceYamlBytes() ([]byte, error) { + return _assetsCore0000_50_clusterOpenshiftControllerManager_00_namespaceYaml, nil +} + +func assetsCore0000_50_clusterOpenshiftControllerManager_00_namespaceYaml() (*asset, error) { + bytes, err := assetsCore0000_50_clusterOpenshiftControllerManager_00_namespaceYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/core/0000_50_cluster-openshift-controller-manager_00_namespace.yaml", size: 254, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + var _assetsCrd0000_03_authorizationOpenshift_01_rolebindingrestrictionCrdYaml = []byte(`apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -285,7 +2240,7 @@ func assetsCrd0000_03_authorizationOpenshift_01_rolebindingrestrictionCrdYaml() return nil, err } - info := bindataFileInfo{name: "assets/crd/0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "assets/crd/0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml", size: 10910, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -406,7 +2361,7 @@ func assetsCrd0000_03_configOperator_01_proxyCrdYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/crd/0000_03_config-operator_01_proxy.crd.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "assets/crd/0000_03_config-operator_01_proxy.crd.yaml", size: 4972, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -675,7 +2630,7 @@ func assetsCrd0000_03_quotaOpenshift_01_clusterresourcequotaCrdYaml() (*asset, e return nil, err } - info := bindataFileInfo{name: "assets/crd/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "assets/crd/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml", size: 12895, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1062,7 +3017,7 @@ func assetsCrd0000_03_securityOpenshift_01_sccCrdYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/crd/0000_03_security-openshift_01_scc.crd.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "assets/crd/0000_03_security-openshift_01_scc.crd.yaml", size: 17110, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1480,7 +3435,7 @@ func assetsCrd0000_10_configOperator_01_buildCrdYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/crd/0000_10_config-operator_01_build.crd.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "assets/crd/0000_10_config-operator_01_build.crd.yaml", size: 22856, mode: os.FileMode(420), modTime: 
time.Unix(1646566396, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1576,7 +3531,7 @@ func assetsCrd0000_10_configOperator_01_featuregateCrdYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/crd/0000_10_config-operator_01_featuregate.crd.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "assets/crd/0000_10_config-operator_01_featuregate.crd.yaml", size: 3486, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1755,7 +3710,7 @@ func assetsCrd0000_10_configOperator_01_imageCrdYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/crd/0000_10_config-operator_01_image.crd.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "assets/crd/0000_10_config-operator_01_image.crd.yaml", size: 8484, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1866,7 +3821,7 @@ func assetsCrd0000_10_configOperator_01_imagecontentsourcepolicyCrdYaml() (*asse return nil, err } - info := bindataFileInfo{name: "assets/crd/0000_10_config-operator_01_imagecontentsourcepolicy.crd.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "assets/crd/0000_10_config-operator_01_imagecontentsourcepolicy.crd.yaml", size: 5139, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -3426,7 +5381,445 @@ func assetsCrd0000_11_imageregistryConfigsCrdYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/crd/0000_11_imageregistry-configs.crd.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "assets/crd/0000_11_imageregistry-configs.crd.yaml", size: 90225, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsScc0000_20_kubeApiserverOperator_00_sccAnyuidYaml = []byte(`allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegeEscalation: true +allowPrivilegedContainer: false +allowedCapabilities: +apiVersion: security.openshift.io/v1 +defaultAddCapabilities: +fsGroup: + type: RunAsAny +groups: +- system:cluster-admins +kind: SecurityContextConstraints +metadata: + annotations: + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/create-only: "true" + kubernetes.io/description: anyuid provides all features of the restricted SCC + but allows users to run with any UID and any GID. 
+ name: anyuid +priority: 10 +readOnlyRootFilesystem: false +requiredDropCapabilities: +- MKNOD +runAsUser: + type: RunAsAny +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: RunAsAny +users: [] +volumes: +- configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- projected +- secret +`) + +func assetsScc0000_20_kubeApiserverOperator_00_sccAnyuidYamlBytes() ([]byte, error) { + return _assetsScc0000_20_kubeApiserverOperator_00_sccAnyuidYaml, nil +} + +func assetsScc0000_20_kubeApiserverOperator_00_sccAnyuidYaml() (*asset, error) { + bytes, err := assetsScc0000_20_kubeApiserverOperator_00_sccAnyuidYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-anyuid.yaml", size: 1048, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsScc0000_20_kubeApiserverOperator_00_sccHostaccessYaml = []byte(`allowHostDirVolumePlugin: true +allowHostIPC: true +allowHostNetwork: true +allowHostPID: true +allowHostPorts: true +allowPrivilegeEscalation: true +allowPrivilegedContainer: false +allowedCapabilities: +apiVersion: security.openshift.io/v1 +defaultAddCapabilities: +fsGroup: + type: MustRunAs +groups: [] +kind: SecurityContextConstraints +metadata: + annotations: + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/create-only: "true" + kubernetes.io/description: 'hostaccess allows access to all host namespaces but + still requires pods to be run with a UID and SELinux context that are allocated + to the namespace. WARNING: this SCC allows host access to namespaces, file systems, + and PIDS. It should only be used by trusted pods. Grant with caution.' 
+ name: hostaccess +priority: +readOnlyRootFilesystem: false +requiredDropCapabilities: +- KILL +- MKNOD +- SETUID +- SETGID +runAsUser: + type: MustRunAsRange +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: RunAsAny +users: [] +volumes: +- configMap +- downwardAPI +- emptyDir +- hostPath +- persistentVolumeClaim +- projected +- secret +`) + +func assetsScc0000_20_kubeApiserverOperator_00_sccHostaccessYamlBytes() ([]byte, error) { + return _assetsScc0000_20_kubeApiserverOperator_00_sccHostaccessYaml, nil +} + +func assetsScc0000_20_kubeApiserverOperator_00_sccHostaccessYaml() (*asset, error) { + bytes, err := assetsScc0000_20_kubeApiserverOperator_00_sccHostaccessYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-hostaccess.yaml", size: 1267, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsScc0000_20_kubeApiserverOperator_00_sccHostmountAnyuidYaml = []byte(`allowHostDirVolumePlugin: true +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegeEscalation: true +allowPrivilegedContainer: false +allowedCapabilities: +apiVersion: security.openshift.io/v1 +defaultAddCapabilities: +fsGroup: + type: RunAsAny +groups: [] +kind: SecurityContextConstraints +metadata: + annotations: + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/create-only: "true" + kubernetes.io/description: |- + hostmount-anyuid provides all the features of the + restricted SCC but allows host mounts and any UID by a pod. This is primarily + used by the persistent volume recycler. WARNING: this SCC allows host file + system access as any UID, including UID 0. Grant with caution. 
+ name: hostmount-anyuid +priority: +readOnlyRootFilesystem: false +requiredDropCapabilities: +- MKNOD +runAsUser: + type: RunAsAny +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: RunAsAny +users: +- system:serviceaccount:openshift-infra:pv-recycler-controller +volumes: +- configMap +- downwardAPI +- emptyDir +- hostPath +- nfs +- persistentVolumeClaim +- projected +- secret +`) + +func assetsScc0000_20_kubeApiserverOperator_00_sccHostmountAnyuidYamlBytes() ([]byte, error) { + return _assetsScc0000_20_kubeApiserverOperator_00_sccHostmountAnyuidYaml, nil +} + +func assetsScc0000_20_kubeApiserverOperator_00_sccHostmountAnyuidYaml() (*asset, error) { + bytes, err := assetsScc0000_20_kubeApiserverOperator_00_sccHostmountAnyuidYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-hostmount-anyuid.yaml", size: 1298, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsScc0000_20_kubeApiserverOperator_00_sccHostnetworkYaml = []byte(`allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: true +allowHostPID: false +allowHostPorts: true +allowPrivilegeEscalation: true +allowPrivilegedContainer: false +allowedCapabilities: +apiVersion: security.openshift.io/v1 +defaultAddCapabilities: +fsGroup: + type: MustRunAs +groups: [] +kind: SecurityContextConstraints +metadata: + annotations: + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/create-only: "true" + kubernetes.io/description: hostnetwork allows using host networking and host ports + but still requires pods to be run with a UID and SELinux context that are allocated + to the namespace. 
+ name: hostnetwork +priority: +readOnlyRootFilesystem: false +requiredDropCapabilities: +- KILL +- MKNOD +- SETUID +- SETGID +runAsUser: + type: MustRunAsRange +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: MustRunAs +users: [] +volumes: +- configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- projected +- secret +`) + +func assetsScc0000_20_kubeApiserverOperator_00_sccHostnetworkYamlBytes() ([]byte, error) { + return _assetsScc0000_20_kubeApiserverOperator_00_sccHostnetworkYaml, nil +} + +func assetsScc0000_20_kubeApiserverOperator_00_sccHostnetworkYaml() (*asset, error) { + bytes, err := assetsScc0000_20_kubeApiserverOperator_00_sccHostnetworkYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-hostnetwork.yaml", size: 1123, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsScc0000_20_kubeApiserverOperator_00_sccNonrootYaml = []byte(`allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegeEscalation: true +allowPrivilegedContainer: false +allowedCapabilities: +apiVersion: security.openshift.io/v1 +defaultAddCapabilities: +fsGroup: + type: RunAsAny +groups: [] +kind: SecurityContextConstraints +metadata: + annotations: + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/create-only: "true" + kubernetes.io/description: nonroot provides all features of the restricted SCC + but allows users to run with any non-root UID. The user must specify the UID + or it must be specified on the by the manifest of the container runtime. 
+ name: nonroot +priority: +readOnlyRootFilesystem: false +requiredDropCapabilities: +- KILL +- MKNOD +- SETUID +- SETGID +runAsUser: + type: MustRunAsNonRoot +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: RunAsAny +users: [] +volumes: +- configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- projected +- secret +`) + +func assetsScc0000_20_kubeApiserverOperator_00_sccNonrootYamlBytes() ([]byte, error) { + return _assetsScc0000_20_kubeApiserverOperator_00_sccNonrootYaml, nil +} + +func assetsScc0000_20_kubeApiserverOperator_00_sccNonrootYaml() (*asset, error) { + bytes, err := assetsScc0000_20_kubeApiserverOperator_00_sccNonrootYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-nonroot.yaml", size: 1166, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsScc0000_20_kubeApiserverOperator_00_sccPrivilegedYaml = []byte(`allowHostDirVolumePlugin: true +allowHostIPC: true +allowHostNetwork: true +allowHostPID: true +allowHostPorts: true +allowPrivilegeEscalation: true +allowPrivilegedContainer: true +allowedCapabilities: +- "*" +allowedUnsafeSysctls: +- "*" +apiVersion: security.openshift.io/v1 +defaultAddCapabilities: +fsGroup: + type: RunAsAny +groups: +- system:cluster-admins +- system:nodes +- system:masters +kind: SecurityContextConstraints +metadata: + annotations: + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/create-only: "true" + kubernetes.io/description: 'privileged allows access to all privileged and host + features and the ability to run as any user, any group, any fsGroup, and with + any SELinux context. WARNING: this is the most relaxed SCC and should be used + only for cluster administration. Grant with caution.' 
+ name: privileged +priority: +readOnlyRootFilesystem: false +requiredDropCapabilities: +runAsUser: + type: RunAsAny +seLinuxContext: + type: RunAsAny +seccompProfiles: +- "*" +supplementalGroups: + type: RunAsAny +users: +- system:admin +- system:serviceaccount:openshift-infra:build-controller +volumes: +- "*" +`) + +func assetsScc0000_20_kubeApiserverOperator_00_sccPrivilegedYamlBytes() ([]byte, error) { + return _assetsScc0000_20_kubeApiserverOperator_00_sccPrivilegedYaml, nil +} + +func assetsScc0000_20_kubeApiserverOperator_00_sccPrivilegedYaml() (*asset, error) { + bytes, err := assetsScc0000_20_kubeApiserverOperator_00_sccPrivilegedYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-privileged.yaml", size: 1291, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsScc0000_20_kubeApiserverOperator_00_sccRestrictedYaml = []byte(`allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegeEscalation: true +allowPrivilegedContainer: false +allowedCapabilities: +apiVersion: security.openshift.io/v1 +defaultAddCapabilities: +fsGroup: + type: MustRunAs +groups: +- system:authenticated +kind: SecurityContextConstraints +metadata: + annotations: + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/create-only: "true" + kubernetes.io/description: restricted denies access to all host features and requires + pods to be run with a UID, and SELinux context that are allocated to the namespace. This + is the most restrictive SCC and it is used by default for authenticated users. + name: restricted +priority: +readOnlyRootFilesystem: false +requiredDropCapabilities: +- KILL +- MKNOD +- SETUID +- SETGID +runAsUser: + type: MustRunAsRange +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: RunAsAny +users: [] +volumes: +- configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- projected +- secret +`) + +func assetsScc0000_20_kubeApiserverOperator_00_sccRestrictedYamlBytes() ([]byte, error) { + return _assetsScc0000_20_kubeApiserverOperator_00_sccRestrictedYaml, nil +} + +func assetsScc0000_20_kubeApiserverOperator_00_sccRestrictedYaml() (*asset, error) { + bytes, err := assetsScc0000_20_kubeApiserverOperator_00_sccRestrictedYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-restricted.yaml", size: 1213, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -3483,6 +5876,46 @@ func AssetNames() []string { // _bindata is a table, holding each asset generator, mapped to its name. 
var _bindata = map[string]func() (*asset, error){ + "assets/components/flannel/clusterrole.yaml": assetsComponentsFlannelClusterroleYaml, + "assets/components/flannel/clusterrolebinding.yaml": assetsComponentsFlannelClusterrolebindingYaml, + "assets/components/flannel/configmap.yaml": assetsComponentsFlannelConfigmapYaml, + "assets/components/flannel/daemonset.yaml": assetsComponentsFlannelDaemonsetYaml, + "assets/components/flannel/podsecuritypolicy.yaml": assetsComponentsFlannelPodsecuritypolicyYaml, + "assets/components/flannel/service-account.yaml": assetsComponentsFlannelServiceAccountYaml, + "assets/components/hostpath-provisioner/clusterrole.yaml": assetsComponentsHostpathProvisionerClusterroleYaml, + "assets/components/hostpath-provisioner/clusterrolebinding.yaml": assetsComponentsHostpathProvisionerClusterrolebindingYaml, + "assets/components/hostpath-provisioner/daemonset.yaml": assetsComponentsHostpathProvisionerDaemonsetYaml, + "assets/components/hostpath-provisioner/namespace.yaml": assetsComponentsHostpathProvisionerNamespaceYaml, + "assets/components/hostpath-provisioner/scc.yaml": assetsComponentsHostpathProvisionerSccYaml, + "assets/components/hostpath-provisioner/service-account.yaml": assetsComponentsHostpathProvisionerServiceAccountYaml, + "assets/components/hostpath-provisioner/storageclass.yaml": assetsComponentsHostpathProvisionerStorageclassYaml, + "assets/components/openshift-dns/dns/cluster-role-binding.yaml": assetsComponentsOpenshiftDnsDnsClusterRoleBindingYaml, + "assets/components/openshift-dns/dns/cluster-role.yaml": assetsComponentsOpenshiftDnsDnsClusterRoleYaml, + "assets/components/openshift-dns/dns/configmap.yaml": assetsComponentsOpenshiftDnsDnsConfigmapYaml, + "assets/components/openshift-dns/dns/daemonset.yaml": assetsComponentsOpenshiftDnsDnsDaemonsetYaml, + "assets/components/openshift-dns/dns/namespace.yaml": assetsComponentsOpenshiftDnsDnsNamespaceYaml, + "assets/components/openshift-dns/dns/service-account.yaml": assetsComponentsOpenshiftDnsDnsServiceAccountYaml, + "assets/components/openshift-dns/dns/service.yaml": assetsComponentsOpenshiftDnsDnsServiceYaml, + "assets/components/openshift-dns/node-resolver/daemonset.yaml": assetsComponentsOpenshiftDnsNodeResolverDaemonsetYaml, + "assets/components/openshift-dns/node-resolver/service-account.yaml": assetsComponentsOpenshiftDnsNodeResolverServiceAccountYaml, + "assets/components/openshift-router/cluster-role-binding.yaml": assetsComponentsOpenshiftRouterClusterRoleBindingYaml, + "assets/components/openshift-router/cluster-role.yaml": assetsComponentsOpenshiftRouterClusterRoleYaml, + "assets/components/openshift-router/configmap.yaml": assetsComponentsOpenshiftRouterConfigmapYaml, + "assets/components/openshift-router/deployment.yaml": assetsComponentsOpenshiftRouterDeploymentYaml, + "assets/components/openshift-router/namespace.yaml": assetsComponentsOpenshiftRouterNamespaceYaml, + "assets/components/openshift-router/service-account.yaml": assetsComponentsOpenshiftRouterServiceAccountYaml, + "assets/components/openshift-router/service-cloud.yaml": assetsComponentsOpenshiftRouterServiceCloudYaml, + "assets/components/openshift-router/service-internal.yaml": assetsComponentsOpenshiftRouterServiceInternalYaml, + "assets/components/service-ca/clusterrole.yaml": assetsComponentsServiceCaClusterroleYaml, + "assets/components/service-ca/clusterrolebinding.yaml": assetsComponentsServiceCaClusterrolebindingYaml, + "assets/components/service-ca/deployment.yaml": assetsComponentsServiceCaDeploymentYaml, + 
"assets/components/service-ca/ns.yaml": assetsComponentsServiceCaNsYaml, + "assets/components/service-ca/role.yaml": assetsComponentsServiceCaRoleYaml, + "assets/components/service-ca/rolebinding.yaml": assetsComponentsServiceCaRolebindingYaml, + "assets/components/service-ca/sa.yaml": assetsComponentsServiceCaSaYaml, + "assets/components/service-ca/signing-cabundle.yaml": assetsComponentsServiceCaSigningCabundleYaml, + "assets/components/service-ca/signing-secret.yaml": assetsComponentsServiceCaSigningSecretYaml, + "assets/core/0000_50_cluster-openshift-controller-manager_00_namespace.yaml": assetsCore0000_50_clusterOpenshiftControllerManager_00_namespaceYaml, "assets/crd/0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml": assetsCrd0000_03_authorizationOpenshift_01_rolebindingrestrictionCrdYaml, "assets/crd/0000_03_config-operator_01_proxy.crd.yaml": assetsCrd0000_03_configOperator_01_proxyCrdYaml, "assets/crd/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml": assetsCrd0000_03_quotaOpenshift_01_clusterresourcequotaCrdYaml, @@ -3492,6 +5925,13 @@ var _bindata = map[string]func() (*asset, error){ "assets/crd/0000_10_config-operator_01_image.crd.yaml": assetsCrd0000_10_configOperator_01_imageCrdYaml, "assets/crd/0000_10_config-operator_01_imagecontentsourcepolicy.crd.yaml": assetsCrd0000_10_configOperator_01_imagecontentsourcepolicyCrdYaml, "assets/crd/0000_11_imageregistry-configs.crd.yaml": assetsCrd0000_11_imageregistryConfigsCrdYaml, + "assets/scc/0000_20_kube-apiserver-operator_00_scc-anyuid.yaml": assetsScc0000_20_kubeApiserverOperator_00_sccAnyuidYaml, + "assets/scc/0000_20_kube-apiserver-operator_00_scc-hostaccess.yaml": assetsScc0000_20_kubeApiserverOperator_00_sccHostaccessYaml, + "assets/scc/0000_20_kube-apiserver-operator_00_scc-hostmount-anyuid.yaml": assetsScc0000_20_kubeApiserverOperator_00_sccHostmountAnyuidYaml, + "assets/scc/0000_20_kube-apiserver-operator_00_scc-hostnetwork.yaml": assetsScc0000_20_kubeApiserverOperator_00_sccHostnetworkYaml, + "assets/scc/0000_20_kube-apiserver-operator_00_scc-nonroot.yaml": assetsScc0000_20_kubeApiserverOperator_00_sccNonrootYaml, + "assets/scc/0000_20_kube-apiserver-operator_00_scc-privileged.yaml": assetsScc0000_20_kubeApiserverOperator_00_sccPrivilegedYaml, + "assets/scc/0000_20_kube-apiserver-operator_00_scc-restricted.yaml": assetsScc0000_20_kubeApiserverOperator_00_sccRestrictedYaml, } // AssetDir returns the file names below a certain @@ -3536,6 +5976,64 @@ type bintree struct { var _bintree = &bintree{nil, map[string]*bintree{ "assets": {nil, map[string]*bintree{ + "components": {nil, map[string]*bintree{ + "flannel": {nil, map[string]*bintree{ + "clusterrole.yaml": {assetsComponentsFlannelClusterroleYaml, map[string]*bintree{}}, + "clusterrolebinding.yaml": {assetsComponentsFlannelClusterrolebindingYaml, map[string]*bintree{}}, + "configmap.yaml": {assetsComponentsFlannelConfigmapYaml, map[string]*bintree{}}, + "daemonset.yaml": {assetsComponentsFlannelDaemonsetYaml, map[string]*bintree{}}, + "podsecuritypolicy.yaml": {assetsComponentsFlannelPodsecuritypolicyYaml, map[string]*bintree{}}, + "service-account.yaml": {assetsComponentsFlannelServiceAccountYaml, map[string]*bintree{}}, + }}, + "hostpath-provisioner": {nil, map[string]*bintree{ + "clusterrole.yaml": {assetsComponentsHostpathProvisionerClusterroleYaml, map[string]*bintree{}}, + "clusterrolebinding.yaml": {assetsComponentsHostpathProvisionerClusterrolebindingYaml, map[string]*bintree{}}, + "daemonset.yaml": 
{assetsComponentsHostpathProvisionerDaemonsetYaml, map[string]*bintree{}}, + "namespace.yaml": {assetsComponentsHostpathProvisionerNamespaceYaml, map[string]*bintree{}}, + "scc.yaml": {assetsComponentsHostpathProvisionerSccYaml, map[string]*bintree{}}, + "service-account.yaml": {assetsComponentsHostpathProvisionerServiceAccountYaml, map[string]*bintree{}}, + "storageclass.yaml": {assetsComponentsHostpathProvisionerStorageclassYaml, map[string]*bintree{}}, + }}, + "openshift-dns": {nil, map[string]*bintree{ + "dns": {nil, map[string]*bintree{ + "cluster-role-binding.yaml": {assetsComponentsOpenshiftDnsDnsClusterRoleBindingYaml, map[string]*bintree{}}, + "cluster-role.yaml": {assetsComponentsOpenshiftDnsDnsClusterRoleYaml, map[string]*bintree{}}, + "configmap.yaml": {assetsComponentsOpenshiftDnsDnsConfigmapYaml, map[string]*bintree{}}, + "daemonset.yaml": {assetsComponentsOpenshiftDnsDnsDaemonsetYaml, map[string]*bintree{}}, + "namespace.yaml": {assetsComponentsOpenshiftDnsDnsNamespaceYaml, map[string]*bintree{}}, + "service-account.yaml": {assetsComponentsOpenshiftDnsDnsServiceAccountYaml, map[string]*bintree{}}, + "service.yaml": {assetsComponentsOpenshiftDnsDnsServiceYaml, map[string]*bintree{}}, + }}, + "node-resolver": {nil, map[string]*bintree{ + "daemonset.yaml": {assetsComponentsOpenshiftDnsNodeResolverDaemonsetYaml, map[string]*bintree{}}, + "service-account.yaml": {assetsComponentsOpenshiftDnsNodeResolverServiceAccountYaml, map[string]*bintree{}}, + }}, + }}, + "openshift-router": {nil, map[string]*bintree{ + "cluster-role-binding.yaml": {assetsComponentsOpenshiftRouterClusterRoleBindingYaml, map[string]*bintree{}}, + "cluster-role.yaml": {assetsComponentsOpenshiftRouterClusterRoleYaml, map[string]*bintree{}}, + "configmap.yaml": {assetsComponentsOpenshiftRouterConfigmapYaml, map[string]*bintree{}}, + "deployment.yaml": {assetsComponentsOpenshiftRouterDeploymentYaml, map[string]*bintree{}}, + "namespace.yaml": {assetsComponentsOpenshiftRouterNamespaceYaml, map[string]*bintree{}}, + "service-account.yaml": {assetsComponentsOpenshiftRouterServiceAccountYaml, map[string]*bintree{}}, + "service-cloud.yaml": {assetsComponentsOpenshiftRouterServiceCloudYaml, map[string]*bintree{}}, + "service-internal.yaml": {assetsComponentsOpenshiftRouterServiceInternalYaml, map[string]*bintree{}}, + }}, + "service-ca": {nil, map[string]*bintree{ + "clusterrole.yaml": {assetsComponentsServiceCaClusterroleYaml, map[string]*bintree{}}, + "clusterrolebinding.yaml": {assetsComponentsServiceCaClusterrolebindingYaml, map[string]*bintree{}}, + "deployment.yaml": {assetsComponentsServiceCaDeploymentYaml, map[string]*bintree{}}, + "ns.yaml": {assetsComponentsServiceCaNsYaml, map[string]*bintree{}}, + "role.yaml": {assetsComponentsServiceCaRoleYaml, map[string]*bintree{}}, + "rolebinding.yaml": {assetsComponentsServiceCaRolebindingYaml, map[string]*bintree{}}, + "sa.yaml": {assetsComponentsServiceCaSaYaml, map[string]*bintree{}}, + "signing-cabundle.yaml": {assetsComponentsServiceCaSigningCabundleYaml, map[string]*bintree{}}, + "signing-secret.yaml": {assetsComponentsServiceCaSigningSecretYaml, map[string]*bintree{}}, + }}, + }}, + "core": {nil, map[string]*bintree{ + "0000_50_cluster-openshift-controller-manager_00_namespace.yaml": {assetsCore0000_50_clusterOpenshiftControllerManager_00_namespaceYaml, map[string]*bintree{}}, + }}, "crd": {nil, map[string]*bintree{ "0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml": {assetsCrd0000_03_authorizationOpenshift_01_rolebindingrestrictionCrdYaml, 
map[string]*bintree{}}, "0000_03_config-operator_01_proxy.crd.yaml": {assetsCrd0000_03_configOperator_01_proxyCrdYaml, map[string]*bintree{}}, @@ -3547,6 +6045,15 @@ var _bintree = &bintree{nil, map[string]*bintree{ "0000_10_config-operator_01_imagecontentsourcepolicy.crd.yaml": {assetsCrd0000_10_configOperator_01_imagecontentsourcepolicyCrdYaml, map[string]*bintree{}}, "0000_11_imageregistry-configs.crd.yaml": {assetsCrd0000_11_imageregistryConfigsCrdYaml, map[string]*bintree{}}, }}, + "scc": {nil, map[string]*bintree{ + "0000_20_kube-apiserver-operator_00_scc-anyuid.yaml": {assetsScc0000_20_kubeApiserverOperator_00_sccAnyuidYaml, map[string]*bintree{}}, + "0000_20_kube-apiserver-operator_00_scc-hostaccess.yaml": {assetsScc0000_20_kubeApiserverOperator_00_sccHostaccessYaml, map[string]*bintree{}}, + "0000_20_kube-apiserver-operator_00_scc-hostmount-anyuid.yaml": {assetsScc0000_20_kubeApiserverOperator_00_sccHostmountAnyuidYaml, map[string]*bintree{}}, + "0000_20_kube-apiserver-operator_00_scc-hostnetwork.yaml": {assetsScc0000_20_kubeApiserverOperator_00_sccHostnetworkYaml, map[string]*bintree{}}, + "0000_20_kube-apiserver-operator_00_scc-nonroot.yaml": {assetsScc0000_20_kubeApiserverOperator_00_sccNonrootYaml, map[string]*bintree{}}, + "0000_20_kube-apiserver-operator_00_scc-privileged.yaml": {assetsScc0000_20_kubeApiserverOperator_00_sccPrivilegedYaml, map[string]*bintree{}}, + "0000_20_kube-apiserver-operator_00_scc-restricted.yaml": {assetsScc0000_20_kubeApiserverOperator_00_sccRestrictedYaml, map[string]*bintree{}}, + }}, }}, }} diff --git a/pkg/assets/core.go b/pkg/assets/core.go old mode 100755 new mode 100644 index 087e1a62467..1e840cf273c --- a/pkg/assets/core.go +++ b/pkg/assets/core.go @@ -4,8 +4,6 @@ import ( "context" "fmt" - coreassets "github.com/openshift/microshift/pkg/assets/core" - "k8s.io/klog/v2" "k8s.io/client-go/rest" @@ -190,7 +188,7 @@ func applyCore(cores []string, applier readerApplier, render RenderFunc, params for _, core := range cores { klog.Infof("Applying corev1 api %s", core) - objBytes, err := coreassets.Asset(core) + objBytes, err := Asset(core) if err != nil { return fmt.Errorf("error getting asset %s: %v", core, err) } diff --git a/pkg/assets/core/bindata.go b/pkg/assets/core/bindata.go deleted file mode 100644 index dfacf35ac9e..00000000000 --- a/pkg/assets/core/bindata.go +++ /dev/null @@ -1,825 +0,0 @@ -// Package assets Code generated by go-bindata. (@generated) DO NOT EDIT. 
-// sources: -// assets/core/0000_00_flannel-configmap.yaml -// assets/core/0000_00_flannel-service-account.yaml -// assets/core/0000_50_cluster-openshift-controller-manager_00_namespace.yaml -// assets/core/0000_60_service-ca_01_namespace.yaml -// assets/core/0000_60_service-ca_04_configmap.yaml -// assets/core/0000_60_service-ca_04_sa.yaml -// assets/core/0000_60_service-ca_04_secret.yaml -// assets/core/0000_70_dns_00-namespace.yaml -// assets/core/0000_70_dns_01-configmap.yaml -// assets/core/0000_70_dns_01-dns-service-account.yaml -// assets/core/0000_70_dns_01-node-resolver-service-account.yaml -// assets/core/0000_70_dns_01-service.yaml -// assets/core/0000_80_hostpath-provisioner-namespace.yaml -// assets/core/0000_80_hostpath-provisioner-serviceaccount.yaml -// assets/core/0000_80_openshift-router-cm.yaml -// assets/core/0000_80_openshift-router-external-service.yaml -// assets/core/0000_80_openshift-router-namespace.yaml -// assets/core/0000_80_openshift-router-service-account.yaml -// assets/core/0000_80_openshift-router-service.yaml -package assets - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" -) - -type asset struct { - bytes []byte - info os.FileInfo -} - -type bindataFileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time -} - -// Name return file name -func (fi bindataFileInfo) Name() string { - return fi.name -} - -// Size return file size -func (fi bindataFileInfo) Size() int64 { - return fi.size -} - -// Mode return file mode -func (fi bindataFileInfo) Mode() os.FileMode { - return fi.mode -} - -// Mode return file modify time -func (fi bindataFileInfo) ModTime() time.Time { - return fi.modTime -} - -// IsDir return file whether a directory -func (fi bindataFileInfo) IsDir() bool { - return fi.mode&os.ModeDir != 0 -} - -// Sys return file is sys mode -func (fi bindataFileInfo) Sys() interface{} { - return nil -} - -var _assetsCore0000_00_flannelConfigmapYaml = []byte(`kind: ConfigMap -apiVersion: v1 -metadata: - name: kube-flannel-cfg - namespace: kube-system - labels: - tier: node - app: flannel -data: - cni-conf.json: | - { - "name": "cbr0", - "cniVersion": "0.3.1", - "plugins": [ - { - "type": "flannel", - "delegate": { - "hairpinMode": true, - "forceAddress": true, - "isDefaultGateway": true - } - }, - { - "type": "portmap", - "capabilities": { - "portMappings": true - } - } - ] - } - net-conf.json: | - { - "Network": "10.42.0.0/16", - "Backend": { - "Type": "vxlan" - } - }`) - -func assetsCore0000_00_flannelConfigmapYamlBytes() ([]byte, error) { - return _assetsCore0000_00_flannelConfigmapYaml, nil -} - -func assetsCore0000_00_flannelConfigmapYaml() (*asset, error) { - bytes, err := assetsCore0000_00_flannelConfigmapYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/core/0000_00_flannel-configmap.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsCore0000_00_flannelServiceAccountYaml = []byte(`apiVersion: v1 -kind: ServiceAccount -metadata: - name: flannel - namespace: kube-system`) - -func assetsCore0000_00_flannelServiceAccountYamlBytes() ([]byte, error) { - return _assetsCore0000_00_flannelServiceAccountYaml, nil -} - -func assetsCore0000_00_flannelServiceAccountYaml() (*asset, error) { - bytes, err := assetsCore0000_00_flannelServiceAccountYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/core/0000_00_flannel-service-account.yaml", 
size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsCore0000_50_clusterOpenshiftControllerManager_00_namespaceYaml = []byte(`apiVersion: v1 -kind: Namespace -metadata: - annotations: - include.release.openshift.io/self-managed-high-availability: "true" - openshift.io/node-selector: "" - labels: - openshift.io/cluster-monitoring: "true" - name: openshift-controller-manager -`) - -func assetsCore0000_50_clusterOpenshiftControllerManager_00_namespaceYamlBytes() ([]byte, error) { - return _assetsCore0000_50_clusterOpenshiftControllerManager_00_namespaceYaml, nil -} - -func assetsCore0000_50_clusterOpenshiftControllerManager_00_namespaceYaml() (*asset, error) { - bytes, err := assetsCore0000_50_clusterOpenshiftControllerManager_00_namespaceYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/core/0000_50_cluster-openshift-controller-manager_00_namespace.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsCore0000_60_serviceCa_01_namespaceYaml = []byte(`apiVersion: v1 -kind: Namespace -metadata: - name: openshift-service-ca - annotations: - openshift.io/node-selector: "" - workload.openshift.io/allowed: "management" -`) - -func assetsCore0000_60_serviceCa_01_namespaceYamlBytes() ([]byte, error) { - return _assetsCore0000_60_serviceCa_01_namespaceYaml, nil -} - -func assetsCore0000_60_serviceCa_01_namespaceYaml() (*asset, error) { - bytes, err := assetsCore0000_60_serviceCa_01_namespaceYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/core/0000_60_service-ca_01_namespace.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsCore0000_60_serviceCa_04_configmapYaml = []byte(`apiVersion: v1 -kind: ConfigMap -metadata: - namespace: openshift-service-ca - name: signing-cabundle -data: - ca-bundle.crt: -`) - -func assetsCore0000_60_serviceCa_04_configmapYamlBytes() ([]byte, error) { - return _assetsCore0000_60_serviceCa_04_configmapYaml, nil -} - -func assetsCore0000_60_serviceCa_04_configmapYaml() (*asset, error) { - bytes, err := assetsCore0000_60_serviceCa_04_configmapYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/core/0000_60_service-ca_04_configmap.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsCore0000_60_serviceCa_04_saYaml = []byte(`apiVersion: v1 -kind: ServiceAccount -metadata: - namespace: openshift-service-ca - name: service-ca -`) - -func assetsCore0000_60_serviceCa_04_saYamlBytes() ([]byte, error) { - return _assetsCore0000_60_serviceCa_04_saYaml, nil -} - -func assetsCore0000_60_serviceCa_04_saYaml() (*asset, error) { - bytes, err := assetsCore0000_60_serviceCa_04_saYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/core/0000_60_service-ca_04_sa.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsCore0000_60_serviceCa_04_secretYaml = []byte(`apiVersion: v1 -kind: Secret -metadata: - namespace: openshift-service-ca - name: signing-key -type: kubernetes.io/tls -data: - tls.crt: - tls.key: -`) - -func assetsCore0000_60_serviceCa_04_secretYamlBytes() ([]byte, error) { - return 
_assetsCore0000_60_serviceCa_04_secretYaml, nil -} - -func assetsCore0000_60_serviceCa_04_secretYaml() (*asset, error) { - bytes, err := assetsCore0000_60_serviceCa_04_secretYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/core/0000_60_service-ca_04_secret.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsCore0000_70_dns_00NamespaceYaml = []byte(`kind: Namespace -apiVersion: v1 -metadata: - annotations: - openshift.io/node-selector: "" - workload.openshift.io/allowed: "management" - name: openshift-dns - labels: - # set value to avoid depending on kube admission that depends on openshift apis - openshift.io/run-level: "0" - # allow openshift-monitoring to look for ServiceMonitor objects in this namespace - openshift.io/cluster-monitoring: "true" -`) - -func assetsCore0000_70_dns_00NamespaceYamlBytes() ([]byte, error) { - return _assetsCore0000_70_dns_00NamespaceYaml, nil -} - -func assetsCore0000_70_dns_00NamespaceYaml() (*asset, error) { - bytes, err := assetsCore0000_70_dns_00NamespaceYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/core/0000_70_dns_00-namespace.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsCore0000_70_dns_01ConfigmapYaml = []byte(`apiVersion: v1 -data: - Corefile: | - .:5353 { - bufsize 512 - errors - health { - lameduck 20s - } - ready - kubernetes cluster.local in-addr.arpa ip6.arpa { - pods insecure - fallthrough in-addr.arpa ip6.arpa - } - prometheus 127.0.0.1:9153 - forward . /etc/resolv.conf { - policy sequential - } - cache 900 { - denial 9984 30 - } - reload - } -kind: ConfigMap -metadata: - labels: - dns.operator.openshift.io/owning-dns: default - name: dns-default - namespace: openshift-dns -`) - -func assetsCore0000_70_dns_01ConfigmapYamlBytes() ([]byte, error) { - return _assetsCore0000_70_dns_01ConfigmapYaml, nil -} - -func assetsCore0000_70_dns_01ConfigmapYaml() (*asset, error) { - bytes, err := assetsCore0000_70_dns_01ConfigmapYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/core/0000_70_dns_01-configmap.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsCore0000_70_dns_01DnsServiceAccountYaml = []byte(`kind: ServiceAccount -apiVersion: v1 -metadata: - name: dns - namespace: openshift-dns -`) - -func assetsCore0000_70_dns_01DnsServiceAccountYamlBytes() ([]byte, error) { - return _assetsCore0000_70_dns_01DnsServiceAccountYaml, nil -} - -func assetsCore0000_70_dns_01DnsServiceAccountYaml() (*asset, error) { - bytes, err := assetsCore0000_70_dns_01DnsServiceAccountYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/core/0000_70_dns_01-dns-service-account.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsCore0000_70_dns_01NodeResolverServiceAccountYaml = []byte(`kind: ServiceAccount -apiVersion: v1 -metadata: - name: node-resolver - namespace: openshift-dns -`) - -func assetsCore0000_70_dns_01NodeResolverServiceAccountYamlBytes() ([]byte, error) { - return _assetsCore0000_70_dns_01NodeResolverServiceAccountYaml, nil -} - -func assetsCore0000_70_dns_01NodeResolverServiceAccountYaml() (*asset, error) { - bytes, err := 
assetsCore0000_70_dns_01NodeResolverServiceAccountYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/core/0000_70_dns_01-node-resolver-service-account.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsCore0000_70_dns_01ServiceYaml = []byte(`kind: Service -apiVersion: v1 -metadata: - annotations: - service.beta.openshift.io/serving-cert-secret-name: dns-default-metrics-tls - labels: - dns.operator.openshift.io/owning-dns: default - name: dns-default - namespace: openshift-dns -spec: - clusterIP: {{.ClusterIP}} - selector: - dns.operator.openshift.io/daemonset-dns: default - ports: - - name: dns - port: 53 - targetPort: dns - protocol: UDP - - name: dns-tcp - port: 53 - targetPort: dns-tcp - protocol: TCP - - name: metrics - port: 9154 - targetPort: metrics - protocol: TCP - # TODO: Uncomment when service topology feature gate is enabled. - #topologyKeys: - # - "kubernetes.io/hostname" - # - "*" -`) - -func assetsCore0000_70_dns_01ServiceYamlBytes() ([]byte, error) { - return _assetsCore0000_70_dns_01ServiceYaml, nil -} - -func assetsCore0000_70_dns_01ServiceYaml() (*asset, error) { - bytes, err := assetsCore0000_70_dns_01ServiceYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/core/0000_70_dns_01-service.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsCore0000_80_hostpathProvisionerNamespaceYaml = []byte(`apiVersion: v1 -kind: Namespace -metadata: - name: kubevirt-hostpath-provisioner`) - -func assetsCore0000_80_hostpathProvisionerNamespaceYamlBytes() ([]byte, error) { - return _assetsCore0000_80_hostpathProvisionerNamespaceYaml, nil -} - -func assetsCore0000_80_hostpathProvisionerNamespaceYaml() (*asset, error) { - bytes, err := assetsCore0000_80_hostpathProvisionerNamespaceYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/core/0000_80_hostpath-provisioner-namespace.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsCore0000_80_hostpathProvisionerServiceaccountYaml = []byte(`apiVersion: v1 -kind: ServiceAccount -metadata: - name: kubevirt-hostpath-provisioner-admin - namespace: kubevirt-hostpath-provisioner`) - -func assetsCore0000_80_hostpathProvisionerServiceaccountYamlBytes() ([]byte, error) { - return _assetsCore0000_80_hostpathProvisionerServiceaccountYaml, nil -} - -func assetsCore0000_80_hostpathProvisionerServiceaccountYaml() (*asset, error) { - bytes, err := assetsCore0000_80_hostpathProvisionerServiceaccountYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/core/0000_80_hostpath-provisioner-serviceaccount.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsCore0000_80_openshiftRouterCmYaml = []byte(`apiVersion: v1 -kind: ConfigMap -metadata: - namespace: openshift-ingress - name: service-ca-bundle - annotations: - service.beta.openshift.io/inject-cabundle: "true" -`) - -func assetsCore0000_80_openshiftRouterCmYamlBytes() ([]byte, error) { - return _assetsCore0000_80_openshiftRouterCmYaml, nil -} - -func assetsCore0000_80_openshiftRouterCmYaml() (*asset, error) { - bytes, err := assetsCore0000_80_openshiftRouterCmYamlBytes() - if err != nil { - return nil, err - } - - 
info := bindataFileInfo{name: "assets/core/0000_80_openshift-router-cm.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsCore0000_80_openshiftRouterExternalServiceYaml = []byte(`kind: Service -apiVersion: v1 -metadata: - annotations: - service.alpha.openshift.io/serving-cert-secret-name: router-certs-default - labels: - ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default - name: router-external-default - namespace: openshift-ingress -spec: - selector: - ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default - type: NodePort - ports: - - name: http - port: 80 - targetPort: 80 - nodePort: 30001 - - name: https - port: 443 - targetPort: 443 - nodePort: 30002 -`) - -func assetsCore0000_80_openshiftRouterExternalServiceYamlBytes() ([]byte, error) { - return _assetsCore0000_80_openshiftRouterExternalServiceYaml, nil -} - -func assetsCore0000_80_openshiftRouterExternalServiceYaml() (*asset, error) { - bytes, err := assetsCore0000_80_openshiftRouterExternalServiceYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/core/0000_80_openshift-router-external-service.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsCore0000_80_openshiftRouterNamespaceYaml = []byte(`kind: Namespace -apiVersion: v1 -metadata: - name: openshift-ingress - annotations: - openshift.io/node-selector: "" - workload.openshift.io/allowed: "management" - labels: - # allow openshift-monitoring to look for ServiceMonitor objects in this namespace - openshift.io/cluster-monitoring: "true" - name: openshift-ingress - # old and new forms of the label for matching with NetworkPolicy - network.openshift.io/policy-group: ingress - policy-group.network.openshift.io/ingress: "" -`) - -func assetsCore0000_80_openshiftRouterNamespaceYamlBytes() ([]byte, error) { - return _assetsCore0000_80_openshiftRouterNamespaceYaml, nil -} - -func assetsCore0000_80_openshiftRouterNamespaceYaml() (*asset, error) { - bytes, err := assetsCore0000_80_openshiftRouterNamespaceYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/core/0000_80_openshift-router-namespace.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsCore0000_80_openshiftRouterServiceAccountYaml = []byte(`# Account for routers created by the operator. It will require cluster scoped -# permissions related to Route processing. -kind: ServiceAccount -apiVersion: v1 -metadata: - name: router - namespace: openshift-ingress -`) - -func assetsCore0000_80_openshiftRouterServiceAccountYamlBytes() ([]byte, error) { - return _assetsCore0000_80_openshiftRouterServiceAccountYaml, nil -} - -func assetsCore0000_80_openshiftRouterServiceAccountYaml() (*asset, error) { - bytes, err := assetsCore0000_80_openshiftRouterServiceAccountYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/core/0000_80_openshift-router-service-account.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsCore0000_80_openshiftRouterServiceYaml = []byte(`# Cluster Service with default values -# Ingress Controller specific annotations are applied at runtime. 
-kind: Service -apiVersion: v1 -metadata: - annotations: - service.alpha.openshift.io/serving-cert-secret-name: router-certs-default - labels: - ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default - name: router-internal-default - namespace: openshift-ingress -spec: - selector: - ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default - type: ClusterIP - ports: - - name: http - port: 80 - protocol: TCP - targetPort: http - - name: https - port: 443 - protocol: TCP - targetPort: https - - name: metrics - port: 1936 - protocol: TCP - targetPort: 1936 -`) - -func assetsCore0000_80_openshiftRouterServiceYamlBytes() ([]byte, error) { - return _assetsCore0000_80_openshiftRouterServiceYaml, nil -} - -func assetsCore0000_80_openshiftRouterServiceYaml() (*asset, error) { - bytes, err := assetsCore0000_80_openshiftRouterServiceYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/core/0000_80_openshift-router-service.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -// Asset loads and returns the asset for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func Asset(name string) ([]byte, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) - } - return a.bytes, nil - } - return nil, fmt.Errorf("Asset %s not found", name) -} - -// MustAsset is like Asset but panics when Asset would return an error. -// It simplifies safe initialization of global variables. -func MustAsset(name string) []byte { - a, err := Asset(name) - if err != nil { - panic("asset: Asset(" + name + "): " + err.Error()) - } - - return a -} - -// AssetInfo loads and returns the asset info for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func AssetInfo(name string) (os.FileInfo, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) - } - return a.info, nil - } - return nil, fmt.Errorf("AssetInfo %s not found", name) -} - -// AssetNames returns the names of the assets. -func AssetNames() []string { - names := make([]string, 0, len(_bindata)) - for name := range _bindata { - names = append(names, name) - } - return names -} - -// _bindata is a table, holding each asset generator, mapped to its name. 
-var _bindata = map[string]func() (*asset, error){ - "assets/core/0000_00_flannel-configmap.yaml": assetsCore0000_00_flannelConfigmapYaml, - "assets/core/0000_00_flannel-service-account.yaml": assetsCore0000_00_flannelServiceAccountYaml, - "assets/core/0000_50_cluster-openshift-controller-manager_00_namespace.yaml": assetsCore0000_50_clusterOpenshiftControllerManager_00_namespaceYaml, - "assets/core/0000_60_service-ca_01_namespace.yaml": assetsCore0000_60_serviceCa_01_namespaceYaml, - "assets/core/0000_60_service-ca_04_configmap.yaml": assetsCore0000_60_serviceCa_04_configmapYaml, - "assets/core/0000_60_service-ca_04_sa.yaml": assetsCore0000_60_serviceCa_04_saYaml, - "assets/core/0000_60_service-ca_04_secret.yaml": assetsCore0000_60_serviceCa_04_secretYaml, - "assets/core/0000_70_dns_00-namespace.yaml": assetsCore0000_70_dns_00NamespaceYaml, - "assets/core/0000_70_dns_01-configmap.yaml": assetsCore0000_70_dns_01ConfigmapYaml, - "assets/core/0000_70_dns_01-dns-service-account.yaml": assetsCore0000_70_dns_01DnsServiceAccountYaml, - "assets/core/0000_70_dns_01-node-resolver-service-account.yaml": assetsCore0000_70_dns_01NodeResolverServiceAccountYaml, - "assets/core/0000_70_dns_01-service.yaml": assetsCore0000_70_dns_01ServiceYaml, - "assets/core/0000_80_hostpath-provisioner-namespace.yaml": assetsCore0000_80_hostpathProvisionerNamespaceYaml, - "assets/core/0000_80_hostpath-provisioner-serviceaccount.yaml": assetsCore0000_80_hostpathProvisionerServiceaccountYaml, - "assets/core/0000_80_openshift-router-cm.yaml": assetsCore0000_80_openshiftRouterCmYaml, - "assets/core/0000_80_openshift-router-external-service.yaml": assetsCore0000_80_openshiftRouterExternalServiceYaml, - "assets/core/0000_80_openshift-router-namespace.yaml": assetsCore0000_80_openshiftRouterNamespaceYaml, - "assets/core/0000_80_openshift-router-service-account.yaml": assetsCore0000_80_openshiftRouterServiceAccountYaml, - "assets/core/0000_80_openshift-router-service.yaml": assetsCore0000_80_openshiftRouterServiceYaml, -} - -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"} -// AssetDir("data/img") would return []string{"a.png", "b.png"} -// AssetDir("foo.txt") and AssetDir("notexist") would return an error -// AssetDir("") will return []string{"data"}. 
-func AssetDir(name string) ([]string, error) { - node := _bintree - if len(name) != 0 { - cannonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(cannonicalName, "/") - for _, p := range pathList { - node = node.Children[p] - if node == nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - } - } - if node.Func != nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - rv := make([]string, 0, len(node.Children)) - for childName := range node.Children { - rv = append(rv, childName) - } - return rv, nil -} - -type bintree struct { - Func func() (*asset, error) - Children map[string]*bintree -} - -var _bintree = &bintree{nil, map[string]*bintree{ - "assets": {nil, map[string]*bintree{ - "core": {nil, map[string]*bintree{ - "0000_00_flannel-configmap.yaml": {assetsCore0000_00_flannelConfigmapYaml, map[string]*bintree{}}, - "0000_00_flannel-service-account.yaml": {assetsCore0000_00_flannelServiceAccountYaml, map[string]*bintree{}}, - "0000_50_cluster-openshift-controller-manager_00_namespace.yaml": {assetsCore0000_50_clusterOpenshiftControllerManager_00_namespaceYaml, map[string]*bintree{}}, - "0000_60_service-ca_01_namespace.yaml": {assetsCore0000_60_serviceCa_01_namespaceYaml, map[string]*bintree{}}, - "0000_60_service-ca_04_configmap.yaml": {assetsCore0000_60_serviceCa_04_configmapYaml, map[string]*bintree{}}, - "0000_60_service-ca_04_sa.yaml": {assetsCore0000_60_serviceCa_04_saYaml, map[string]*bintree{}}, - "0000_60_service-ca_04_secret.yaml": {assetsCore0000_60_serviceCa_04_secretYaml, map[string]*bintree{}}, - "0000_70_dns_00-namespace.yaml": {assetsCore0000_70_dns_00NamespaceYaml, map[string]*bintree{}}, - "0000_70_dns_01-configmap.yaml": {assetsCore0000_70_dns_01ConfigmapYaml, map[string]*bintree{}}, - "0000_70_dns_01-dns-service-account.yaml": {assetsCore0000_70_dns_01DnsServiceAccountYaml, map[string]*bintree{}}, - "0000_70_dns_01-node-resolver-service-account.yaml": {assetsCore0000_70_dns_01NodeResolverServiceAccountYaml, map[string]*bintree{}}, - "0000_70_dns_01-service.yaml": {assetsCore0000_70_dns_01ServiceYaml, map[string]*bintree{}}, - "0000_80_hostpath-provisioner-namespace.yaml": {assetsCore0000_80_hostpathProvisionerNamespaceYaml, map[string]*bintree{}}, - "0000_80_hostpath-provisioner-serviceaccount.yaml": {assetsCore0000_80_hostpathProvisionerServiceaccountYaml, map[string]*bintree{}}, - "0000_80_openshift-router-cm.yaml": {assetsCore0000_80_openshiftRouterCmYaml, map[string]*bintree{}}, - "0000_80_openshift-router-external-service.yaml": {assetsCore0000_80_openshiftRouterExternalServiceYaml, map[string]*bintree{}}, - "0000_80_openshift-router-namespace.yaml": {assetsCore0000_80_openshiftRouterNamespaceYaml, map[string]*bintree{}}, - "0000_80_openshift-router-service-account.yaml": {assetsCore0000_80_openshiftRouterServiceAccountYaml, map[string]*bintree{}}, - "0000_80_openshift-router-service.yaml": {assetsCore0000_80_openshiftRouterServiceYaml, map[string]*bintree{}}, - }}, - }}, -}} - -// RestoreAsset restores an asset under the given directory -func RestoreAsset(dir, name string) error { - data, err := Asset(name) - if err != nil { - return err - } - info, err := AssetInfo(name) - if err != nil { - return err - } - err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) - if err != nil { - return err - } - err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) - if err != nil { - return err - } - err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) - if err != nil { - return err 
- } - return nil -} - -// RestoreAssets restores an asset under the given directory recursively -func RestoreAssets(dir, name string) error { - children, err := AssetDir(name) - // File - if err != nil { - return RestoreAsset(dir, name) - } - // Dir - for _, child := range children { - err = RestoreAssets(dir, filepath.Join(name, child)) - if err != nil { - return err - } - } - return nil -} - -func _filePath(dir, name string) string { - cannonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) -} diff --git a/pkg/assets/crd.go b/pkg/assets/crd.go old mode 100755 new mode 100644 index c2506f3b028..c1d74c136f1 --- a/pkg/assets/crd.go +++ b/pkg/assets/crd.go @@ -7,7 +7,6 @@ import ( klog "k8s.io/klog/v2" - crd_assets "github.com/openshift/microshift/pkg/assets/crd" "github.com/openshift/microshift/pkg/config" apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -78,7 +77,7 @@ func WaitForCrdsEstablished(cfg *config.MicroshiftConfig) error { for _, crd := range crds { klog.Infof("Waiting for crd %s condition.type: established", crd) var crdBytes []byte - crdBytes, err = crd_assets.Asset(crd) + crdBytes, err = Asset(crd) if err != nil { return fmt.Errorf("error getting asset %s: %v", crd, err) } @@ -152,7 +151,7 @@ func ApplyCRDs(cfg *config.MicroshiftConfig) error { for _, crd := range crds { klog.Infof("Applying openshift CRD %s", crd) - crdBytes, err := crd_assets.Asset(crd) + crdBytes, err := Asset(crd) if err != nil { return fmt.Errorf("error getting asset %s: %v", crd, err) } diff --git a/pkg/assets/rbac.go b/pkg/assets/rbac.go old mode 100755 new mode 100644 index f8928681f86..958e14ac9c5 --- a/pkg/assets/rbac.go +++ b/pkg/assets/rbac.go @@ -4,8 +4,6 @@ import ( "context" "fmt" - rbacassets "github.com/openshift/microshift/pkg/assets/rbac" - "k8s.io/klog/v2" "k8s.io/client-go/rest" @@ -164,7 +162,7 @@ func applyRbac(rbacs []string, applier readerApplier) error { for _, rbac := range rbacs { klog.Infof("Applying rbac %s", rbac) - objBytes, err := rbacassets.Asset(rbac) + objBytes, err := Asset(rbac) if err != nil { return fmt.Errorf("error getting asset %s: %v", rbac, err) } diff --git a/pkg/assets/rbac/bindata.go b/pkg/assets/rbac/bindata.go deleted file mode 100644 index 6f5979161ae..00000000000 --- a/pkg/assets/rbac/bindata.go +++ /dev/null @@ -1,835 +0,0 @@ -// Package assets Code generated by go-bindata. (@generated) DO NOT EDIT. 
-// sources: -// assets/rbac/0000_00_flannel-clusterrole.yaml -// assets/rbac/0000_00_flannel-clusterrolebinding.yaml -// assets/rbac/0000_00_podsecuritypolicy-flannel.yaml -// assets/rbac/0000_60_service-ca_00_clusterrole.yaml -// assets/rbac/0000_60_service-ca_00_clusterrolebinding.yaml -// assets/rbac/0000_60_service-ca_00_role.yaml -// assets/rbac/0000_60_service-ca_00_rolebinding.yaml -// assets/rbac/0000_70_dns_01-cluster-role-binding.yaml -// assets/rbac/0000_70_dns_01-cluster-role.yaml -// assets/rbac/0000_80_hostpath-provisioner-clusterrole.yaml -// assets/rbac/0000_80_hostpath-provisioner-clusterrolebinding.yaml -// assets/rbac/0000_80_openshift-router-cluster-role-binding.yaml -// assets/rbac/0000_80_openshift-router-cluster-role.yaml -package assets - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" -) - -type asset struct { - bytes []byte - info os.FileInfo -} - -type bindataFileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time -} - -// Name return file name -func (fi bindataFileInfo) Name() string { - return fi.name -} - -// Size return file size -func (fi bindataFileInfo) Size() int64 { - return fi.size -} - -// Mode return file mode -func (fi bindataFileInfo) Mode() os.FileMode { - return fi.mode -} - -// Mode return file modify time -func (fi bindataFileInfo) ModTime() time.Time { - return fi.modTime -} - -// IsDir return file whether a directory -func (fi bindataFileInfo) IsDir() bool { - return fi.mode&os.ModeDir != 0 -} - -// Sys return file is sys mode -func (fi bindataFileInfo) Sys() interface{} { - return nil -} - -var _assetsRbac0000_00_flannelClusterroleYaml = []byte(`kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: flannel -rules: -- apiGroups: ['extensions'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: ['psp.flannel.unprivileged'] -- apiGroups: - - "" - resources: - - pods - verbs: - - get -- apiGroups: - - "" - resources: - - nodes - verbs: - - list - - watch -- apiGroups: - - "" - resources: - - nodes/status - verbs: - - patch`) - -func assetsRbac0000_00_flannelClusterroleYamlBytes() ([]byte, error) { - return _assetsRbac0000_00_flannelClusterroleYaml, nil -} - -func assetsRbac0000_00_flannelClusterroleYaml() (*asset, error) { - bytes, err := assetsRbac0000_00_flannelClusterroleYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/rbac/0000_00_flannel-clusterrole.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsRbac0000_00_flannelClusterrolebindingYaml = []byte(`kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: flannel -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: flannel -subjects: -- kind: ServiceAccount - name: flannel - namespace: kube-system`) - -func assetsRbac0000_00_flannelClusterrolebindingYamlBytes() ([]byte, error) { - return _assetsRbac0000_00_flannelClusterrolebindingYaml, nil -} - -func assetsRbac0000_00_flannelClusterrolebindingYaml() (*asset, error) { - bytes, err := assetsRbac0000_00_flannelClusterrolebindingYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/rbac/0000_00_flannel-clusterrolebinding.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsRbac0000_00_podsecuritypolicyFlannelYaml = []byte(`apiVersion: 
policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: psp.flannel.unprivileged - annotations: - seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default - seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default - apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default - apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default -spec: - privileged: false - volumes: - - configMap - - secret - - emptyDir - - hostPath - allowedHostPaths: - - pathPrefix: "/etc/cni/net.d" - - pathPrefix: "/etc/kube-flannel" - - pathPrefix: "/run/flannel" - readOnlyRootFilesystem: false - # Users and groups - runAsUser: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - fsGroup: - rule: RunAsAny - # Privilege Escalation - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - # Capabilities - allowedCapabilities: ['NET_ADMIN', 'NET_RAW'] - defaultAddCapabilities: [] - requiredDropCapabilities: [] - # Host namespaces - hostPID: false - hostIPC: false - hostNetwork: true - hostPorts: - - min: 0 - max: 65535 - # SELinux - seLinux: - # SELinux is unused in CaaSP - rule: 'RunAsAny'`) - -func assetsRbac0000_00_podsecuritypolicyFlannelYamlBytes() ([]byte, error) { - return _assetsRbac0000_00_podsecuritypolicyFlannelYaml, nil -} - -func assetsRbac0000_00_podsecuritypolicyFlannelYaml() (*asset, error) { - bytes, err := assetsRbac0000_00_podsecuritypolicyFlannelYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/rbac/0000_00_podsecuritypolicy-flannel.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsRbac0000_60_serviceCa_00_clusterroleYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: system:openshift:controller:service-ca -rules: -- apiGroups: - - "" - resources: - - secrets - verbs: - - get - - list - - watch - - create - - update - - patch -- apiGroups: - - "" - resources: - - services - verbs: - - get - - list - - watch - - update - - patch -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - - validatingwebhookconfigurations - verbs: - - get - - list - - watch - - update -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - get - - list - - watch - - update -- apiGroups: - - apiregistration.k8s.io - resources: - - apiservices - verbs: - - get - - list - - watch - - update - - patch -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - - update -`) - -func assetsRbac0000_60_serviceCa_00_clusterroleYamlBytes() ([]byte, error) { - return _assetsRbac0000_60_serviceCa_00_clusterroleYaml, nil -} - -func assetsRbac0000_60_serviceCa_00_clusterroleYaml() (*asset, error) { - bytes, err := assetsRbac0000_60_serviceCa_00_clusterroleYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/rbac/0000_60_service-ca_00_clusterrole.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsRbac0000_60_serviceCa_00_clusterrolebindingYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: system:openshift:controller:service-ca -roleRef: - kind: ClusterRole - name: cluster-admin - apiGroup: rbac.authorization.k8s.io -subjects: -- kind: ServiceAccount - namespace: openshift-service-ca - name: service-ca 
-`) - -func assetsRbac0000_60_serviceCa_00_clusterrolebindingYamlBytes() ([]byte, error) { - return _assetsRbac0000_60_serviceCa_00_clusterrolebindingYaml, nil -} - -func assetsRbac0000_60_serviceCa_00_clusterrolebindingYaml() (*asset, error) { - bytes, err := assetsRbac0000_60_serviceCa_00_clusterrolebindingYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/rbac/0000_60_service-ca_00_clusterrolebinding.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsRbac0000_60_serviceCa_00_roleYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: system:openshift:controller:service-ca - namespace: openshift-service-ca -rules: -- apiGroups: - - security.openshift.io - resources: - - securitycontextconstraints - resourceNames: - - restricted - verbs: - - use -- apiGroups: - - "" - resources: - - events - verbs: - - create -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - - update - - create -- apiGroups: - - "" - resources: - - pods - verbs: - - get - - list - - watch -- apiGroups: - - "apps" - resources: - - replicasets - - deployments - verbs: - - get - - list - - watch`) - -func assetsRbac0000_60_serviceCa_00_roleYamlBytes() ([]byte, error) { - return _assetsRbac0000_60_serviceCa_00_roleYaml, nil -} - -func assetsRbac0000_60_serviceCa_00_roleYaml() (*asset, error) { - bytes, err := assetsRbac0000_60_serviceCa_00_roleYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/rbac/0000_60_service-ca_00_role.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsRbac0000_60_serviceCa_00_rolebindingYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: system:openshift:controller:service-ca - namespace: openshift-service-ca -roleRef: - kind: Role - name: system:openshift:controller:service-ca - apiGroup: rbac.authorization.k8s.io -subjects: -- kind: ServiceAccount - namespace: openshift-service-ca - name: service-ca -`) - -func assetsRbac0000_60_serviceCa_00_rolebindingYamlBytes() ([]byte, error) { - return _assetsRbac0000_60_serviceCa_00_rolebindingYaml, nil -} - -func assetsRbac0000_60_serviceCa_00_rolebindingYaml() (*asset, error) { - bytes, err := assetsRbac0000_60_serviceCa_00_rolebindingYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/rbac/0000_60_service-ca_00_rolebinding.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsRbac0000_70_dns_01ClusterRoleBindingYaml = []byte(`kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: openshift-dns -subjects: -- kind: ServiceAccount - name: dns - namespace: openshift-dns -roleRef: - kind: ClusterRole - name: openshift-dns -`) - -func assetsRbac0000_70_dns_01ClusterRoleBindingYamlBytes() ([]byte, error) { - return _assetsRbac0000_70_dns_01ClusterRoleBindingYaml, nil -} - -func assetsRbac0000_70_dns_01ClusterRoleBindingYaml() (*asset, error) { - bytes, err := assetsRbac0000_70_dns_01ClusterRoleBindingYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/rbac/0000_70_dns_01-cluster-role-binding.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil 
-} - -var _assetsRbac0000_70_dns_01ClusterRoleYaml = []byte(`kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: openshift-dns -rules: -- apiGroups: - - "" - resources: - - endpoints - - services - - pods - - namespaces - verbs: - - list - - watch - -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - list - - watch - -- apiGroups: - - authentication.k8s.io - resources: - - tokenreviews - verbs: - - create - -- apiGroups: - - authorization.k8s.io - resources: - - subjectaccessreviews - verbs: - - create -`) - -func assetsRbac0000_70_dns_01ClusterRoleYamlBytes() ([]byte, error) { - return _assetsRbac0000_70_dns_01ClusterRoleYaml, nil -} - -func assetsRbac0000_70_dns_01ClusterRoleYaml() (*asset, error) { - bytes, err := assetsRbac0000_70_dns_01ClusterRoleYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/rbac/0000_70_dns_01-cluster-role.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsRbac0000_80_hostpathProvisionerClusterroleYaml = []byte(`kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kubevirt-hostpath-provisioner -rules: - - apiGroups: [""] - resources: ["nodes"] - verbs: ["get"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "delete"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - - apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] -`) - -func assetsRbac0000_80_hostpathProvisionerClusterroleYamlBytes() ([]byte, error) { - return _assetsRbac0000_80_hostpathProvisionerClusterroleYaml, nil -} - -func assetsRbac0000_80_hostpathProvisionerClusterroleYaml() (*asset, error) { - bytes, err := assetsRbac0000_80_hostpathProvisionerClusterroleYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/rbac/0000_80_hostpath-provisioner-clusterrole.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsRbac0000_80_hostpathProvisionerClusterrolebindingYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: kubevirt-hostpath-provisioner -subjects: -- kind: ServiceAccount - name: kubevirt-hostpath-provisioner-admin - namespace: kubevirt-hostpath-provisioner -roleRef: - kind: ClusterRole - name: kubevirt-hostpath-provisioner - apiGroup: rbac.authorization.k8s.io`) - -func assetsRbac0000_80_hostpathProvisionerClusterrolebindingYamlBytes() ([]byte, error) { - return _assetsRbac0000_80_hostpathProvisionerClusterrolebindingYaml, nil -} - -func assetsRbac0000_80_hostpathProvisionerClusterrolebindingYaml() (*asset, error) { - bytes, err := assetsRbac0000_80_hostpathProvisionerClusterrolebindingYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/rbac/0000_80_hostpath-provisioner-clusterrolebinding.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsRbac0000_80_openshiftRouterClusterRoleBindingYaml = []byte(`# Binds the router role to its Service Account. 
-kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: openshift-ingress-router -subjects: -- kind: ServiceAccount - name: router - namespace: openshift-ingress -roleRef: - kind: ClusterRole - name: openshift-ingress-router - namespace: openshift-ingress -`) - -func assetsRbac0000_80_openshiftRouterClusterRoleBindingYamlBytes() ([]byte, error) { - return _assetsRbac0000_80_openshiftRouterClusterRoleBindingYaml, nil -} - -func assetsRbac0000_80_openshiftRouterClusterRoleBindingYaml() (*asset, error) { - bytes, err := assetsRbac0000_80_openshiftRouterClusterRoleBindingYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/rbac/0000_80_openshift-router-cluster-role-binding.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsRbac0000_80_openshiftRouterClusterRoleYaml = []byte(`# Cluster scoped role for routers. This should be as restrictive as possible. -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: openshift-ingress-router -rules: -- apiGroups: - - "" - resources: - - endpoints - - namespaces - - services - verbs: - - list - - watch - -- apiGroups: - - authentication.k8s.io - resources: - - tokenreviews - verbs: - - create - -- apiGroups: - - authorization.k8s.io - resources: - - subjectaccessreviews - verbs: - - create - -- apiGroups: - - route.openshift.io - resources: - - routes - verbs: - - list - - watch - -- apiGroups: - - route.openshift.io - resources: - - routes/status - verbs: - - update - -- apiGroups: - - security.openshift.io - resources: - - securitycontextconstraints - verbs: - - use - resourceNames: - - hostnetwork - -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - list - - watch -`) - -func assetsRbac0000_80_openshiftRouterClusterRoleYamlBytes() ([]byte, error) { - return _assetsRbac0000_80_openshiftRouterClusterRoleYaml, nil -} - -func assetsRbac0000_80_openshiftRouterClusterRoleYaml() (*asset, error) { - bytes, err := assetsRbac0000_80_openshiftRouterClusterRoleYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/rbac/0000_80_openshift-router-cluster-role.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -// Asset loads and returns the asset for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func Asset(name string) ([]byte, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) - } - return a.bytes, nil - } - return nil, fmt.Errorf("Asset %s not found", name) -} - -// MustAsset is like Asset but panics when Asset would return an error. -// It simplifies safe initialization of global variables. -func MustAsset(name string) []byte { - a, err := Asset(name) - if err != nil { - panic("asset: Asset(" + name + "): " + err.Error()) - } - - return a -} - -// AssetInfo loads and returns the asset info for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. 
-func AssetInfo(name string) (os.FileInfo, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) - } - return a.info, nil - } - return nil, fmt.Errorf("AssetInfo %s not found", name) -} - -// AssetNames returns the names of the assets. -func AssetNames() []string { - names := make([]string, 0, len(_bindata)) - for name := range _bindata { - names = append(names, name) - } - return names -} - -// _bindata is a table, holding each asset generator, mapped to its name. -var _bindata = map[string]func() (*asset, error){ - "assets/rbac/0000_00_flannel-clusterrole.yaml": assetsRbac0000_00_flannelClusterroleYaml, - "assets/rbac/0000_00_flannel-clusterrolebinding.yaml": assetsRbac0000_00_flannelClusterrolebindingYaml, - "assets/rbac/0000_00_podsecuritypolicy-flannel.yaml": assetsRbac0000_00_podsecuritypolicyFlannelYaml, - "assets/rbac/0000_60_service-ca_00_clusterrole.yaml": assetsRbac0000_60_serviceCa_00_clusterroleYaml, - "assets/rbac/0000_60_service-ca_00_clusterrolebinding.yaml": assetsRbac0000_60_serviceCa_00_clusterrolebindingYaml, - "assets/rbac/0000_60_service-ca_00_role.yaml": assetsRbac0000_60_serviceCa_00_roleYaml, - "assets/rbac/0000_60_service-ca_00_rolebinding.yaml": assetsRbac0000_60_serviceCa_00_rolebindingYaml, - "assets/rbac/0000_70_dns_01-cluster-role-binding.yaml": assetsRbac0000_70_dns_01ClusterRoleBindingYaml, - "assets/rbac/0000_70_dns_01-cluster-role.yaml": assetsRbac0000_70_dns_01ClusterRoleYaml, - "assets/rbac/0000_80_hostpath-provisioner-clusterrole.yaml": assetsRbac0000_80_hostpathProvisionerClusterroleYaml, - "assets/rbac/0000_80_hostpath-provisioner-clusterrolebinding.yaml": assetsRbac0000_80_hostpathProvisionerClusterrolebindingYaml, - "assets/rbac/0000_80_openshift-router-cluster-role-binding.yaml": assetsRbac0000_80_openshiftRouterClusterRoleBindingYaml, - "assets/rbac/0000_80_openshift-router-cluster-role.yaml": assetsRbac0000_80_openshiftRouterClusterRoleYaml, -} - -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"} -// AssetDir("data/img") would return []string{"a.png", "b.png"} -// AssetDir("foo.txt") and AssetDir("notexist") would return an error -// AssetDir("") will return []string{"data"}. 
-func AssetDir(name string) ([]string, error) { - node := _bintree - if len(name) != 0 { - cannonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(cannonicalName, "/") - for _, p := range pathList { - node = node.Children[p] - if node == nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - } - } - if node.Func != nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - rv := make([]string, 0, len(node.Children)) - for childName := range node.Children { - rv = append(rv, childName) - } - return rv, nil -} - -type bintree struct { - Func func() (*asset, error) - Children map[string]*bintree -} - -var _bintree = &bintree{nil, map[string]*bintree{ - "assets": {nil, map[string]*bintree{ - "rbac": {nil, map[string]*bintree{ - "0000_00_flannel-clusterrole.yaml": {assetsRbac0000_00_flannelClusterroleYaml, map[string]*bintree{}}, - "0000_00_flannel-clusterrolebinding.yaml": {assetsRbac0000_00_flannelClusterrolebindingYaml, map[string]*bintree{}}, - "0000_00_podsecuritypolicy-flannel.yaml": {assetsRbac0000_00_podsecuritypolicyFlannelYaml, map[string]*bintree{}}, - "0000_60_service-ca_00_clusterrole.yaml": {assetsRbac0000_60_serviceCa_00_clusterroleYaml, map[string]*bintree{}}, - "0000_60_service-ca_00_clusterrolebinding.yaml": {assetsRbac0000_60_serviceCa_00_clusterrolebindingYaml, map[string]*bintree{}}, - "0000_60_service-ca_00_role.yaml": {assetsRbac0000_60_serviceCa_00_roleYaml, map[string]*bintree{}}, - "0000_60_service-ca_00_rolebinding.yaml": {assetsRbac0000_60_serviceCa_00_rolebindingYaml, map[string]*bintree{}}, - "0000_70_dns_01-cluster-role-binding.yaml": {assetsRbac0000_70_dns_01ClusterRoleBindingYaml, map[string]*bintree{}}, - "0000_70_dns_01-cluster-role.yaml": {assetsRbac0000_70_dns_01ClusterRoleYaml, map[string]*bintree{}}, - "0000_80_hostpath-provisioner-clusterrole.yaml": {assetsRbac0000_80_hostpathProvisionerClusterroleYaml, map[string]*bintree{}}, - "0000_80_hostpath-provisioner-clusterrolebinding.yaml": {assetsRbac0000_80_hostpathProvisionerClusterrolebindingYaml, map[string]*bintree{}}, - "0000_80_openshift-router-cluster-role-binding.yaml": {assetsRbac0000_80_openshiftRouterClusterRoleBindingYaml, map[string]*bintree{}}, - "0000_80_openshift-router-cluster-role.yaml": {assetsRbac0000_80_openshiftRouterClusterRoleYaml, map[string]*bintree{}}, - }}, - }}, -}} - -// RestoreAsset restores an asset under the given directory -func RestoreAsset(dir, name string) error { - data, err := Asset(name) - if err != nil { - return err - } - info, err := AssetInfo(name) - if err != nil { - return err - } - err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) - if err != nil { - return err - } - err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) - if err != nil { - return err - } - err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) - if err != nil { - return err - } - return nil -} - -// RestoreAssets restores an asset under the given directory recursively -func RestoreAssets(dir, name string) error { - children, err := AssetDir(name) - // File - if err != nil { - return RestoreAsset(dir, name) - } - // Dir - for _, child := range children { - err = RestoreAssets(dir, filepath.Join(name, child)) - if err != nil { - return err - } - } - return nil -} - -func _filePath(dir, name string) string { - cannonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
-} diff --git a/pkg/assets/scc.go b/pkg/assets/scc.go index 84e8b45dff0..79888793af3 100644 --- a/pkg/assets/scc.go +++ b/pkg/assets/scc.go @@ -6,8 +6,6 @@ import ( "k8s.io/klog/v2" - sccassets "github.com/openshift/microshift/pkg/assets/scc" - "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" @@ -73,7 +71,7 @@ func applySCCs(sccs []string, applier readerApplier, render RenderFunc, params R for _, scc := range sccs { klog.Infof("Applying scc api %s", scc) - objBytes, err := sccassets.Asset(scc) + objBytes, err := Asset(scc) if err != nil { return fmt.Errorf("error getting asset %s: %v", scc, err) } diff --git a/pkg/assets/scc/bindata.go b/pkg/assets/scc/bindata.go deleted file mode 100644 index cef68c477f7..00000000000 --- a/pkg/assets/scc/bindata.go +++ /dev/null @@ -1,705 +0,0 @@ -// Package assets Code generated by go-bindata. (@generated) DO NOT EDIT. -// sources: -// assets/scc/0000_20_kube-apiserver-operator_00_scc-anyuid.yaml -// assets/scc/0000_20_kube-apiserver-operator_00_scc-hostaccess.yaml -// assets/scc/0000_20_kube-apiserver-operator_00_scc-hostmount-anyuid.yaml -// assets/scc/0000_20_kube-apiserver-operator_00_scc-hostnetwork.yaml -// assets/scc/0000_20_kube-apiserver-operator_00_scc-nonroot.yaml -// assets/scc/0000_20_kube-apiserver-operator_00_scc-privileged.yaml -// assets/scc/0000_20_kube-apiserver-operator_00_scc-restricted.yaml -// assets/scc/0000_80_hostpath-provisioner-securitycontextconstraints.yaml -package assets - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" -) - -type asset struct { - bytes []byte - info os.FileInfo -} - -type bindataFileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time -} - -// Name return file name -func (fi bindataFileInfo) Name() string { - return fi.name -} - -// Size return file size -func (fi bindataFileInfo) Size() int64 { - return fi.size -} - -// Mode return file mode -func (fi bindataFileInfo) Mode() os.FileMode { - return fi.mode -} - -// Mode return file modify time -func (fi bindataFileInfo) ModTime() time.Time { - return fi.modTime -} - -// IsDir return file whether a directory -func (fi bindataFileInfo) IsDir() bool { - return fi.mode&os.ModeDir != 0 -} - -// Sys return file is sys mode -func (fi bindataFileInfo) Sys() interface{} { - return nil -} - -var _assetsScc0000_20_kubeApiserverOperator_00_sccAnyuidYaml = []byte(`allowHostDirVolumePlugin: false -allowHostIPC: false -allowHostNetwork: false -allowHostPID: false -allowHostPorts: false -allowPrivilegeEscalation: true -allowPrivilegedContainer: false -allowedCapabilities: -apiVersion: security.openshift.io/v1 -defaultAddCapabilities: -fsGroup: - type: RunAsAny -groups: -- system:cluster-admins -kind: SecurityContextConstraints -metadata: - annotations: - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/create-only: "true" - kubernetes.io/description: anyuid provides all features of the restricted SCC - but allows users to run with any UID and any GID. 
- name: anyuid -priority: 10 -readOnlyRootFilesystem: false -requiredDropCapabilities: -- MKNOD -runAsUser: - type: RunAsAny -seLinuxContext: - type: MustRunAs -supplementalGroups: - type: RunAsAny -users: [] -volumes: -- configMap -- downwardAPI -- emptyDir -- persistentVolumeClaim -- projected -- secret -`) - -func assetsScc0000_20_kubeApiserverOperator_00_sccAnyuidYamlBytes() ([]byte, error) { - return _assetsScc0000_20_kubeApiserverOperator_00_sccAnyuidYaml, nil -} - -func assetsScc0000_20_kubeApiserverOperator_00_sccAnyuidYaml() (*asset, error) { - bytes, err := assetsScc0000_20_kubeApiserverOperator_00_sccAnyuidYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-anyuid.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsScc0000_20_kubeApiserverOperator_00_sccHostaccessYaml = []byte(`allowHostDirVolumePlugin: true -allowHostIPC: true -allowHostNetwork: true -allowHostPID: true -allowHostPorts: true -allowPrivilegeEscalation: true -allowPrivilegedContainer: false -allowedCapabilities: -apiVersion: security.openshift.io/v1 -defaultAddCapabilities: -fsGroup: - type: MustRunAs -groups: [] -kind: SecurityContextConstraints -metadata: - annotations: - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/create-only: "true" - kubernetes.io/description: 'hostaccess allows access to all host namespaces but - still requires pods to be run with a UID and SELinux context that are allocated - to the namespace. WARNING: this SCC allows host access to namespaces, file systems, - and PIDS. It should only be used by trusted pods. Grant with caution.' 
- name: hostaccess -priority: -readOnlyRootFilesystem: false -requiredDropCapabilities: -- KILL -- MKNOD -- SETUID -- SETGID -runAsUser: - type: MustRunAsRange -seLinuxContext: - type: MustRunAs -supplementalGroups: - type: RunAsAny -users: [] -volumes: -- configMap -- downwardAPI -- emptyDir -- hostPath -- persistentVolumeClaim -- projected -- secret -`) - -func assetsScc0000_20_kubeApiserverOperator_00_sccHostaccessYamlBytes() ([]byte, error) { - return _assetsScc0000_20_kubeApiserverOperator_00_sccHostaccessYaml, nil -} - -func assetsScc0000_20_kubeApiserverOperator_00_sccHostaccessYaml() (*asset, error) { - bytes, err := assetsScc0000_20_kubeApiserverOperator_00_sccHostaccessYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-hostaccess.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsScc0000_20_kubeApiserverOperator_00_sccHostmountAnyuidYaml = []byte(`allowHostDirVolumePlugin: true -allowHostIPC: false -allowHostNetwork: false -allowHostPID: false -allowHostPorts: false -allowPrivilegeEscalation: true -allowPrivilegedContainer: false -allowedCapabilities: -apiVersion: security.openshift.io/v1 -defaultAddCapabilities: -fsGroup: - type: RunAsAny -groups: [] -kind: SecurityContextConstraints -metadata: - annotations: - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/create-only: "true" - kubernetes.io/description: |- - hostmount-anyuid provides all the features of the - restricted SCC but allows host mounts and any UID by a pod. This is primarily - used by the persistent volume recycler. WARNING: this SCC allows host file - system access as any UID, including UID 0. Grant with caution. 
- name: hostmount-anyuid -priority: -readOnlyRootFilesystem: false -requiredDropCapabilities: -- MKNOD -runAsUser: - type: RunAsAny -seLinuxContext: - type: MustRunAs -supplementalGroups: - type: RunAsAny -users: -- system:serviceaccount:openshift-infra:pv-recycler-controller -volumes: -- configMap -- downwardAPI -- emptyDir -- hostPath -- nfs -- persistentVolumeClaim -- projected -- secret -`) - -func assetsScc0000_20_kubeApiserverOperator_00_sccHostmountAnyuidYamlBytes() ([]byte, error) { - return _assetsScc0000_20_kubeApiserverOperator_00_sccHostmountAnyuidYaml, nil -} - -func assetsScc0000_20_kubeApiserverOperator_00_sccHostmountAnyuidYaml() (*asset, error) { - bytes, err := assetsScc0000_20_kubeApiserverOperator_00_sccHostmountAnyuidYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-hostmount-anyuid.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsScc0000_20_kubeApiserverOperator_00_sccHostnetworkYaml = []byte(`allowHostDirVolumePlugin: false -allowHostIPC: false -allowHostNetwork: true -allowHostPID: false -allowHostPorts: true -allowPrivilegeEscalation: true -allowPrivilegedContainer: false -allowedCapabilities: -apiVersion: security.openshift.io/v1 -defaultAddCapabilities: -fsGroup: - type: MustRunAs -groups: [] -kind: SecurityContextConstraints -metadata: - annotations: - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/create-only: "true" - kubernetes.io/description: hostnetwork allows using host networking and host ports - but still requires pods to be run with a UID and SELinux context that are allocated - to the namespace. 
- name: hostnetwork -priority: -readOnlyRootFilesystem: false -requiredDropCapabilities: -- KILL -- MKNOD -- SETUID -- SETGID -runAsUser: - type: MustRunAsRange -seLinuxContext: - type: MustRunAs -supplementalGroups: - type: MustRunAs -users: [] -volumes: -- configMap -- downwardAPI -- emptyDir -- persistentVolumeClaim -- projected -- secret -`) - -func assetsScc0000_20_kubeApiserverOperator_00_sccHostnetworkYamlBytes() ([]byte, error) { - return _assetsScc0000_20_kubeApiserverOperator_00_sccHostnetworkYaml, nil -} - -func assetsScc0000_20_kubeApiserverOperator_00_sccHostnetworkYaml() (*asset, error) { - bytes, err := assetsScc0000_20_kubeApiserverOperator_00_sccHostnetworkYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-hostnetwork.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsScc0000_20_kubeApiserverOperator_00_sccNonrootYaml = []byte(`allowHostDirVolumePlugin: false -allowHostIPC: false -allowHostNetwork: false -allowHostPID: false -allowHostPorts: false -allowPrivilegeEscalation: true -allowPrivilegedContainer: false -allowedCapabilities: -apiVersion: security.openshift.io/v1 -defaultAddCapabilities: -fsGroup: - type: RunAsAny -groups: [] -kind: SecurityContextConstraints -metadata: - annotations: - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/create-only: "true" - kubernetes.io/description: nonroot provides all features of the restricted SCC - but allows users to run with any non-root UID. The user must specify the UID - or it must be specified on the by the manifest of the container runtime. 
- name: nonroot -priority: -readOnlyRootFilesystem: false -requiredDropCapabilities: -- KILL -- MKNOD -- SETUID -- SETGID -runAsUser: - type: MustRunAsNonRoot -seLinuxContext: - type: MustRunAs -supplementalGroups: - type: RunAsAny -users: [] -volumes: -- configMap -- downwardAPI -- emptyDir -- persistentVolumeClaim -- projected -- secret -`) - -func assetsScc0000_20_kubeApiserverOperator_00_sccNonrootYamlBytes() ([]byte, error) { - return _assetsScc0000_20_kubeApiserverOperator_00_sccNonrootYaml, nil -} - -func assetsScc0000_20_kubeApiserverOperator_00_sccNonrootYaml() (*asset, error) { - bytes, err := assetsScc0000_20_kubeApiserverOperator_00_sccNonrootYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-nonroot.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsScc0000_20_kubeApiserverOperator_00_sccPrivilegedYaml = []byte(`allowHostDirVolumePlugin: true -allowHostIPC: true -allowHostNetwork: true -allowHostPID: true -allowHostPorts: true -allowPrivilegeEscalation: true -allowPrivilegedContainer: true -allowedCapabilities: -- "*" -allowedUnsafeSysctls: -- "*" -apiVersion: security.openshift.io/v1 -defaultAddCapabilities: -fsGroup: - type: RunAsAny -groups: -- system:cluster-admins -- system:nodes -- system:masters -kind: SecurityContextConstraints -metadata: - annotations: - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/create-only: "true" - kubernetes.io/description: 'privileged allows access to all privileged and host - features and the ability to run as any user, any group, any fsGroup, and with - any SELinux context. WARNING: this is the most relaxed SCC and should be used - only for cluster administration. Grant with caution.' 
- name: privileged -priority: -readOnlyRootFilesystem: false -requiredDropCapabilities: -runAsUser: - type: RunAsAny -seLinuxContext: - type: RunAsAny -seccompProfiles: -- "*" -supplementalGroups: - type: RunAsAny -users: -- system:admin -- system:serviceaccount:openshift-infra:build-controller -volumes: -- "*" -`) - -func assetsScc0000_20_kubeApiserverOperator_00_sccPrivilegedYamlBytes() ([]byte, error) { - return _assetsScc0000_20_kubeApiserverOperator_00_sccPrivilegedYaml, nil -} - -func assetsScc0000_20_kubeApiserverOperator_00_sccPrivilegedYaml() (*asset, error) { - bytes, err := assetsScc0000_20_kubeApiserverOperator_00_sccPrivilegedYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-privileged.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsScc0000_20_kubeApiserverOperator_00_sccRestrictedYaml = []byte(`allowHostDirVolumePlugin: false -allowHostIPC: false -allowHostNetwork: false -allowHostPID: false -allowHostPorts: false -allowPrivilegeEscalation: true -allowPrivilegedContainer: false -allowedCapabilities: -apiVersion: security.openshift.io/v1 -defaultAddCapabilities: -fsGroup: - type: MustRunAs -groups: -- system:authenticated -kind: SecurityContextConstraints -metadata: - annotations: - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/create-only: "true" - kubernetes.io/description: restricted denies access to all host features and requires - pods to be run with a UID, and SELinux context that are allocated to the namespace. This - is the most restrictive SCC and it is used by default for authenticated users. 
- name: restricted -priority: -readOnlyRootFilesystem: false -requiredDropCapabilities: -- KILL -- MKNOD -- SETUID -- SETGID -runAsUser: - type: MustRunAsRange -seLinuxContext: - type: MustRunAs -supplementalGroups: - type: RunAsAny -users: [] -volumes: -- configMap -- downwardAPI -- emptyDir -- persistentVolumeClaim -- projected -- secret -`) - -func assetsScc0000_20_kubeApiserverOperator_00_sccRestrictedYamlBytes() ([]byte, error) { - return _assetsScc0000_20_kubeApiserverOperator_00_sccRestrictedYaml, nil -} - -func assetsScc0000_20_kubeApiserverOperator_00_sccRestrictedYaml() (*asset, error) { - bytes, err := assetsScc0000_20_kubeApiserverOperator_00_sccRestrictedYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-restricted.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _assetsScc0000_80_hostpathProvisionerSecuritycontextconstraintsYaml = []byte(`kind: SecurityContextConstraints -apiVersion: security.openshift.io/v1 -metadata: - name: hostpath-provisioner -allowPrivilegedContainer: true -requiredDropCapabilities: -- KILL -- MKNOD -- SETUID -- SETGID -runAsUser: - type: RunAsAny -seLinuxContext: - type: RunAsAny -fsGroup: - type: RunAsAny -supplementalGroups: - type: RunAsAny -allowHostDirVolumePlugin: true -users: -- system:serviceaccount:kubevirt-hostpath-provisioner:kubevirt-hostpath-provisioner-admin -volumes: -- hostPath -- secret -`) - -func assetsScc0000_80_hostpathProvisionerSecuritycontextconstraintsYamlBytes() ([]byte, error) { - return _assetsScc0000_80_hostpathProvisionerSecuritycontextconstraintsYaml, nil -} - -func assetsScc0000_80_hostpathProvisionerSecuritycontextconstraintsYaml() (*asset, error) { - bytes, err := assetsScc0000_80_hostpathProvisionerSecuritycontextconstraintsYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/scc/0000_80_hostpath-provisioner-securitycontextconstraints.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -// Asset loads and returns the asset for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func Asset(name string) ([]byte, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) - } - return a.bytes, nil - } - return nil, fmt.Errorf("Asset %s not found", name) -} - -// MustAsset is like Asset but panics when Asset would return an error. -// It simplifies safe initialization of global variables. -func MustAsset(name string) []byte { - a, err := Asset(name) - if err != nil { - panic("asset: Asset(" + name + "): " + err.Error()) - } - - return a -} - -// AssetInfo loads and returns the asset info for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func AssetInfo(name string) (os.FileInfo, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) - } - return a.info, nil - } - return nil, fmt.Errorf("AssetInfo %s not found", name) -} - -// AssetNames returns the names of the assets. 
-func AssetNames() []string { - names := make([]string, 0, len(_bindata)) - for name := range _bindata { - names = append(names, name) - } - return names -} - -// _bindata is a table, holding each asset generator, mapped to its name. -var _bindata = map[string]func() (*asset, error){ - "assets/scc/0000_20_kube-apiserver-operator_00_scc-anyuid.yaml": assetsScc0000_20_kubeApiserverOperator_00_sccAnyuidYaml, - "assets/scc/0000_20_kube-apiserver-operator_00_scc-hostaccess.yaml": assetsScc0000_20_kubeApiserverOperator_00_sccHostaccessYaml, - "assets/scc/0000_20_kube-apiserver-operator_00_scc-hostmount-anyuid.yaml": assetsScc0000_20_kubeApiserverOperator_00_sccHostmountAnyuidYaml, - "assets/scc/0000_20_kube-apiserver-operator_00_scc-hostnetwork.yaml": assetsScc0000_20_kubeApiserverOperator_00_sccHostnetworkYaml, - "assets/scc/0000_20_kube-apiserver-operator_00_scc-nonroot.yaml": assetsScc0000_20_kubeApiserverOperator_00_sccNonrootYaml, - "assets/scc/0000_20_kube-apiserver-operator_00_scc-privileged.yaml": assetsScc0000_20_kubeApiserverOperator_00_sccPrivilegedYaml, - "assets/scc/0000_20_kube-apiserver-operator_00_scc-restricted.yaml": assetsScc0000_20_kubeApiserverOperator_00_sccRestrictedYaml, - "assets/scc/0000_80_hostpath-provisioner-securitycontextconstraints.yaml": assetsScc0000_80_hostpathProvisionerSecuritycontextconstraintsYaml, -} - -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"} -// AssetDir("data/img") would return []string{"a.png", "b.png"} -// AssetDir("foo.txt") and AssetDir("notexist") would return an error -// AssetDir("") will return []string{"data"}. 
-func AssetDir(name string) ([]string, error) { - node := _bintree - if len(name) != 0 { - cannonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(cannonicalName, "/") - for _, p := range pathList { - node = node.Children[p] - if node == nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - } - } - if node.Func != nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - rv := make([]string, 0, len(node.Children)) - for childName := range node.Children { - rv = append(rv, childName) - } - return rv, nil -} - -type bintree struct { - Func func() (*asset, error) - Children map[string]*bintree -} - -var _bintree = &bintree{nil, map[string]*bintree{ - "assets": {nil, map[string]*bintree{ - "scc": {nil, map[string]*bintree{ - "0000_20_kube-apiserver-operator_00_scc-anyuid.yaml": {assetsScc0000_20_kubeApiserverOperator_00_sccAnyuidYaml, map[string]*bintree{}}, - "0000_20_kube-apiserver-operator_00_scc-hostaccess.yaml": {assetsScc0000_20_kubeApiserverOperator_00_sccHostaccessYaml, map[string]*bintree{}}, - "0000_20_kube-apiserver-operator_00_scc-hostmount-anyuid.yaml": {assetsScc0000_20_kubeApiserverOperator_00_sccHostmountAnyuidYaml, map[string]*bintree{}}, - "0000_20_kube-apiserver-operator_00_scc-hostnetwork.yaml": {assetsScc0000_20_kubeApiserverOperator_00_sccHostnetworkYaml, map[string]*bintree{}}, - "0000_20_kube-apiserver-operator_00_scc-nonroot.yaml": {assetsScc0000_20_kubeApiserverOperator_00_sccNonrootYaml, map[string]*bintree{}}, - "0000_20_kube-apiserver-operator_00_scc-privileged.yaml": {assetsScc0000_20_kubeApiserverOperator_00_sccPrivilegedYaml, map[string]*bintree{}}, - "0000_20_kube-apiserver-operator_00_scc-restricted.yaml": {assetsScc0000_20_kubeApiserverOperator_00_sccRestrictedYaml, map[string]*bintree{}}, - "0000_80_hostpath-provisioner-securitycontextconstraints.yaml": {assetsScc0000_80_hostpathProvisionerSecuritycontextconstraintsYaml, map[string]*bintree{}}, - }}, - }}, -}} - -// RestoreAsset restores an asset under the given directory -func RestoreAsset(dir, name string) error { - data, err := Asset(name) - if err != nil { - return err - } - info, err := AssetInfo(name) - if err != nil { - return err - } - err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) - if err != nil { - return err - } - err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) - if err != nil { - return err - } - err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) - if err != nil { - return err - } - return nil -} - -// RestoreAssets restores an asset under the given directory recursively -func RestoreAssets(dir, name string) error { - children, err := AssetDir(name) - // File - if err != nil { - return RestoreAsset(dir, name) - } - // Dir - for _, child := range children { - err = RestoreAssets(dir, filepath.Join(name, child)) - if err != nil { - return err - } - } - return nil -} - -func _filePath(dir, name string) string { - cannonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
-} diff --git a/pkg/assets/storage.go b/pkg/assets/storage.go index 899765a4d12..36181170c08 100644 --- a/pkg/assets/storage.go +++ b/pkg/assets/storage.go @@ -6,8 +6,6 @@ import ( "k8s.io/klog/v2" - scassets "github.com/openshift/microshift/pkg/assets/storage" - "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" @@ -73,7 +71,7 @@ func applySCs(scs []string, applier readerApplier, render RenderFunc, params Ren for _, sc := range scs { klog.Infof("Applying sc %s", sc) - objBytes, err := scassets.Asset(sc) + objBytes, err := Asset(sc) if err != nil { return fmt.Errorf("error getting asset %s: %v", sc, err) } diff --git a/pkg/assets/storage/bindata.go b/pkg/assets/storage/bindata.go deleted file mode 100644 index 3bbe255f75f..00000000000 --- a/pkg/assets/storage/bindata.go +++ /dev/null @@ -1,228 +0,0 @@ -// Package assets Code generated by go-bindata. (@generated) DO NOT EDIT. -// sources: -// assets/storage/0000_80_hostpath-provisioner-storageclass.yaml -package assets - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" -) - -type asset struct { - bytes []byte - info os.FileInfo -} - -type bindataFileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time -} - -// Name return file name -func (fi bindataFileInfo) Name() string { - return fi.name -} - -// Size return file size -func (fi bindataFileInfo) Size() int64 { - return fi.size -} - -// Mode return file mode -func (fi bindataFileInfo) Mode() os.FileMode { - return fi.mode -} - -// Mode return file modify time -func (fi bindataFileInfo) ModTime() time.Time { - return fi.modTime -} - -// IsDir return file whether a directory -func (fi bindataFileInfo) IsDir() bool { - return fi.mode&os.ModeDir != 0 -} - -// Sys return file is sys mode -func (fi bindataFileInfo) Sys() interface{} { - return nil -} - -var _assetsStorage0000_80_hostpathProvisionerStorageclassYaml = []byte(`apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: kubevirt-hostpath-provisioner -provisioner: kubevirt.io/hostpath-provisioner -reclaimPolicy: Delete -volumeBindingMode: WaitForFirstConsumer`) - -func assetsStorage0000_80_hostpathProvisionerStorageclassYamlBytes() ([]byte, error) { - return _assetsStorage0000_80_hostpathProvisionerStorageclassYaml, nil -} - -func assetsStorage0000_80_hostpathProvisionerStorageclassYaml() (*asset, error) { - bytes, err := assetsStorage0000_80_hostpathProvisionerStorageclassYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "assets/storage/0000_80_hostpath-provisioner-storageclass.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -// Asset loads and returns the asset for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func Asset(name string) ([]byte, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) - } - return a.bytes, nil - } - return nil, fmt.Errorf("Asset %s not found", name) -} - -// MustAsset is like Asset but panics when Asset would return an error. -// It simplifies safe initialization of global variables. -func MustAsset(name string) []byte { - a, err := Asset(name) - if err != nil { - panic("asset: Asset(" + name + "): " + err.Error()) - } - - return a -} - -// AssetInfo loads and returns the asset info for the given name. 
-// It returns an error if the asset could not be found or -// could not be loaded. -func AssetInfo(name string) (os.FileInfo, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) - } - return a.info, nil - } - return nil, fmt.Errorf("AssetInfo %s not found", name) -} - -// AssetNames returns the names of the assets. -func AssetNames() []string { - names := make([]string, 0, len(_bindata)) - for name := range _bindata { - names = append(names, name) - } - return names -} - -// _bindata is a table, holding each asset generator, mapped to its name. -var _bindata = map[string]func() (*asset, error){ - "assets/storage/0000_80_hostpath-provisioner-storageclass.yaml": assetsStorage0000_80_hostpathProvisionerStorageclassYaml, -} - -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"} -// AssetDir("data/img") would return []string{"a.png", "b.png"} -// AssetDir("foo.txt") and AssetDir("notexist") would return an error -// AssetDir("") will return []string{"data"}. -func AssetDir(name string) ([]string, error) { - node := _bintree - if len(name) != 0 { - cannonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(cannonicalName, "/") - for _, p := range pathList { - node = node.Children[p] - if node == nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - } - } - if node.Func != nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - rv := make([]string, 0, len(node.Children)) - for childName := range node.Children { - rv = append(rv, childName) - } - return rv, nil -} - -type bintree struct { - Func func() (*asset, error) - Children map[string]*bintree -} - -var _bintree = &bintree{nil, map[string]*bintree{ - "assets": {nil, map[string]*bintree{ - "storage": {nil, map[string]*bintree{ - "0000_80_hostpath-provisioner-storageclass.yaml": {assetsStorage0000_80_hostpathProvisionerStorageclassYaml, map[string]*bintree{}}, - }}, - }}, -}} - -// RestoreAsset restores an asset under the given directory -func RestoreAsset(dir, name string) error { - data, err := Asset(name) - if err != nil { - return err - } - info, err := AssetInfo(name) - if err != nil { - return err - } - err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) - if err != nil { - return err - } - err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) - if err != nil { - return err - } - err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) - if err != nil { - return err - } - return nil -} - -// RestoreAssets restores an asset under the given directory recursively -func RestoreAssets(dir, name string) error { - children, err := AssetDir(name) - // File - if err != nil { - return RestoreAsset(dir, name) - } - // Dir - for _, child := range children { - err = RestoreAssets(dir, filepath.Join(name, child)) - if err != nil { - return err - } - } - return nil -} - -func _filePath(dir, name string) string { - cannonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
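Editor's note: the per-directory bindata files removed above all expose the same generated lookup API (Asset, MustAsset, AssetInfo, AssetNames, AssetDir), and the consolidated pkg/assets/bindata.go keeps those entry points, which is why storage.go can now call Asset(sc) from its own package. A minimal sketch of using that API from another package follows; the import path and asset name are examples for illustration, not taken from this patch.

package main

import (
	"fmt"

	assets "github.com/openshift/microshift/pkg/assets"
)

func main() {
	// Asset returns the embedded manifest bytes, or an error for an unknown name.
	objBytes, err := assets.Asset("assets/components/hostpath-provisioner/storageclass.yaml")
	if err != nil {
		fmt.Println("asset not found:", err)
		return
	}
	fmt.Printf("loaded %d bytes\n", len(objBytes))

	// AssetNames lists every manifest go-bindata packed, handy when a lookup fails.
	for _, name := range assets.AssetNames() {
		fmt.Println(name)
	}
}
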
-} diff --git a/pkg/components/controllers.go b/pkg/components/controllers.go index 9e64194781a..2ae9a31564e 100644 --- a/pkg/components/controllers.go +++ b/pkg/components/controllers.go @@ -12,29 +12,29 @@ func startServiceCAController(cfg *config.MicroshiftConfig, kubeconfigPath strin var ( //TODO: fix the rolebinding and sa clusterRoleBinding = []string{ - "assets/rbac/0000_60_service-ca_00_clusterrolebinding.yaml", + "assets/components/service-ca/clusterrolebinding.yaml", } clusterRole = []string{ - "assets/rbac/0000_60_service-ca_00_clusterrole.yaml", + "assets/components/service-ca/clusterrole.yaml", } roleBinding = []string{ - "assets/rbac/0000_60_service-ca_00_rolebinding.yaml", + "assets/components/service-ca/rolebinding.yaml", } role = []string{ - "assets/rbac/0000_60_service-ca_00_role.yaml", + "assets/components/service-ca/role.yaml", } apps = []string{ - "assets/apps/0000_60_service-ca_05_deploy.yaml", + "assets/components/service-ca/deployment.yaml", } ns = []string{ - "assets/core/0000_60_service-ca_01_namespace.yaml", + "assets/components/service-ca/ns.yaml", } sa = []string{ - "assets/core/0000_60_service-ca_04_sa.yaml", + "assets/components/service-ca/sa.yaml", } - secret = "assets/core/0000_60_service-ca_04_secret.yaml" + secret = "assets/components/service-ca/signing-secret.yaml" secretName = "signing-key" - cm = "assets/core/0000_60_service-ca_04_configmap.yaml" + cm = "assets/components/service-ca/signing-cabundle.yaml" cmName = "signing-cabundle" ) caPath := cfg.DataDir + "/certs/ca-bundle/ca-bundle.crt" @@ -100,28 +100,28 @@ func startServiceCAController(cfg *config.MicroshiftConfig, kubeconfigPath strin func startIngressController(cfg *config.MicroshiftConfig, kubeconfigPath string) error { var ( clusterRoleBinding = []string{ - "assets/rbac/0000_80_openshift-router-cluster-role-binding.yaml", + "assets/components/openshift-router/cluster-role-binding.yaml", } clusterRole = []string{ - "assets/rbac/0000_80_openshift-router-cluster-role.yaml", + "assets/components/openshift-router/cluster-role.yaml", } apps = []string{ - "assets/apps/0000_80_openshift-router-deployment.yaml", + "assets/components/openshift-router/deployment.yaml", } ns = []string{ - "assets/core/0000_80_openshift-router-namespace.yaml", + "assets/components/openshift-router/namespace.yaml", } sa = []string{ - "assets/core/0000_80_openshift-router-service-account.yaml", + "assets/components/openshift-router/service-account.yaml", } cm = []string{ - "assets/core/0000_80_openshift-router-cm.yaml", + "assets/components/openshift-router/configmap.yaml", } svc = []string{ - "assets/core/0000_80_openshift-router-service.yaml", + "assets/components/openshift-router/service-internal.yaml", } extSvc = []string{ - "assets/core/0000_80_openshift-router-external-service.yaml", + "assets/components/openshift-router/service-cloud.yaml", } ) if err := assets.ApplyNamespaces(ns, kubeconfigPath); err != nil { @@ -162,27 +162,27 @@ func startIngressController(cfg *config.MicroshiftConfig, kubeconfigPath string) func startDNSController(cfg *config.MicroshiftConfig, kubeconfigPath string) error { var ( clusterRoleBinding = []string{ - "assets/rbac/0000_70_dns_01-cluster-role-binding.yaml", + "assets/components/openshift-dns/dns/cluster-role-binding.yaml", } clusterRole = []string{ - "assets/rbac/0000_70_dns_01-cluster-role.yaml", + "assets/components/openshift-dns/dns/cluster-role.yaml", } apps = []string{ - "assets/apps/0000_70_dns_01-dns-daemonset.yaml", - "assets/apps/0000_70_dns_01-node-resolver-daemonset.yaml", + 
"assets/components/openshift-dns/dns/daemonset.yaml", + "assets/components/openshift-dns/node-resolver/daemonset.yaml", } ns = []string{ - "assets/core/0000_70_dns_00-namespace.yaml", + "assets/components/openshift-dns/dns/namespace.yaml", } sa = []string{ - "assets/core/0000_70_dns_01-dns-service-account.yaml", - "assets/core/0000_70_dns_01-node-resolver-service-account.yaml", + "assets/components/openshift-dns/dns/service-account.yaml", + "assets/components/openshift-dns/node-resolver/service-account.yaml", } cm = []string{ - "assets/core/0000_70_dns_01-configmap.yaml", + "assets/components/openshift-dns/dns/configmap.yaml", } svc = []string{ - "assets/core/0000_70_dns_01-service.yaml", + "assets/components/openshift-dns/dns/service.yaml", } ) if err := assets.ApplyNamespaces(ns, kubeconfigPath); err != nil { diff --git a/pkg/components/networking.go b/pkg/components/networking.go index 58398032a1f..a9783c63729 100644 --- a/pkg/components/networking.go +++ b/pkg/components/networking.go @@ -8,22 +8,22 @@ import ( func startFlannel(kubeconfigPath string) error { var ( // psp = []string{ - // "assets/rbac/0000_00_podsecuritypolicy-flannel.yaml", + // "assets/components/flannel/podsecuritypolicy.yaml", // } cr = []string{ - "assets/rbac/0000_00_flannel-clusterrole.yaml", + "assets/components/flannel/clusterrole.yaml", } crb = []string{ - "assets/rbac/0000_00_flannel-clusterrolebinding.yaml", + "assets/components/flannel/clusterrolebinding.yaml", } sa = []string{ - "assets/core/0000_00_flannel-service-account.yaml", + "assets/components/flannel/service-account.yaml", } cm = []string{ - "assets/core/0000_00_flannel-configmap.yaml", + "assets/components/flannel/configmap.yaml", } ds = []string{ - "assets/apps/0000_00_flannel-daemonset.yaml", + "assets/components/flannel/daemonset.yaml", } ) diff --git a/pkg/components/storage.go b/pkg/components/storage.go index 86bef55bba4..7c8b1a9a489 100644 --- a/pkg/components/storage.go +++ b/pkg/components/storage.go @@ -8,25 +8,25 @@ import ( func startHostpathProvisioner(kubeconfigPath string) error { var ( ns = []string{ - "assets/core/0000_80_hostpath-provisioner-namespace.yaml", + "assets/components/hostpath-provisioner/namespace.yaml", } sa = []string{ - "assets/core/0000_80_hostpath-provisioner-serviceaccount.yaml", + "assets/components/hostpath-provisioner/service-account.yaml", } cr = []string{ - "assets/rbac/0000_80_hostpath-provisioner-clusterrole.yaml", + "assets/components/hostpath-provisioner/clusterrole.yaml", } crb = []string{ - "assets/rbac/0000_80_hostpath-provisioner-clusterrolebinding.yaml", + "assets/components/hostpath-provisioner/clusterrolebinding.yaml", } scc = []string{ - "assets/scc/0000_80_hostpath-provisioner-securitycontextconstraints.yaml", + "assets/components/hostpath-provisioner/scc.yaml", } ds = []string{ - "assets/apps/000_80_hostpath-provisioner-daemonset.yaml", + "assets/components/hostpath-provisioner/daemonset.yaml", } sc = []string{ - "assets/storage/0000_80_hostpath-provisioner-storageclass.yaml", + "assets/components/hostpath-provisioner/storageclass.yaml", } ) if err := assets.ApplyNamespaces(ns, kubeconfigPath); err != nil { diff --git a/scripts/bindata.sh b/scripts/bindata.sh index de7f7855eff..2cf1996084b 100755 --- a/scripts/bindata.sh +++ b/scripts/bindata.sh @@ -1,6 +1,7 @@ +#!/bin/bash + go install github.com/go-bindata/go-bindata/... 
-for i in crd core rbac apps scc storage; do - OUTPUT="pkg/assets/${i}/bindata.go" - "${GOPATH}"/bin/go-bindata -nocompress -nometadata -prefix "pkg/assets/${i}" -pkg assets -o ${OUTPUT} "./assets/${i}/..." - gofmt -s -w "${OUTPUT}" -done + +OUTPUT="pkg/assets/bindata.go" +"${GOPATH}"/bin/go-bindata -nocompress -prefix "pkg/assets" -pkg assets -o ${OUTPUT} "./assets/..." +gofmt -s -w "${OUTPUT}" From bdef3a72c6e0b7f55aad3fc9c36dbd2f82d4a21f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Mar 2022 13:05:06 +0100 Subject: [PATCH 19/28] build(deps): bump actions/upload-artifact from 2.3.1 to 3 (#612) Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 2.3.1 to 3. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/v2.3.1...v3) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ubuntu.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ubuntu.yaml b/.github/workflows/ubuntu.yaml index 51c94619323..a923c5045d4 100644 --- a/.github/workflows/ubuntu.yaml +++ b/.github/workflows/ubuntu.yaml @@ -29,7 +29,7 @@ jobs: run: make build - name: save artifacts - uses: actions/upload-artifact@v2.3.1 + uses: actions/upload-artifact@v3 with: name: microshift path: ./microshift From 9f890a4b58a65db4d49cabccdaa8468d608eba73 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Mar 2022 13:05:23 +0100 Subject: [PATCH 20/28] build(deps): bump actions/checkout from 2 to 3 (#609) Bumps [actions/checkout](https://github.com/actions/checkout) from 2 to 3. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/nightly.yaml | 2 +- .github/workflows/release.yaml | 2 +- .github/workflows/release_to_copr.yaml | 2 +- .github/workflows/testing.yaml | 2 +- .github/workflows/ubuntu.yaml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/nightly.yaml b/.github/workflows/nightly.yaml index 70f443ec677..de70997f8b1 100644 --- a/.github/workflows/nightly.yaml +++ b/.github/workflows/nightly.yaml @@ -9,7 +9,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout source - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Install required packages run: sudo apt install build-essential qemu-user qemu-user-static diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 0718c1e447c..93d04140256 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -23,7 +23,7 @@ jobs: run: sudo apt-get update && sudo apt install build-essential qemu-user qemu-user-static podman - name: Checkout source - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: execute release.sh shell: bash diff --git a/.github/workflows/release_to_copr.yaml b/.github/workflows/release_to_copr.yaml index fad4955059f..29f991c82b3 100644 --- a/.github/workflows/release_to_copr.yaml +++ b/.github/workflows/release_to_copr.yaml @@ -15,7 +15,7 @@ jobs: run: sudo dnf install -y copr-cli golang gcc make systemd policycoreutils rpm-build git which - name: Checkout source - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: fetch-depth: 0 diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index 56fb4929e0f..6b410d940ce 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -25,6 +25,6 @@ jobs: with: go-version: ${{ matrix.go-version }} - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Test run: go test $(go list ./... | grep -v /vendor/ | grep -v /scripts) diff --git a/.github/workflows/ubuntu.yaml b/.github/workflows/ubuntu.yaml index a923c5045d4..a954b9774da 100644 --- a/.github/workflows/ubuntu.yaml +++ b/.github/workflows/ubuntu.yaml @@ -23,7 +23,7 @@ jobs: run: sudo apt install build-essential - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: make run: make build From 38a486c275e3155729a3fb62ff5341f65d563188 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Mar 2022 13:05:45 +0100 Subject: [PATCH 21/28] build(deps): bump technote-space/broken-link-checker-action (#572) Bumps [technote-space/broken-link-checker-action](https://github.com/technote-space/broken-link-checker-action) from 2.2.11 to 2.2.12. - [Release notes](https://github.com/technote-space/broken-link-checker-action/releases) - [Changelog](https://github.com/technote-space/broken-link-checker-action/blob/main/.releasegarc) - [Commits](https://github.com/technote-space/broken-link-checker-action/compare/v2.2.11...v2.2.12) --- updated-dependencies: - dependency-name: technote-space/broken-link-checker-action dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/broken-link-check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/broken-link-check.yml b/.github/workflows/broken-link-check.yml index 44b86b3fd7e..3c28d87638f 100644 --- a/.github/workflows/broken-link-check.yml +++ b/.github/workflows/broken-link-check.yml @@ -13,4 +13,4 @@ jobs: runs-on: ubuntu-latest steps: - name: Broken Link Check - uses: technote-space/broken-link-checker-action@v2.2.11 + uses: technote-space/broken-link-checker-action@v2.2.12 From 27767d98f8e03ad4ff376d5032cf680ecdf3beae Mon Sep 17 00:00:00 2001 From: Miguel Angel Ajo Pelayo Date: Thu, 31 Mar 2022 16:22:21 +0200 Subject: [PATCH 22/28] Equalise service stop or failures (#637) * Make kube-proxy Service stoppable The kube-proxy k8s code does not support an stop channel, but at least we can accept cancel on the context and move on, so MicroShift can be stopped properly when necessary. Related-Issue: #556 Signed-off-by: Miguel Angel Ajo * Exit on kustomization failure This normalizes the kustomization service with the other services, and makes kustomization issues evident to the deployer. Signed-off-by: Miguel Angel Ajo Pelayo * Make oauth-api-server stoppable and return errors Signed-off-by: Miguel Angel Ajo * Make ocp-api-server stoppable by manager Signed-off-by: Miguel Angel Ajo * Let the OpenshiftControllerManager service to be stopped Signed-off-by: Miguel Angel Ajo --- pkg/controllers/openshift-apiserver.go | 4 +--- pkg/controllers/openshift-controller-manager.go | 16 ++++++++++------ pkg/controllers/openshift-oauth-server.go | 5 ++--- pkg/kustomize/apply.go | 2 +- pkg/node/kube-proxy.go | 10 +++++++--- 5 files changed, 21 insertions(+), 16 deletions(-) diff --git a/pkg/controllers/openshift-apiserver.go b/pkg/controllers/openshift-apiserver.go index 724540c071b..3d6546c9f73 100644 --- a/pkg/controllers/openshift-apiserver.go +++ b/pkg/controllers/openshift-apiserver.go @@ -119,11 +119,9 @@ func (s *OCPAPIServer) Run(ctx context.Context, ready chan<- struct{}, stopped c return err } - stopCh := make(chan struct{}) - if err := s.options.RunAPIServer(stopCh); err != nil { + if err := s.options.RunAPIServer(ctx.Done()); err != nil { klog.Fatalf("Failed to start ocp-apiserver %v", err) } - return ctx.Err() } diff --git a/pkg/controllers/openshift-controller-manager.go b/pkg/controllers/openshift-controller-manager.go index 1e6c83e8151..c20fb8bc352 100644 --- a/pkg/controllers/openshift-controller-manager.go +++ b/pkg/controllers/openshift-controller-manager.go @@ -17,7 +17,6 @@ package controllers import ( "context" - "fmt" "io" "io/ioutil" "os" @@ -107,7 +106,7 @@ func (s *OCPControllerManager) Run(ctx context.Context, ready chan<- struct{}, s go func() { healthcheckStatus := util.RetryTCPConnection("127.0.0.1", "8445") if !healthcheckStatus { - klog.Fatalf(s.Name(), fmt.Errorf("healthcheck status"), "%s failed to start") + klog.Fatalf("initial healthcheck on %s failed", s.Name()) } klog.Infof("%s is ready", s.Name()) close(ready) @@ -116,13 +115,18 @@ func (s *OCPControllerManager) Run(ctx context.Context, ready chan<- struct{}, s if err := assets.ApplyNamespaces([]string{ "assets/core/0000_50_cluster-openshift-controller-manager_00_namespace.yaml", }, s.kubeconfig); err != nil { - klog.Warningf("failed to apply openshift namespaces %v", err) + klog.Fatalf("failed to apply openshift namespaces %v", err) } options := 
openshift_controller_manager.OpenShiftControllerManager{Output: os.Stdout} options.ConfigFilePath = s.ConfigFilePath - if err := options.StartControllerManager(); err != nil { - klog.Fatalf("Failed to start openshift-controller-manager %v", err) - } + + go func() { + if err := options.StartControllerManager(); err != nil { + klog.Fatalf("Failed to start openshift-controller-manager %v", err) + } + }() + + <-ctx.Done() return ctx.Err() } diff --git a/pkg/controllers/openshift-oauth-server.go b/pkg/controllers/openshift-oauth-server.go index bc4d3d37330..5863c5f6c70 100644 --- a/pkg/controllers/openshift-oauth-server.go +++ b/pkg/controllers/openshift-oauth-server.go @@ -102,7 +102,6 @@ func (s *OpenShiftOAuth) configure(cfg *config.MicroshiftConfig) { func (s *OpenShiftOAuth) Run(ctx context.Context, ready chan<- struct{}, stopped chan<- struct{}) error { defer close(stopped) - stopCh := make(chan struct{}) // run readiness check go func() { @@ -114,8 +113,8 @@ func (s *OpenShiftOAuth) Run(ctx context.Context, ready chan<- struct{}, stopped close(ready) }() - if err := oauth_apiserver.RunOAuthAPIServer(s.options, stopCh); err != nil { - return err + if err := oauth_apiserver.RunOAuthAPIServer(s.options, ctx.Done()); err != nil { + klog.Fatalf("Error starting oauth API server: %s", err) } return ctx.Err() diff --git a/pkg/kustomize/apply.go b/pkg/kustomize/apply.go index 9a50fab0d64..0a23f8588dc 100644 --- a/pkg/kustomize/apply.go +++ b/pkg/kustomize/apply.go @@ -48,7 +48,7 @@ func (s *Kustomizer) Run(ctx context.Context, ready chan<- struct{}, stopped cha if _, err := os.Stat(kustomization); !errors.Is(err, os.ErrNotExist) { klog.Infof("Applying kustomization at %v ", kustomization) if err := ApplyKustomizationWithRetries(s.path, s.kubeconfig); err != nil { - klog.Warningf("Applying kustomization failed: %s. Giving up.", err) + klog.Fatalf("Applying kustomization failed: %s. 
Giving up.", err) } else { klog.Warningf("Kustomization applied successfully.") } diff --git a/pkg/node/kube-proxy.go b/pkg/node/kube-proxy.go index b97014ecb66..ea244e2b6a4 100644 --- a/pkg/node/kube-proxy.go +++ b/pkg/node/kube-proxy.go @@ -106,9 +106,13 @@ func (s *ProxyOptions) Run(ctx context.Context, ready chan<- struct{}, stopped c klog.Infof("%s is ready", s.Name()) close(ready) }() - if err := s.options.Run(); err != nil { - klog.Fatalf("%s failed to start", s.Name(), err) - } + go func() { + if err := s.options.Run(); err != nil { + klog.Fatalf("%s failed to start %v", s.Name(), err) + } + }() + + <-ctx.Done() return ctx.Err() } From 3e28ab4f7f0b1856ce7683e472f3f834fd604885 Mon Sep 17 00:00:00 2001 From: Ricardo Noriega De Soto Date: Thu, 31 Mar 2022 22:09:08 +0200 Subject: [PATCH 23/28] Format value of error for stopped services (#644) Signed-off-by: Ricardo Noriega --- pkg/cmd/run.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cmd/run.go b/pkg/cmd/run.go index 40dc6440f14..4a71e156cf6 100644 --- a/pkg/cmd/run.go +++ b/pkg/cmd/run.go @@ -121,7 +121,7 @@ func RunMicroshift(cfg *config.MicroshiftConfig, flags *pflag.FlagSet) error { go func() { klog.Infof("Started %s", m.Name()) if err := m.Run(ctx, ready, stopped); err != nil { - klog.Infof("Stopped %s", m.Name(), err) + klog.Errorf("Stopped %s: %v", m.Name(), err) } else { klog.Infof("%s completed", m.Name()) From 9d6ea58acfc7785d07dba023bf398ff1799ad1d6 Mon Sep 17 00:00:00 2001 From: Miguel Angel Ajo Pelayo Date: Fri, 1 Apr 2022 11:51:32 +0200 Subject: [PATCH 24/28] Add conntrack-tools dependency to rpms (#624) We need to check EL8/9 targets for such package being fully available once built in copr nightly. Signed-off-by: Miguel Angel Ajo Pelayo --- packaging/rpm/microshift.spec | 1 + 1 file changed, 1 insertion(+) diff --git a/packaging/rpm/microshift.spec b/packaging/rpm/microshift.spec index e44640cef0c..449cebb978c 100644 --- a/packaging/rpm/microshift.spec +++ b/packaging/rpm/microshift.spec @@ -66,6 +66,7 @@ Requires: cri-o Requires: cri-tools Requires: iptables Requires: microshift-selinux +Requires: conntrack-tools %{?systemd_requires} From 8c7936371286bd591f8880d8705a3723426d58d2 Mon Sep 17 00:00:00 2001 From: Jon Cope Date: Wed, 6 Apr 2022 10:05:48 -0500 Subject: [PATCH 25/28] add default annotation to kubevirt-hostprovisioner storageclass (#647) --- .../hostpath-provisioner/storageclass.yaml | 4 +- pkg/assets/bindata.go | 117 +++++++++--------- 2 files changed, 63 insertions(+), 58 deletions(-) diff --git a/assets/components/hostpath-provisioner/storageclass.yaml b/assets/components/hostpath-provisioner/storageclass.yaml index 732978ff9e4..53b3f675c47 100644 --- a/assets/components/hostpath-provisioner/storageclass.yaml +++ b/assets/components/hostpath-provisioner/storageclass.yaml @@ -2,6 +2,8 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: kubevirt-hostpath-provisioner + annotations: + storageclass.kubernetes.io/is-default-class: "true" provisioner: kubevirt.io/hostpath-provisioner reclaimPolicy: Delete -volumeBindingMode: WaitForFirstConsumer \ No newline at end of file +volumeBindingMode: WaitForFirstConsumer diff --git a/pkg/assets/bindata.go b/pkg/assets/bindata.go index 5af716fc417..b10c4223fcb 100644 --- a/pkg/assets/bindata.go +++ b/pkg/assets/bindata.go @@ -148,7 +148,7 @@ func assetsComponentsFlannelClusterroleYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/flannel/clusterrole.yaml", size: 418, mode: 
os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/flannel/clusterrole.yaml", size: 418, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -176,7 +176,7 @@ func assetsComponentsFlannelClusterrolebindingYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/flannel/clusterrolebinding.yaml", size: 248, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/flannel/clusterrolebinding.yaml", size: 248, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -229,7 +229,7 @@ func assetsComponentsFlannelConfigmapYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/flannel/configmap.yaml", size: 674, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/flannel/configmap.yaml", size: 674, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -349,7 +349,7 @@ func assetsComponentsFlannelDaemonsetYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/flannel/daemonset.yaml", size: 2543, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/flannel/daemonset.yaml", size: 2543, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -411,7 +411,7 @@ func assetsComponentsFlannelPodsecuritypolicyYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/flannel/podsecuritypolicy.yaml", size: 1195, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/flannel/podsecuritypolicy.yaml", size: 1195, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -432,7 +432,7 @@ func assetsComponentsFlannelServiceAccountYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/flannel/service-account.yaml", size: 86, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/flannel/service-account.yaml", size: 86, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -471,7 +471,7 @@ func assetsComponentsHostpathProvisionerClusterroleYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/hostpath-provisioner/clusterrole.yaml", size: 609, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/hostpath-provisioner/clusterrole.yaml", size: 609, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -499,7 +499,7 @@ func assetsComponentsHostpathProvisionerClusterrolebindingYaml() (*asset, error) return nil, err } - info := bindataFileInfo{name: "assets/components/hostpath-provisioner/clusterrolebinding.yaml", size: 338, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/hostpath-provisioner/clusterrolebinding.yaml", size: 338, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } 
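Editor's note: the storageclass.kubernetes.io/is-default-class: "true" annotation added by this patch makes the DefaultStorageClass admission plugin assign kubevirt-hostpath-provisioner to any PersistentVolumeClaim that does not name a class itself. A small client-go sketch of a claim relying on that default follows; the kubeconfig path is an assumption for illustration, and the types match the Kubernetes client libraries of this vintage.

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumed location of the admin kubeconfig on a MicroShift host.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/microshift/resources/kubeadmin/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// No spec.storageClassName is set, so the claim picks up whichever class
	// carries the is-default-class annotation.
	pvc := &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "demo-claim", Namespace: "default"},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: resource.MustParse("1Gi"),
				},
			},
		},
	}
	created, err := client.CoreV1().PersistentVolumeClaims("default").Create(context.TODO(), pvc, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("created claim", created.Name)
}
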
@@ -555,7 +555,7 @@ func assetsComponentsHostpathProvisionerDaemonsetYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/hostpath-provisioner/daemonset.yaml", size: 1225, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/hostpath-provisioner/daemonset.yaml", size: 1225, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -575,7 +575,7 @@ func assetsComponentsHostpathProvisionerNamespaceYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/hostpath-provisioner/namespace.yaml", size: 78, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/hostpath-provisioner/namespace.yaml", size: 78, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -616,7 +616,7 @@ func assetsComponentsHostpathProvisionerSccYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/hostpath-provisioner/scc.yaml", size: 480, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/hostpath-provisioner/scc.yaml", size: 480, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -637,7 +637,7 @@ func assetsComponentsHostpathProvisionerServiceAccountYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/hostpath-provisioner/service-account.yaml", size: 132, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/hostpath-provisioner/service-account.yaml", size: 132, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -646,9 +646,12 @@ var _assetsComponentsHostpathProvisionerStorageclassYaml = []byte(`apiVersion: s kind: StorageClass metadata: name: kubevirt-hostpath-provisioner + annotations: + storageclass.kubernetes.io/is-default-class: "true" provisioner: kubevirt.io/hostpath-provisioner reclaimPolicy: Delete -volumeBindingMode: WaitForFirstConsumer`) +volumeBindingMode: WaitForFirstConsumer +`) func assetsComponentsHostpathProvisionerStorageclassYamlBytes() ([]byte, error) { return _assetsComponentsHostpathProvisionerStorageclassYaml, nil @@ -660,7 +663,7 @@ func assetsComponentsHostpathProvisionerStorageclassYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/hostpath-provisioner/storageclass.yaml", size: 204, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/hostpath-provisioner/storageclass.yaml", size: 276, mode: os.FileMode(420), modTime: time.Unix(1649188210, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -688,7 +691,7 @@ func assetsComponentsOpenshiftDnsDnsClusterRoleBindingYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/openshift-dns/dns/cluster-role-binding.yaml", size: 223, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/openshift-dns/dns/cluster-role-binding.yaml", size: 223, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -742,7 +745,7 @@ func assetsComponentsOpenshiftDnsDnsClusterRoleYaml() (*asset, 
error) { return nil, err } - info := bindataFileInfo{name: "assets/components/openshift-dns/dns/cluster-role.yaml", size: 492, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/openshift-dns/dns/cluster-role.yaml", size: 492, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -788,7 +791,7 @@ func assetsComponentsOpenshiftDnsDnsConfigmapYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/openshift-dns/dns/configmap.yaml", size: 610, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/openshift-dns/dns/configmap.yaml", size: 610, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -910,7 +913,7 @@ func assetsComponentsOpenshiftDnsDnsDaemonsetYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/openshift-dns/dns/daemonset.yaml", size: 3179, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/openshift-dns/dns/daemonset.yaml", size: 3179, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -939,7 +942,7 @@ func assetsComponentsOpenshiftDnsDnsNamespaceYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/openshift-dns/dns/namespace.yaml", size: 417, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/openshift-dns/dns/namespace.yaml", size: 417, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -961,7 +964,7 @@ func assetsComponentsOpenshiftDnsDnsServiceAccountYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/openshift-dns/dns/service-account.yaml", size: 85, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/openshift-dns/dns/service-account.yaml", size: 85, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1008,7 +1011,7 @@ func assetsComponentsOpenshiftDnsDnsServiceYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/openshift-dns/dns/service.yaml", size: 691, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/openshift-dns/dns/service.yaml", size: 691, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1152,7 +1155,7 @@ func assetsComponentsOpenshiftDnsNodeResolverDaemonsetYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/openshift-dns/node-resolver/daemonset.yaml", size: 4823, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/openshift-dns/node-resolver/daemonset.yaml", size: 4823, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1174,7 +1177,7 @@ func assetsComponentsOpenshiftDnsNodeResolverServiceAccountYaml() (*asset, error return nil, err } - info := bindataFileInfo{name: "assets/components/openshift-dns/node-resolver/service-account.yaml", size: 95, mode: os.FileMode(420), modTime: 
time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/openshift-dns/node-resolver/service-account.yaml", size: 95, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1204,7 +1207,7 @@ func assetsComponentsOpenshiftRouterClusterRoleBindingYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/openshift-router/cluster-role-binding.yaml", size: 329, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/openshift-router/cluster-role-binding.yaml", size: 329, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1282,7 +1285,7 @@ func assetsComponentsOpenshiftRouterClusterRoleYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/openshift-router/cluster-role.yaml", size: 883, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/openshift-router/cluster-role.yaml", size: 883, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1306,7 +1309,7 @@ func assetsComponentsOpenshiftRouterConfigmapYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/openshift-router/configmap.yaml", size: 168, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/openshift-router/configmap.yaml", size: 168, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1463,7 +1466,7 @@ func assetsComponentsOpenshiftRouterDeploymentYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/openshift-router/deployment.yaml", size: 4746, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/openshift-router/deployment.yaml", size: 4746, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1494,7 +1497,7 @@ func assetsComponentsOpenshiftRouterNamespaceYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/openshift-router/namespace.yaml", size: 499, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/openshift-router/namespace.yaml", size: 499, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1518,7 +1521,7 @@ func assetsComponentsOpenshiftRouterServiceAccountYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/openshift-router/service-account.yaml", size: 213, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/openshift-router/service-account.yaml", size: 213, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1557,7 +1560,7 @@ func assetsComponentsOpenshiftRouterServiceCloudYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/openshift-router/service-cloud.yaml", size: 567, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/openshift-router/service-cloud.yaml", size: 567, mode: os.FileMode(420), 
modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1602,7 +1605,7 @@ func assetsComponentsOpenshiftRouterServiceInternalYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/openshift-router/service-internal.yaml", size: 727, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/openshift-router/service-internal.yaml", size: 727, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1683,7 +1686,7 @@ func assetsComponentsServiceCaClusterroleYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/service-ca/clusterrole.yaml", size: 864, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/service-ca/clusterrole.yaml", size: 864, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1712,7 +1715,7 @@ func assetsComponentsServiceCaClusterrolebindingYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/service-ca/clusterrolebinding.yaml", size: 298, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/service-ca/clusterrolebinding.yaml", size: 298, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1797,7 +1800,7 @@ func assetsComponentsServiceCaDeploymentYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/service-ca/deployment.yaml", size: 1866, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/service-ca/deployment.yaml", size: 1866, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1821,7 +1824,7 @@ func assetsComponentsServiceCaNsYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/service-ca/ns.yaml", size: 168, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/service-ca/ns.yaml", size: 168, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1884,7 +1887,7 @@ func assetsComponentsServiceCaRoleYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/service-ca/role.yaml", size: 634, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/service-ca/role.yaml", size: 634, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1914,7 +1917,7 @@ func assetsComponentsServiceCaRolebindingYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/service-ca/rolebinding.yaml", size: 343, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/service-ca/rolebinding.yaml", size: 343, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1936,7 +1939,7 @@ func assetsComponentsServiceCaSaYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/service-ca/sa.yaml", size: 99, mode: os.FileMode(420), modTime: 
time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/service-ca/sa.yaml", size: 99, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1960,7 +1963,7 @@ func assetsComponentsServiceCaSigningCabundleYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/service-ca/signing-cabundle.yaml", size: 123, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/service-ca/signing-cabundle.yaml", size: 123, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1986,7 +1989,7 @@ func assetsComponentsServiceCaSigningSecretYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/components/service-ca/signing-secret.yaml", size: 144, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/components/service-ca/signing-secret.yaml", size: 144, mode: os.FileMode(420), modTime: time.Unix(1648147667, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -2012,7 +2015,7 @@ func assetsCore0000_50_clusterOpenshiftControllerManager_00_namespaceYaml() (*as return nil, err } - info := bindataFileInfo{name: "assets/core/0000_50_cluster-openshift-controller-manager_00_namespace.yaml", size: 254, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/core/0000_50_cluster-openshift-controller-manager_00_namespace.yaml", size: 254, mode: os.FileMode(420), modTime: time.Unix(1642788979, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -2240,7 +2243,7 @@ func assetsCrd0000_03_authorizationOpenshift_01_rolebindingrestrictionCrdYaml() return nil, err } - info := bindataFileInfo{name: "assets/crd/0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml", size: 10910, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/crd/0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml", size: 10910, mode: os.FileMode(420), modTime: time.Unix(1642788979, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -2361,7 +2364,7 @@ func assetsCrd0000_03_configOperator_01_proxyCrdYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/crd/0000_03_config-operator_01_proxy.crd.yaml", size: 4972, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/crd/0000_03_config-operator_01_proxy.crd.yaml", size: 4972, mode: os.FileMode(420), modTime: time.Unix(1642788979, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -2630,7 +2633,7 @@ func assetsCrd0000_03_quotaOpenshift_01_clusterresourcequotaCrdYaml() (*asset, e return nil, err } - info := bindataFileInfo{name: "assets/crd/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml", size: 12895, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/crd/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml", size: 12895, mode: os.FileMode(420), modTime: time.Unix(1642788979, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -3017,7 +3020,7 @@ func assetsCrd0000_03_securityOpenshift_01_sccCrdYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/crd/0000_03_security-openshift_01_scc.crd.yaml", size: 17110, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := 
bindataFileInfo{name: "assets/crd/0000_03_security-openshift_01_scc.crd.yaml", size: 17110, mode: os.FileMode(420), modTime: time.Unix(1642788979, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -3435,7 +3438,7 @@ func assetsCrd0000_10_configOperator_01_buildCrdYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/crd/0000_10_config-operator_01_build.crd.yaml", size: 22856, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/crd/0000_10_config-operator_01_build.crd.yaml", size: 22856, mode: os.FileMode(420), modTime: time.Unix(1648480848, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -3531,7 +3534,7 @@ func assetsCrd0000_10_configOperator_01_featuregateCrdYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/crd/0000_10_config-operator_01_featuregate.crd.yaml", size: 3486, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/crd/0000_10_config-operator_01_featuregate.crd.yaml", size: 3486, mode: os.FileMode(420), modTime: time.Unix(1647546859, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -3710,7 +3713,7 @@ func assetsCrd0000_10_configOperator_01_imageCrdYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/crd/0000_10_config-operator_01_image.crd.yaml", size: 8484, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/crd/0000_10_config-operator_01_image.crd.yaml", size: 8484, mode: os.FileMode(420), modTime: time.Unix(1642788979, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -3821,7 +3824,7 @@ func assetsCrd0000_10_configOperator_01_imagecontentsourcepolicyCrdYaml() (*asse return nil, err } - info := bindataFileInfo{name: "assets/crd/0000_10_config-operator_01_imagecontentsourcepolicy.crd.yaml", size: 5139, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/crd/0000_10_config-operator_01_imagecontentsourcepolicy.crd.yaml", size: 5139, mode: os.FileMode(420), modTime: time.Unix(1642788979, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5381,7 +5384,7 @@ func assetsCrd0000_11_imageregistryConfigsCrdYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/crd/0000_11_imageregistry-configs.crd.yaml", size: 90225, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/crd/0000_11_imageregistry-configs.crd.yaml", size: 90225, mode: os.FileMode(420), modTime: time.Unix(1642788979, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5440,7 +5443,7 @@ func assetsScc0000_20_kubeApiserverOperator_00_sccAnyuidYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-anyuid.yaml", size: 1048, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-anyuid.yaml", size: 1048, mode: os.FileMode(420), modTime: time.Unix(1642788979, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5504,7 +5507,7 @@ func assetsScc0000_20_kubeApiserverOperator_00_sccHostaccessYaml() (*asset, erro return nil, err } - info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-hostaccess.yaml", size: 1267, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: 
"assets/scc/0000_20_kube-apiserver-operator_00_scc-hostaccess.yaml", size: 1267, mode: os.FileMode(420), modTime: time.Unix(1642788979, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5568,7 +5571,7 @@ func assetsScc0000_20_kubeApiserverOperator_00_sccHostmountAnyuidYaml() (*asset, return nil, err } - info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-hostmount-anyuid.yaml", size: 1298, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-hostmount-anyuid.yaml", size: 1298, mode: os.FileMode(420), modTime: time.Unix(1642788979, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5630,7 +5633,7 @@ func assetsScc0000_20_kubeApiserverOperator_00_sccHostnetworkYaml() (*asset, err return nil, err } - info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-hostnetwork.yaml", size: 1123, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-hostnetwork.yaml", size: 1123, mode: os.FileMode(420), modTime: time.Unix(1642788979, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5692,7 +5695,7 @@ func assetsScc0000_20_kubeApiserverOperator_00_sccNonrootYaml() (*asset, error) return nil, err } - info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-nonroot.yaml", size: 1166, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-nonroot.yaml", size: 1166, mode: os.FileMode(420), modTime: time.Unix(1642788979, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5756,7 +5759,7 @@ func assetsScc0000_20_kubeApiserverOperator_00_sccPrivilegedYaml() (*asset, erro return nil, err } - info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-privileged.yaml", size: 1291, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-privileged.yaml", size: 1291, mode: os.FileMode(420), modTime: time.Unix(1642788979, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5819,7 +5822,7 @@ func assetsScc0000_20_kubeApiserverOperator_00_sccRestrictedYaml() (*asset, erro return nil, err } - info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-restricted.yaml", size: 1213, mode: os.FileMode(420), modTime: time.Unix(1646566396, 0)} + info := bindataFileInfo{name: "assets/scc/0000_20_kube-apiserver-operator_00_scc-restricted.yaml", size: 1213, mode: os.FileMode(420), modTime: time.Unix(1642788979, 0)} a := &asset{bytes: bytes, info: info} return a, nil } From 78be44960257771cc0026213d674d9ca2321b379 Mon Sep 17 00:00:00 2001 From: Ricardo Noriega De Soto Date: Fri, 8 Apr 2022 13:38:51 +0200 Subject: [PATCH 26/28] Expose service node port range as MicroShift config (#649) Signed-off-by: Ricardo Noriega --- pkg/config/config.go | 20 +++++++++++--------- pkg/config/config_test.go | 25 +++++++++++++++---------- pkg/controllers/kube-apiserver.go | 1 + 3 files changed, 27 insertions(+), 19 deletions(-) diff --git a/pkg/config/config.go b/pkg/config/config.go index 14b6a616e0d..60540e36eee 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -32,10 +32,11 @@ var ( type ClusterConfig struct { URL string `yaml:"url"` - ClusterCIDR string `yaml:"clusterCIDR"` - ServiceCIDR string 
`yaml:"serviceCIDR"` - DNS string `yaml:"dns"` - Domain string `yaml:"domain"` + ClusterCIDR string `yaml:"clusterCIDR"` + ServiceCIDR string `yaml:"serviceCIDR"` + ServiceNodePortRange string `yaml:"serviceNodePortRange"` + DNS string `yaml:"dns"` + Domain string `yaml:"domain"` } type ControlPlaneConfig struct { @@ -82,11 +83,12 @@ func NewMicroshiftConfig() *MicroshiftConfig { NodeName: nodeName, NodeIP: nodeIP, Cluster: ClusterConfig{ - URL: "https://127.0.0.1:6443", - ClusterCIDR: "10.42.0.0/16", - ServiceCIDR: "10.43.0.0/16", - DNS: "10.43.0.10", - Domain: "cluster.local", + URL: "https://127.0.0.1:6443", + ClusterCIDR: "10.42.0.0/16", + ServiceCIDR: "10.43.0.0/16", + ServiceNodePortRange: "30000-32767", + DNS: "10.43.0.10", + Domain: "cluster.local", }, ControlPlane: ControlPlaneConfig{}, Node: NodeConfig{}, diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index b408265bc28..4c39fc17cc9 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -46,11 +46,12 @@ func TestCommandLineConfig(t *testing.T) { NodeName: "node1", NodeIP: "1.2.3.4", Cluster: ClusterConfig{ - URL: "https://1.2.3.4:6443", - ClusterCIDR: "10.20.30.40/16", - ServiceCIDR: "40.30.20.10/16", - DNS: "cluster.dns", - Domain: "cluster.local", + URL: "https://1.2.3.4:6443", + ClusterCIDR: "10.20.30.40/16", + ServiceCIDR: "40.30.20.10/16", + ServiceNodePortRange: "1024-32767", + DNS: "cluster.dns", + Domain: "cluster.local", }, }, err: nil, @@ -70,6 +71,7 @@ func TestCommandLineConfig(t *testing.T) { flags.StringVar(&config.Cluster.URL, "cluster-url", "", "") flags.StringVar(&config.Cluster.ClusterCIDR, "cluster-cidr", "", "") flags.StringVar(&config.Cluster.ServiceCIDR, "service-cidr", "", "") + flags.StringVar(&config.Cluster.ServiceNodePortRange, "service-node-port-range", "", "") flags.StringVar(&config.Cluster.DNS, "cluster-dns", "", "") flags.StringVar(&config.Cluster.Domain, "cluster-domain", "", "") @@ -84,6 +86,7 @@ func TestCommandLineConfig(t *testing.T) { "--cluster-url=" + tt.config.Cluster.URL, "--cluster-cidr=" + tt.config.Cluster.ClusterCIDR, "--service-cidr=" + tt.config.Cluster.ServiceCIDR, + "--service-node-port-range=" + tt.config.Cluster.ServiceNodePortRange, "--cluster-dns=" + tt.config.Cluster.DNS, "--cluster-domain=" + tt.config.Cluster.Domain, }) @@ -120,11 +123,12 @@ func TestEnvironmentVariableConfig(t *testing.T) { NodeName: "node1", NodeIP: "1.2.3.4", Cluster: ClusterConfig{ - URL: "https://cluster.com:4343/endpoint", - ClusterCIDR: "10.20.30.40/16", - ServiceCIDR: "40.30.20.10/16", - DNS: "10.43.0.10", - Domain: "cluster.local", + URL: "https://cluster.com:4343/endpoint", + ClusterCIDR: "10.20.30.40/16", + ServiceCIDR: "40.30.20.10/16", + ServiceNodePortRange: "1024-32767", + DNS: "10.43.0.10", + Domain: "cluster.local", }, }, err: nil, @@ -142,6 +146,7 @@ func TestEnvironmentVariableConfig(t *testing.T) { {"MICROSHIFT_CLUSTER_URL", "https://cluster.com:4343/endpoint"}, {"MICROSHIFT_CLUSTER_CLUSTERCIDR", "10.20.30.40/16"}, {"MICROSHIFT_CLUSTER_SERVICECIDR", "40.30.20.10/16"}, + {"MICROSHIFT_CLUSTER_SERVICENODEPORTRANGE", "1024-32767"}, {"MICROSHIFT_CLUSTER_DNS", "10.43.0.10"}, {"MICROSHIFT_CLUSTER_DOMAIN", "cluster.local"}, }, diff --git a/pkg/controllers/kube-apiserver.go b/pkg/controllers/kube-apiserver.go index 6fe70a17e13..44cb36cda65 100644 --- a/pkg/controllers/kube-apiserver.go +++ b/pkg/controllers/kube-apiserver.go @@ -110,6 +110,7 @@ func (s *KubeAPIServer) configure(cfg *config.MicroshiftConfig) { "--service-account-key-file=" + cfg.DataDir + 
"/resources/kube-apiserver/secrets/service-account-key/service-account.crt", "--service-account-signing-key-file=" + cfg.DataDir + "/resources/kube-apiserver/secrets/service-account-key/service-account.key", "--service-cluster-ip-range=" + cfg.Cluster.ServiceCIDR, + "--service-node-port-range=" + cfg.Cluster.ServiceNodePortRange, "--storage-backend=etcd3", "--tls-cert-file=" + cfg.DataDir + "/certs/kube-apiserver/secrets/service-network-serving-certkey/tls.crt", "--tls-private-key-file=" + cfg.DataDir + "/certs/kube-apiserver/secrets/service-network-serving-certkey/tls.key", From 74623de8f564c94736060189890e0db56d55e032 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Apr 2022 13:17:14 +0200 Subject: [PATCH 27/28] build(deps): bump actions/setup-go from 2 to 3 (#651) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 2 to 3. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/testing.yaml | 2 +- .github/workflows/ubuntu.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index 6b410d940ce..7ad45405a93 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -21,7 +21,7 @@ jobs: # test CI concept echo "${{ matrix.go-version }}" - name: Install Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v3 with: go-version: ${{ matrix.go-version }} - name: Checkout code diff --git a/.github/workflows/ubuntu.yaml b/.github/workflows/ubuntu.yaml index a954b9774da..d4f670fca7d 100644 --- a/.github/workflows/ubuntu.yaml +++ b/.github/workflows/ubuntu.yaml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-20.04 steps: - name: Install Go - uses: actions/setup-go@v2.1.4 + uses: actions/setup-go@v3 with: go-version: ${{ env.GO_VERSION }} From 758fab444d13619ceea2dcd82b55c9848564da2a Mon Sep 17 00:00:00 2001 From: Miguel Angel Ajo Date: Tue, 12 Apr 2022 13:50:33 +0200 Subject: [PATCH 28/28] Allow multiple kustomization paths This modifies the single path /var/lib/microshift/manifests default into: - /usr/lib/microshift/manifests - /etc/microshift/manifests which will be evaluated in order. While the first one could be fully managed in ostree (core components), the second one could be managed by configuration management. In addition the MicroShift configuration file supports customization of this via the manifestsDir entry. --- pkg/config/config.go | 7 +++++++ pkg/kustomize/apply.go | 16 +++++++++++----- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/pkg/config/config.go b/pkg/config/config.go index 60540e36eee..326dab480f2 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -22,6 +22,10 @@ const ( defaultUserDataDir = "~/.microshift/data" defaultGlobalConfigFile = "/etc/microshift/config.yaml" defaultGlobalDataDir = "/var/lib/microshift" + // for files managed via management system in /etc, i.e. user applications + defaultKustomizeDirEtc = "/etc/microshift/manifests" + // for files embedded in ostree. i.e. 
cni/other component customizations + defaultKustomizeDirLib = "/usr/lib/microshift/manifests" ) var ( @@ -62,6 +66,8 @@ type MicroshiftConfig struct { Cluster ClusterConfig `yaml:"cluster"` ControlPlane ControlPlaneConfig `yaml:"controlPlane"` Node NodeConfig `yaml:"node"` + + Manifests []string `yaml:"manifests"` } func NewMicroshiftConfig() *MicroshiftConfig { @@ -92,6 +98,7 @@ func NewMicroshiftConfig() *MicroshiftConfig { }, ControlPlane: ControlPlaneConfig{}, Node: NodeConfig{}, + Manifests: []string{defaultKustomizeDirLib, defaultKustomizeDirEtc}, } } diff --git a/pkg/kustomize/apply.go b/pkg/kustomize/apply.go index 0a23f8588dc..b87eacaee61 100644 --- a/pkg/kustomize/apply.go +++ b/pkg/kustomize/apply.go @@ -26,13 +26,13 @@ const ( ) type Kustomizer struct { - path string + paths []string kubeconfig string } func NewKustomizer(cfg *config.MicroshiftConfig) *Kustomizer { return &Kustomizer{ - path: filepath.Join(cfg.DataDir, "manifests"), + paths: cfg.Manifests, kubeconfig: filepath.Join(cfg.DataDir, "resources", "kubeadmin", "kubeconfig"), } } @@ -44,7 +44,15 @@ func (s *Kustomizer) Run(ctx context.Context, ready chan<- struct{}, stopped cha defer close(stopped) defer close(ready) - kustomization := filepath.Join(s.path, "kustomization.yaml") + for _, path := range s.paths { + s.ApplyKustomizationPath(path) + } + + return ctx.Err() +} + +func (s *Kustomizer) ApplyKustomizationPath(path string) { + kustomization := filepath.Join(path, "kustomization.yaml") if _, err := os.Stat(kustomization); !errors.Is(err, os.ErrNotExist) { klog.Infof("Applying kustomization at %v ", kustomization) - if err := ApplyKustomizationWithRetries(s.path, s.kubeconfig); err != nil { + if err := ApplyKustomizationWithRetries(path, s.kubeconfig); err != nil { @@ -55,8 +63,6 @@ func (s *Kustomizer) Run(ctx context.Context, ready chan<- struct{}, stopped cha } else { klog.Infof("No kustomization found at " + kustomization) } - - return ctx.Err() } func ApplyKustomizationWithRetries(kustomization string, kubeconfig string) error {