From f07089aa05019fdfb36c58c58713b78a426af788 Mon Sep 17 00:00:00 2001 From: Chris Ostrouchov Date: Fri, 18 Mar 2022 17:46:48 -0400 Subject: [PATCH 01/13] Checking in files for kind deployment --- qhub/stages/input_vars.py | 7 +- .../stages/02-infrastructure/local/main.tf | 116 ++++- .../02-infrastructure/local/metallb.yaml | 480 ++++++++++++++++++ .../stages/02-infrastructure/local/outputs.tf | 20 + .../02-infrastructure/local/variables.tf | 10 + 5 files changed, 621 insertions(+), 12 deletions(-) create mode 100644 qhub/template/stages/02-infrastructure/local/metallb.yaml create mode 100644 qhub/template/stages/02-infrastructure/local/outputs.tf create mode 100644 qhub/template/stages/02-infrastructure/local/variables.tf diff --git a/qhub/stages/input_vars.py b/qhub/stages/input_vars.py index 6f4e5f94b5..abc13c8e99 100644 --- a/qhub/stages/input_vars.py +++ b/qhub/stages/input_vars.py @@ -36,7 +36,12 @@ def stage_01_terraform_state(stage_outputs, config): def stage_02_infrastructure(stage_outputs, config): if config["provider"] == "local": - return {"kube_context": config["local"].get("kube_context")} + return { + "kubeconfig_filename": os.path.join( + tempfile.gettempdir(), "QHUB_KUBECONFIG" + ), + "kube_context": config["local"].get("kube_context") + } elif config["provider"] == "do": return { "name": config["project_name"], diff --git a/qhub/template/stages/02-infrastructure/local/main.tf b/qhub/template/stages/02-infrastructure/local/main.tf index f2ad3e2aa1..1408a0f4c5 100644 --- a/qhub/template/stages/02-infrastructure/local/main.tf +++ b/qhub/template/stages/02-infrastructure/local/main.tf @@ -1,17 +1,111 @@ -variable "kube_context" { - description = "Optional kubernetes context to use to connect to kubernetes cluster" - type = string +terraform { + required_providers { + kind = { + source = "kyma-incubator/kind" + version = "0.0.11" + } + docker = { + source = "kreuzwerker/docker" + version = "2.16.0" + } + kubectl = { + source = "gavinbunney/kubectl" + 
version = ">= 1.7.0" + } + } +} + +provider "kind" { + +} + +provider "docker" { + } -output "kubernetes_credentials" { - description = "Parameters needed to connect to kubernetes cluster locally" - value = { - config_path = pathexpand("~/.kube/config") - config_context = var.kube_context +provider "kubernetes" { + host = kind_cluster.default.endpoint + cluster_ca_certificate = kind_cluster.default.cluster_ca_certificate + client_key = kind_cluster.default.client_key + client_certificate = kind_cluster.default.client_certificate +} + +provider "kubectl" { + load_config_file = false + host = kind_cluster.default.endpoint + cluster_ca_certificate = kind_cluster.default.cluster_ca_certificate + client_key = kind_cluster.default.client_key + client_certificate = kind_cluster.default.client_certificate +} + +resource "kind_cluster" "default" { + name = "test-cluster" + wait_for_ready = true + + kind_config { + kind = "Cluster" + api_version = "kind.x-k8s.io/v1alpha4" + + node { + role = "general" + image = "kindest/node:v1.21.10" + } } } -output "kubeconfig_filename" { - description = "filename for qhub kubeconfig" - value = pathexpand("~/.kube/config") +resource "kubernetes_namespace" "metallb" { + metadata { + name = "metallb-system" + } +} + +data "kubectl_path_documents" "metallb" { + pattern = "${path.module}/metallb.yaml" +} + +resource "kubectl_manifest" "metallb" { + for_each = toset(data.kubectl_path_documents.metallb.documents) + yaml_body = each.value + wait = true + depends_on = [ kubernetes_namespace.metallb ] +} + +resource "kubectl_manifest" "load-balancer" { + yaml_body = yamlencode({ + apiVersion = "v1" + kind = "ConfigMap" + metadata = { + namespace = kubernetes_namespace.metallb.metadata.0.name + name = "config" + } + data = { + config = yamlencode({ + address-pools = [{ + name = "default" + protocol = "layer2" + addresses = [ + "${local.metallb_ip_min}-${local.metallb_ip_max}" + ] + }] + }) + } + }) + + depends_on = [ kubectl_manifest.metallb ] +} + 
+data "docker_network" "kind" { + name = "kind" + + depends_on = [ kind_cluster.default ] +} + +locals { + metallb_ip_min = cidrhost([ + for network in data.docker_network.kind.ipam_config : network if network.gateway != "" + ][0].subnet, 356) + + metallb_ip_max = cidrhost([ + for network in data.docker_network.kind.ipam_config : network if network.gateway != "" + ][0].subnet, 406) } diff --git a/qhub/template/stages/02-infrastructure/local/metallb.yaml b/qhub/template/stages/02-infrastructure/local/metallb.yaml new file mode 100644 index 0000000000..9d6b6833c8 --- /dev/null +++ b/qhub/template/stages/02-infrastructure/local/metallb.yaml @@ -0,0 +1,480 @@ +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + labels: + app: metallb + name: controller +spec: + allowPrivilegeEscalation: false + allowedCapabilities: [] + allowedHostPaths: [] + defaultAddCapabilities: [] + defaultAllowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: true + requiredDropCapabilities: + - ALL + runAsUser: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - secret + - emptyDir +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + labels: + app: metallb + name: speaker +spec: + allowPrivilegeEscalation: false + allowedCapabilities: + - NET_RAW + allowedHostPaths: [] + defaultAddCapabilities: [] + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: RunAsAny + hostIPC: false + hostNetwork: true + hostPID: false + hostPorts: + - max: 7472 + min: 7472 + - max: 7946 + min: 7946 + privileged: true + readOnlyRootFilesystem: true + requiredDropCapabilities: + - ALL + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - configMap + - secret + 
- emptyDir +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: metallb + name: controller + namespace: metallb-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: metallb + name: speaker + namespace: metallb-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: metallb + name: metallb-system:controller +rules: +- apiGroups: + - '' + resources: + - services + verbs: + - get + - list + - watch +- apiGroups: + - '' + resources: + - services/status + verbs: + - update +- apiGroups: + - '' + resources: + - events + verbs: + - create + - patch +- apiGroups: + - policy + resourceNames: + - controller + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: metallb + name: metallb-system:speaker +rules: +- apiGroups: + - '' + resources: + - services + - endpoints + - nodes + verbs: + - get + - list + - watch +- apiGroups: ["discovery.k8s.io"] + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - '' + resources: + - events + verbs: + - create + - patch +- apiGroups: + - policy + resourceNames: + - speaker + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: metallb + name: config-watcher + namespace: metallb-system +rules: +- apiGroups: + - '' + resources: + - configmaps + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: metallb + name: pod-lister + namespace: metallb-system +rules: +- apiGroups: + - '' + resources: + - pods + verbs: + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: metallb + name: controller + namespace: metallb-system +rules: +- apiGroups: + - '' + resources: + - secrets + verbs: + - create +- apiGroups: + - '' + resources: + - secrets + 
resourceNames: + - memberlist + verbs: + - list +- apiGroups: + - apps + resources: + - deployments + resourceNames: + - controller + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: metallb + name: metallb-system:controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metallb-system:controller +subjects: +- kind: ServiceAccount + name: controller + namespace: metallb-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: metallb + name: metallb-system:speaker +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metallb-system:speaker +subjects: +- kind: ServiceAccount + name: speaker + namespace: metallb-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: metallb + name: config-watcher + namespace: metallb-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: config-watcher +subjects: +- kind: ServiceAccount + name: controller +- kind: ServiceAccount + name: speaker +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: metallb + name: pod-lister + namespace: metallb-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pod-lister +subjects: +- kind: ServiceAccount + name: speaker +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: metallb + name: controller + namespace: metallb-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: controller +subjects: +- kind: ServiceAccount + name: controller +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app: metallb + component: speaker + name: speaker + namespace: metallb-system +spec: + selector: + matchLabels: + app: metallb + component: speaker + template: + metadata: + annotations: + prometheus.io/port: '7472' + prometheus.io/scrape: 
'true' + labels: + app: metallb + component: speaker + spec: + containers: + - args: + - --port=7472 + - --config=config + - --log-level=info + env: + - name: METALLB_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: METALLB_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: METALLB_ML_BIND_ADDR + valueFrom: + fieldRef: + fieldPath: status.podIP + # needed when another software is also using memberlist / port 7946 + # when changing this default you also need to update the container ports definition + # and the PodSecurityPolicy hostPorts definition + #- name: METALLB_ML_BIND_PORT + # value: "7946" + - name: METALLB_ML_LABELS + value: "app=metallb,component=speaker" + - name: METALLB_ML_SECRET_KEY + valueFrom: + secretKeyRef: + name: memberlist + key: secretkey + image: quay.io/metallb/speaker:v0.12.1 + name: speaker + ports: + - containerPort: 7472 + name: monitoring + - containerPort: 7946 + name: memberlist-tcp + - containerPort: 7946 + name: memberlist-udp + protocol: UDP + livenessProbe: + httpGet: + path: /metrics + port: monitoring + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /metrics + port: monitoring + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_RAW + drop: + - ALL + readOnlyRootFilesystem: true + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + serviceAccountName: speaker + terminationGracePeriodSeconds: 2 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: metallb + component: controller + name: controller + namespace: metallb-system +spec: + revisionHistoryLimit: 3 + selector: + matchLabels: + app: metallb + component: controller + template: + 
metadata: + annotations: + prometheus.io/port: '7472' + prometheus.io/scrape: 'true' + labels: + app: metallb + component: controller + spec: + containers: + - args: + - --port=7472 + - --config=config + - --log-level=info + env: + - name: METALLB_ML_SECRET_NAME + value: memberlist + - name: METALLB_DEPLOYMENT + value: controller + image: quay.io/metallb/controller:v0.12.1 + name: controller + ports: + - containerPort: 7472 + name: monitoring + livenessProbe: + httpGet: + path: /metrics + port: monitoring + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /metrics + port: monitoring + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - all + readOnlyRootFilesystem: true + nodeSelector: + kubernetes.io/os: linux + securityContext: + runAsNonRoot: true + runAsUser: 65534 + fsGroup: 65534 + serviceAccountName: controller + terminationGracePeriodSeconds: 0 diff --git a/qhub/template/stages/02-infrastructure/local/outputs.tf b/qhub/template/stages/02-infrastructure/local/outputs.tf new file mode 100644 index 0000000000..5356dd560b --- /dev/null +++ b/qhub/template/stages/02-infrastructure/local/outputs.tf @@ -0,0 +1,20 @@ +output "kubernetes_credentials" { + description = "Parameters needed to connect to kubernetes cluster locally" + sensitive = true + value = { + host = kind_cluster.default.endpoint + cluster_ca_certificate = kind_cluster.default.cluster_ca_certificate + client_key = kind_cluster.default.client_key + client_certificate = kind_cluster.default.client_certificate + } +} + +resource "local_file" "default" { + content = kind_cluster.default.kubeconfig + filename = var.kubeconfig_filename +} + +output "kubeconfig_filename" { + description = "filename for qhub kubeconfig" + value = var.kubeconfig_filename +} diff --git 
a/qhub/template/stages/02-infrastructure/local/variables.tf b/qhub/template/stages/02-infrastructure/local/variables.tf new file mode 100644 index 0000000000..097bb19598 --- /dev/null +++ b/qhub/template/stages/02-infrastructure/local/variables.tf @@ -0,0 +1,10 @@ +variable "kubeconfig_filename" { + description = "Kubernetes kubeconfig written to filesystem" + type = string + default = null +} + +variable "kube_context" { + description = "Optional kubernetes context to use to connect to kubernetes cluster" + type = string +} From 32a286f07e97c3ad23d795a2080f4ada0f3d3c87 Mon Sep 17 00:00:00 2001 From: Chris Ostrouchov Date: Fri, 18 Mar 2022 18:26:10 -0400 Subject: [PATCH 02/13] Using kind for deployment --- .github/workflows/kubernetes_test.yaml | 38 ++++---------------------- 1 file changed, 6 insertions(+), 32 deletions(-) diff --git a/.github/workflows/kubernetes_test.yaml b/.github/workflows/kubernetes_test.yaml index 24f4880604..033fae6d67 100644 --- a/.github/workflows/kubernetes_test.yaml +++ b/.github/workflows/kubernetes_test.yaml @@ -55,55 +55,34 @@ jobs: run: | mkdir -p bin pushd bin - curl -L https://github.com/kubernetes/minikube/releases/download/v1.22.0/minikube-linux-amd64 -o minikube - chmod +x minikube + curl -L https://github.com/kubernetes-sigs/kind/releases/download/v0.12.0/kind-linux-arm64 -o kind + chmod +x kind curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.19.0/bin/linux/amd64/kubectl chmod +x kubectl echo "$PWD" >> $GITHUB_PATH popd - - name: Start Minikube + - name: Enable docker permissions for user run: | sudo docker ps sudo usermod -aG docker $USER && newgrp docker - minikube start --kubernetes-version=1.19.4 --driver=docker --cpus 4 --memory 12288 --wait=all - - name: Print minikube and kubectl versions - run: | - minikube version - kubectl version - - name: Use minikube docker daemon - run: | - eval $(minikube docker-env) - echo "DOCKER_TLS_VERIFY=$DOCKER_TLS_VERIFY" >> $GITHUB_ENV - echo 
"DOCKER_HOST=$DOCKER_HOST" >> $GITHUB_ENV - echo "DOCKER_CERT_PATH=$DOCKER_CERT_PATH" >> $GITHUB_ENV - echo "MINIKUBE_ACTIVE_DOCKERD=$MINIKUBE_ACTIVE_DOCKERD" >> $GITHUB_ENV - - name: Print docker connection information - run: | + docker info docker ps - name: Build docker images run: | cd qhub/template/image docker buildx bake + kind load docker-image quansight/qhub-jupyterhub:${TAG} quansight/qhub-jupyterlab:${TAG} quansight/qhub-dask-worker:${TAG} quansight/qhub-dask-gateway:${TAG} env: TAG: ${{ github.sha }} - - name: List docker images in minikube - run: | - docker images - name: Get routing table for docker pods run: | ip route - - name: Configure LoadBalancer IPs - run: | - python tests/scripts/minikube-loadbalancer-ip.py - name: Add DNS entry to hosts run: | - sudo echo "192.168.49.100 github-actions.qhub.dev" | sudo tee -a /etc/hosts - - name: Enable Minikube metallb - run: | - minikube addons enable metallb + sudo echo "172.20.1.100 github-actions.qhub.dev" | sudo tee -a /etc/hosts - name: Basic kubectl checks before deployment run: | kubectl get all,cm,secret,ing -A @@ -202,8 +181,3 @@ jobs: if: always() run: | kubectl get all,cm,secret,ing -A - - - name: Delete minikube cluster - if: always() - run: | - minikube delete From e372cf5b68a21967f620b056b7a212eee6c831c3 Mon Sep 17 00:00:00 2001 From: Chris Ostrouchov Date: Fri, 18 Mar 2022 18:31:01 -0400 Subject: [PATCH 03/13] Black formatting --- qhub/stages/input_vars.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/qhub/stages/input_vars.py b/qhub/stages/input_vars.py index abc13c8e99..699707c3c5 100644 --- a/qhub/stages/input_vars.py +++ b/qhub/stages/input_vars.py @@ -40,7 +40,7 @@ def stage_02_infrastructure(stage_outputs, config): "kubeconfig_filename": os.path.join( tempfile.gettempdir(), "QHUB_KUBECONFIG" ), - "kube_context": config["local"].get("kube_context") + "kube_context": config["local"].get("kube_context"), } elif config["provider"] == "do": return { From 
adcc6b58f6e4942e807ea4213bb62b9a0f74385a Mon Sep 17 00:00:00 2001 From: Chris Ostrouchov Date: Fri, 18 Mar 2022 18:39:03 -0400 Subject: [PATCH 04/13] Cannot view kubernetes cluster before deployment now --- .github/workflows/kubernetes_test.yaml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/kubernetes_test.yaml b/.github/workflows/kubernetes_test.yaml index 033fae6d67..23c1919d31 100644 --- a/.github/workflows/kubernetes_test.yaml +++ b/.github/workflows/kubernetes_test.yaml @@ -83,9 +83,6 @@ jobs: - name: Add DNS entry to hosts run: | sudo echo "172.20.1.100 github-actions.qhub.dev" | sudo tee -a /etc/hosts - - name: Basic kubectl checks before deployment - run: | - kubectl get all,cm,secret,ing -A - name: Initialize QHub Cloud run: | mkdir -p local-deployment From 38ec11ac507c2d191ce63b8d433c3b2dd58195a8 Mon Sep 17 00:00:00 2001 From: Chris Ostrouchov Date: Fri, 18 Mar 2022 18:56:43 -0400 Subject: [PATCH 05/13] arm64 -> amd64 typo --- .github/workflows/kubernetes_test.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/kubernetes_test.yaml b/.github/workflows/kubernetes_test.yaml index 23c1919d31..f9242f09c8 100644 --- a/.github/workflows/kubernetes_test.yaml +++ b/.github/workflows/kubernetes_test.yaml @@ -55,7 +55,7 @@ jobs: run: | mkdir -p bin pushd bin - curl -L https://github.com/kubernetes-sigs/kind/releases/download/v0.12.0/kind-linux-arm64 -o kind + curl -L https://github.com/kubernetes-sigs/kind/releases/download/v0.12.0/kind-linux-amd64 -o kind chmod +x kind curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.19.0/bin/linux/amd64/kubectl From 7ff655d747072c698bc654a0921b067c737a0e50 Mon Sep 17 00:00:00 2001 From: viniciusdc Date: Thu, 4 Aug 2022 13:52:47 -0300 Subject: [PATCH 06/13] use decoupled docker image new registry As the original docker build pipeline was moved to a separate repo, those changes need to be reflected in the workflow as well: - Update docker image labels 
and registry (hardcoded to main) - Remove docker build stage, added docker pull in place use bash loop for docker pull fix looping bash syntax --- .github/workflows/kubernetes_test.yaml | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/.github/workflows/kubernetes_test.yaml b/.github/workflows/kubernetes_test.yaml index 529d6de7d4..fca08919f3 100644 --- a/.github/workflows/kubernetes_test.yaml +++ b/.github/workflows/kubernetes_test.yaml @@ -51,7 +51,7 @@ jobs: run: | conda install -c anaconda pip pip install .[dev] - - name: Download and Install Minikube and Kubectl + - name: Download and Install Kind and Kubectl run: | mkdir -p bin pushd bin @@ -70,13 +70,19 @@ jobs: docker info docker ps - - name: Build docker images + - name: Pull docker images run: | - cd qhub/template/image - docker buildx bake - kind load docker-image quansight/qhub-jupyterhub:${TAG} quansight/qhub-jupyterlab:${TAG} quansight/qhub-dask-worker:${TAG} quansight/qhub-dask-gateway:${TAG} + images=( ${REGISTRY}/nebari-jupyterhub:${TAG} ${REGISTRY}/nebari-jupyterlab:${TAG} ${REGISTRY}/nebari-dask-worker:${TAG} ) + for img in "${images[@]}"; do docker pull $img; done env: - TAG: ${{ github.sha }} + TAG: sha-43788cc + REGISTRY: quay.io/nebari + - name: Load Kind + run: | + kind load docker-image ${REGISTRY}/nebari-jupyterhub:${TAG} ${REGISTRY}/nebari-jupyterlab:${TAG} ${REGISTRY}/nebari-dask-worker:${TAG} + env: + TAG: sha-43788cc + REGISTRY: quay.io/nebari - name: List docker images in minikube run: | docker images @@ -92,9 +98,9 @@ jobs: cd local-deployment qhub init local --project=thisisatest --domain github-actions.qhub.dev --auth-provider=password - # Need smaller profiles on Minikube - sed -i -E 's/(cpu_guarantee):\s+[0-9\.]+/\1: 0.25/g' "qhub-config.yaml" - sed -i -E 's/(mem_guarantee):\s+[A-Za-z0-9\.]+/\1: 0.25G/g' "qhub-config.yaml" + # # Need smaller profiles on Minikube + # sed -i -E 's/(cpu_guarantee):\s+[0-9\.]+/\1: 0.25/g' "qhub-config.yaml" + 
# sed -i -E 's/(mem_guarantee):\s+[A-Za-z0-9\.]+/\1: 0.25G/g' "qhub-config.yaml" cat qhub-config.yaml - name: Deploy QHub Cloud From 8e00b15eb772b59d68e51a1f6ae6d69451af4a61 Mon Sep 17 00:00:00 2001 From: viniciusdc Date: Thu, 4 Aug 2022 14:49:51 -0300 Subject: [PATCH 07/13] add kind cluster create there is no kind config yet switch image tags to test kind load Run test Run test fix formating issue fix formating issue --- .github/workflows/kubernetes_test.yaml | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/.github/workflows/kubernetes_test.yaml b/.github/workflows/kubernetes_test.yaml index fca08919f3..b4c2913b6e 100644 --- a/.github/workflows/kubernetes_test.yaml +++ b/.github/workflows/kubernetes_test.yaml @@ -24,6 +24,9 @@ on: - "setup.cfg" - "pyproject.yoml" +env: + image_names: ( quay.io/nebari/nebari-jupyterhub:main quay.io/nebari/nebari-jupyterlab:main quay.io/nebari/nebari-dask-worker:main ) + jobs: test-kubernetes: name: "Kubernetes Tests" @@ -72,17 +75,15 @@ jobs: docker ps - name: Pull docker images run: | - images=( ${REGISTRY}/nebari-jupyterhub:${TAG} ${REGISTRY}/nebari-jupyterlab:${TAG} ${REGISTRY}/nebari-dask-worker:${TAG} ) - for img in "${images[@]}"; do docker pull $img; done - env: - TAG: sha-43788cc - REGISTRY: quay.io/nebari + images=${{ env.image_names }} + for img in "${images[@]}"; do docker pull $img; echo "--------------------------"; done + - name: Start Kind cluster + run: | + kind create cluster - name: Load Kind run: | - kind load docker-image ${REGISTRY}/nebari-jupyterhub:${TAG} ${REGISTRY}/nebari-jupyterlab:${TAG} ${REGISTRY}/nebari-dask-worker:${TAG} - env: - TAG: sha-43788cc - REGISTRY: quay.io/nebari + images=${{ env.image_names }} + for img in "${images[@]}"; do kind load docker-image $img; echo "--------------------------"; done - name: List docker images in minikube run: | docker images From c06060e9a3c355de5903711b5a5410f31e89b475 Mon Sep 17 00:00:00 2001 From: viniciusdc Date: Thu, 4 
Aug 2022 17:38:49 -0300 Subject: [PATCH 08/13] fix error from merge --- .../stages/02-infrastructure/local/main.tf | 56 ++++++++----------- .../stages/02-infrastructure/local/outputs.tf | 14 ++--- 2 files changed, 29 insertions(+), 41 deletions(-) diff --git a/qhub/template/stages/02-infrastructure/local/main.tf b/qhub/template/stages/02-infrastructure/local/main.tf index 4d3fc288b6..9fd8bb2618 100644 --- a/qhub/template/stages/02-infrastructure/local/main.tf +++ b/qhub/template/stages/02-infrastructure/local/main.tf @@ -1,11 +1,11 @@ terraform { required_providers { kind = { - source = "kyma-incubator/kind" + source = "kyma-incubator/kind" version = "0.0.11" } docker = { - source = "kreuzwerker/docker" + source = "kreuzwerker/docker" version = "2.16.0" } kubectl = { @@ -24,30 +24,30 @@ provider "docker" { } provider "kubernetes" { - host = kind_cluster.default.endpoint + host = kind_cluster.default.endpoint cluster_ca_certificate = kind_cluster.default.cluster_ca_certificate - client_key = kind_cluster.default.client_key - client_certificate = kind_cluster.default.client_certificate + client_key = kind_cluster.default.client_key + client_certificate = kind_cluster.default.client_certificate } provider "kubectl" { - load_config_file = false - host = kind_cluster.default.endpoint + load_config_file = false + host = kind_cluster.default.endpoint cluster_ca_certificate = kind_cluster.default.cluster_ca_certificate - client_key = kind_cluster.default.client_key - client_certificate = kind_cluster.default.client_certificate + client_key = kind_cluster.default.client_key + client_certificate = kind_cluster.default.client_certificate } resource "kind_cluster" "default" { - name = "test-cluster" + name = "test-cluster" wait_for_ready = true kind_config { - kind = "Cluster" + kind = "Cluster" api_version = "kind.x-k8s.io/v1alpha4" node { - role = "general" + role = "general" image = "kindest/node:v1.21.10" } } @@ -60,28 +60,28 @@ resource "kubernetes_namespace" "metallb" { 
} data "kubectl_path_documents" "metallb" { - pattern = "${path.module}/metallb.yaml" + pattern = "${path.module}/metallb.yaml" } resource "kubectl_manifest" "metallb" { - for_each = toset(data.kubectl_path_documents.metallb.documents) - yaml_body = each.value - wait = true - depends_on = [ kubernetes_namespace.metallb ] + for_each = toset(data.kubectl_path_documents.metallb.documents) + yaml_body = each.value + wait = true + depends_on = [kubernetes_namespace.metallb] } resource "kubectl_manifest" "load-balancer" { yaml_body = yamlencode({ apiVersion = "v1" - kind = "ConfigMap" + kind = "ConfigMap" metadata = { namespace = kubernetes_namespace.metallb.metadata.0.name - name = "config" + name = "config" } data = { config = yamlencode({ address-pools = [{ - name = "default" + name = "default" protocol = "layer2" addresses = [ "${local.metallb_ip_min}-${local.metallb_ip_max}" @@ -91,13 +91,13 @@ resource "kubectl_manifest" "load-balancer" { } }) - depends_on = [ kubectl_manifest.metallb ] + depends_on = [kubectl_manifest.metallb] } data "docker_network" "kind" { name = "kind" - depends_on = [ kind_cluster.default ] + depends_on = [kind_cluster.default] } locals { @@ -108,16 +108,4 @@ locals { metallb_ip_max = cidrhost([ for network in data.docker_network.kind.ipam_config : network if network.gateway != "" ][0].subnet, 406) - -output "kubernetes_credentials" { - description = "Parameters needed to connect to kubernetes cluster locally" - value = { - config_path = pathexpand("~/.kube/config") - config_context = var.kube_context - } -} - -output "kubeconfig_filename" { - description = "filename for qhub kubeconfig" - value = pathexpand("~/.kube/config") } diff --git a/qhub/template/stages/02-infrastructure/local/outputs.tf b/qhub/template/stages/02-infrastructure/local/outputs.tf index 5356dd560b..bb3134b493 100644 --- a/qhub/template/stages/02-infrastructure/local/outputs.tf +++ b/qhub/template/stages/02-infrastructure/local/outputs.tf @@ -1,20 +1,20 @@ output 
"kubernetes_credentials" { description = "Parameters needed to connect to kubernetes cluster locally" - sensitive = true - value = { - host = kind_cluster.default.endpoint + sensitive = true + value = { + host = kind_cluster.default.endpoint cluster_ca_certificate = kind_cluster.default.cluster_ca_certificate - client_key = kind_cluster.default.client_key - client_certificate = kind_cluster.default.client_certificate + client_key = kind_cluster.default.client_key + client_certificate = kind_cluster.default.client_certificate } } resource "local_file" "default" { - content = kind_cluster.default.kubeconfig + content = kind_cluster.default.kubeconfig filename = var.kubeconfig_filename } output "kubeconfig_filename" { description = "filename for qhub kubeconfig" - value = var.kubeconfig_filename + value = var.kubeconfig_filename } From f9f6bf76f12f4af00d12976de56e0eaf6cabe0d0 Mon Sep 17 00:00:00 2001 From: viniciusdc Date: Thu, 4 Aug 2022 17:52:02 -0300 Subject: [PATCH 09/13] Fix pre-commit error with yaml-check --- .pre-commit-config.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d6cf16ba74..fd02da0a10 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -14,6 +14,7 @@ repos: - id: check-yaml # jinja2 templates for helm charts exclude: 'qhub/template/stages/07-kubernetes-services/modules/kubernetes/services/(clearml/chart/templates/.*|prefect/chart/templates/.*)' + args: [--allow-multiple-documents] - repo: https://github.com/codespell-project/codespell rev: v2.1.0 From 2fb4394dea01d355e17cdea5076b705fcbf8cf2e Mon Sep 17 00:00:00 2001 From: viniciusdc Date: Thu, 4 Aug 2022 19:20:39 -0300 Subject: [PATCH 10/13] Update Ingress IP match on hosts --- .github/workflows/kubernetes_test.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/kubernetes_test.yaml b/.github/workflows/kubernetes_test.yaml index b4c2913b6e..41e25d1a00 100644 --- 
a/.github/workflows/kubernetes_test.yaml +++ b/.github/workflows/kubernetes_test.yaml @@ -92,7 +92,7 @@ jobs: ip route - name: Add DNS entry to hosts run: | - sudo echo "172.20.1.100 github-actions.qhub.dev" | sudo tee -a /etc/hosts + sudo echo "172.18.1.100 github-actions.qhub.dev" | sudo tee -a /etc/hosts - name: Initialize QHub Cloud run: | mkdir -p local-deployment From 9225748ed19e9541324dd809b96a17aa51e3ea37 Mon Sep 17 00:00:00 2001 From: viniciusdc Date: Thu, 4 Aug 2022 22:15:23 -0300 Subject: [PATCH 11/13] rm kubectl check status --- .github/workflows/kubernetes_test.yaml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/.github/workflows/kubernetes_test.yaml b/.github/workflows/kubernetes_test.yaml index 41e25d1a00..118c4937b7 100644 --- a/.github/workflows/kubernetes_test.yaml +++ b/.github/workflows/kubernetes_test.yaml @@ -186,8 +186,3 @@ jobs: run: | cd local-deployment qhub destroy --config qhub-config.yaml - - - name: Basic kubectl checks after cleanup - if: always() - run: | - kubectl get all,cm,secret,ing -A From 3c34ab8a3c2b9a6aeebe04cd8afcd56c2b8e917d Mon Sep 17 00:00:00 2001 From: viniciusdc Date: Tue, 9 Aug 2022 12:48:04 -0300 Subject: [PATCH 12/13] rm not used steps --- .github/workflows/kubernetes_test.yaml | 22 +++------------------- 1 file changed, 3 insertions(+), 19 deletions(-) diff --git a/.github/workflows/kubernetes_test.yaml b/.github/workflows/kubernetes_test.yaml index b4c2913b6e..5f0e865273 100644 --- a/.github/workflows/kubernetes_test.yaml +++ b/.github/workflows/kubernetes_test.yaml @@ -58,8 +58,6 @@ jobs: run: | mkdir -p bin pushd bin - curl -L https://github.com/kubernetes-sigs/kind/releases/download/v0.12.0/kind-linux-amd64 -o kind - chmod +x kind curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.19.0/bin/linux/amd64/kubectl chmod +x kubectl @@ -73,20 +71,6 @@ jobs: docker info docker ps - - name: Pull docker images - run: | - images=${{ env.image_names }} - for img in "${images[@]}"; do docker pull 
$img; echo "--------------------------"; done - - name: Start Kind cluster - run: | - kind create cluster - - name: Load Kind - run: | - images=${{ env.image_names }} - for img in "${images[@]}"; do kind load docker-image $img; echo "--------------------------"; done - - name: List docker images in minikube - run: | - docker images - name: Get routing table for docker pods run: | ip route @@ -99,9 +83,9 @@ jobs: cd local-deployment qhub init local --project=thisisatest --domain github-actions.qhub.dev --auth-provider=password - # # Need smaller profiles on Minikube - # sed -i -E 's/(cpu_guarantee):\s+[0-9\.]+/\1: 0.25/g' "qhub-config.yaml" - # sed -i -E 's/(mem_guarantee):\s+[A-Za-z0-9\.]+/\1: 0.25G/g' "qhub-config.yaml" + # Need smaller profiles on Local Kind + sed -i -E 's/(cpu_guarantee):\s+[0-9\.]+/\1: 0.25/g' "qhub-config.yaml" + sed -i -E 's/(mem_guarantee):\s+[A-Za-z0-9\.]+/\1: 0.25G/g' "qhub-config.yaml" cat qhub-config.yaml - name: Deploy QHub Cloud From 07132bdf2a042e051417e1f83fd322f8047c6012 Mon Sep 17 00:00:00 2001 From: viniciusdc Date: Tue, 9 Aug 2022 16:00:27 -0300 Subject: [PATCH 13/13] Add existing deployment mode --- .github/workflows/kubernetes_test.yaml | 3 --- .github/workflows/test-provider.yaml | 1 + qhub/initialize.py | 24 +++++++++++++++++++ qhub/provider/cicd/github.py | 2 +- qhub/schema.py | 7 ++++++ qhub/stages/input_vars.py | 4 ++++ qhub/stages/tf_objects.py | 10 ++++++++ .../stages/02-infrastructure/existing/main.tf | 17 +++++++++++++ qhub/utils.py | 2 +- 9 files changed, 65 insertions(+), 5 deletions(-) create mode 100644 qhub/template/stages/02-infrastructure/existing/main.tf diff --git a/.github/workflows/kubernetes_test.yaml b/.github/workflows/kubernetes_test.yaml index 0d6a3d443c..56cf6bdbf1 100644 --- a/.github/workflows/kubernetes_test.yaml +++ b/.github/workflows/kubernetes_test.yaml @@ -24,9 +24,6 @@ on: - "setup.cfg" - "pyproject.yoml" -env: - image_names: ( quay.io/nebari/nebari-jupyterhub:main 
quay.io/nebari/nebari-jupyterlab:main quay.io/nebari/nebari-dask-worker:main ) - jobs: test-kubernetes: name: "Kubernetes Tests" diff --git a/.github/workflows/test-provider.yaml b/.github/workflows/test-provider.yaml index 5044647568..26838773b5 100644 --- a/.github/workflows/test-provider.yaml +++ b/.github/workflows/test-provider.yaml @@ -46,6 +46,7 @@ jobs: - do - gcp - local + - existing cicd: - none - github-actions diff --git a/qhub/initialize.py b/qhub/initialize.py index 3a36b28c38..c213e38086 100644 --- a/qhub/initialize.py +++ b/qhub/initialize.py @@ -119,6 +119,23 @@ } } +EXISTING = { + "node_selectors": { + "general": { + "key": "kubernetes.io/os", + "value": "linux", + }, + "user": { + "key": "kubernetes.io/os", + "value": "linux", + }, + "worker": { + "key": "kubernetes.io/os", + "value": "linux", + }, + } +} + DIGITAL_OCEAN = { "region": "nyc3", "kubernetes_version": "PLACEHOLDER", @@ -390,6 +407,13 @@ def render_config( set_kubernetes_version(config, kubernetes_version, cloud_provider) if "AWS_DEFAULT_REGION" in os.environ: config["amazon_web_services"]["region"] = os.environ["AWS_DEFAULT_REGION"] + + elif cloud_provider == "existing": + config["theme"]["jupyterhub"][ + "hub_subtitle" + ] = "Autoscaling Compute Environment" + config["existing"] = EXISTING.copy() + elif cloud_provider == "local": config["theme"]["jupyterhub"][ "hub_subtitle" diff --git a/qhub/provider/cicd/github.py b/qhub/provider/cicd/github.py index ee8f09ccbb..a1a961a3cc 100644 --- a/qhub/provider/cicd/github.py +++ b/qhub/provider/cicd/github.py @@ -148,7 +148,7 @@ def gha_env_vars(config): env_vars["DIGITALOCEAN_TOKEN"] = "${{ secrets.DIGITALOCEAN_TOKEN }}" elif config["provider"] == "gcp": env_vars["GOOGLE_CREDENTIALS"] = "${{ secrets.GOOGLE_CREDENTIALS }}" - elif config["provider"] == "local": + elif config["provider"] in ["local", "existing"]: # create mechanism to allow for extra env vars? 
pass else: diff --git a/qhub/schema.py b/qhub/schema.py index 5838d2b405..ea2d9622f6 100644 --- a/qhub/schema.py +++ b/qhub/schema.py @@ -24,6 +24,7 @@ class TerraformStateEnum(str, enum.Enum): class ProviderEnum(str, enum.Enum): local = "local" + existing = "existing" do = "do" aws = "aws" gcp = "gcp" @@ -311,6 +312,11 @@ class LocalProvider(Base): node_selectors: typing.Dict[str, KeyValueDict] +class ExistingProvider(Base): + kube_context: typing.Optional[str] + node_selectors: typing.Dict[str, KeyValueDict] + + # ================= Theme ================== @@ -488,6 +494,7 @@ class Main(Base): default_images: DefaultImages storage: typing.Dict[str, str] local: typing.Optional[LocalProvider] + existing: typing.Optional[ExistingProvider] google_cloud_platform: typing.Optional[GoogleCloudPlatformProvider] amazon_web_services: typing.Optional[AmazonWebServicesProvider] azure: typing.Optional[AzureProvider] diff --git a/qhub/stages/input_vars.py b/qhub/stages/input_vars.py index c521dfacb2..ed89ecdfc2 100644 --- a/qhub/stages/input_vars.py +++ b/qhub/stages/input_vars.py @@ -42,6 +42,8 @@ def stage_02_infrastructure(stage_outputs, config): ), "kube_context": config["local"].get("kube_context"), } + elif config["provider"] == "existing": + return {"kube_context": config["existing"].get("kube_context")} elif config["provider"] == "do": return { "name": config["project_name"], @@ -170,6 +172,8 @@ def _calculate_note_groups(config): group: {"key": "doks.digitalocean.com/node-pool", "value": group} for group in ["general", "user", "worker"] } + elif config["provider"] == "existing": + return config["existing"].get("node_selectors") else: return config["local"]["node_selectors"] diff --git a/qhub/stages/tf_objects.py b/qhub/stages/tf_objects.py index 0f154cc6bd..8792c8d218 100644 --- a/qhub/stages/tf_objects.py +++ b/qhub/stages/tf_objects.py @@ -107,6 +107,16 @@ def QHubTerraformState(directory: str, qhub_config: Dict): 
container_name=f"{qhub_config['project_name']}-{qhub_config['namespace']}-state", key=f"terraform/{qhub_config['project_name']}-{qhub_config['namespace']}/{directory}", ) + elif qhub_config["provider"] == "existing": + optional_kwargs = {} + if "kube_context" in qhub_config["existing"]: + optional_kwargs["config_context"] = qhub_config["existing"]["kube_context"] + return TerraformBackend( + "kubernetes", + secret_suffix=f"{qhub_config['project_name']}-{qhub_config['namespace']}-{directory}", + load_config_file=True, + **optional_kwargs, + ) elif qhub_config["provider"] == "local": optional_kwargs = {} if "kube_context" in qhub_config["local"]: diff --git a/qhub/template/stages/02-infrastructure/existing/main.tf b/qhub/template/stages/02-infrastructure/existing/main.tf new file mode 100644 index 0000000000..4383e8294c --- /dev/null +++ b/qhub/template/stages/02-infrastructure/existing/main.tf @@ -0,0 +1,17 @@ +variable "kube_context" { + description = "Optional kubernetes context to use to connect to kubernetes cluster" + type = string +} + +output "kubernetes_credentials" { + description = "Parameters needed to connect to kubernetes cluster locally" + value = { + config_path = pathexpand("~/.kube/config") + config_context = var.kube_context + } +} + +output "kubeconfig_filename" { + description = "filename for qhub kubeconfig" + value = pathexpand("~/.kube/config") +} diff --git a/qhub/utils.py b/qhub/utils.py index 7691f24915..6dbbc414a5 100644 --- a/qhub/utils.py +++ b/qhub/utils.py @@ -179,7 +179,7 @@ def check_cloud_credentials(config): f"""The environment variables AWS_SECRET_ACCESS_KEY and SPACES_SECRET_ACCESS_KEY must be equal\n See {DO_ENV_DOCS} for more information""" ) - elif config["provider"] == "local": + elif config["provider"] in ["local", "existing"]: pass else: raise ValueError("Cloud Provider configuration not supported")