diff --git a/.gitattributes b/.gitattributes
index 00fc2af12..ab2438c04 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,2 +1 @@
-secrets/** filter=git-crypt diff=git-crypt
 binderhub/_version.py export-subst
diff --git a/.gitignore b/.gitignore
index 3a8687f6e..9d2c89a5a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -104,12 +104,13 @@ package-lock.json
 # Built files
 binderhub/static/dist
 helm-chart/binderhub/charts
+helm-chart/binderhub/requirements.lock
+testing/k8s-binder-k8s-hub/binderhub-chart-config-remote.yaml
+ci/id_rsa

 # Instructions we download
 k8s.txt
 helm.txt
-travis
-helm-chart/travis-binder.yaml

 # Federation data page
 doc/federation/data-federation.txt
diff --git a/.travis.yml b/.travis.yml
index 2f4e9b224..e8a225e12 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,62 +1,163 @@
 dist: bionic
 language: python
 python:
-  - 3.7
+  - 3.8
 git:
   depth: false
 services:
   - docker
+env:
+  global:
+    # Version of CI dependencies to be installed
+    - MINIKUBE_VERSION=1.14.2 # https://github.com/kubernetes/minikube/releases
+    - KUBE_VERSION=1.19.3 # https://github.com/kubernetes/kubernetes/releases
+    - HELM_VERSION=3.4.0 # https://github.com/helm/helm/releases
+
+    # GITHUB_ACCESS_TOKEN - to increase quota with GitHub
+    - secure: ugOe1mIyit6BmzuTLtzVM7+edswAES1TfAnGST0EkYv341z0NnqRGdRJYtH1q1rRoZ6ciSnjHdVe8zmentxDwRJSi50888ro8EG7NnvAjG31cdwEgm7Vdv4sEbt7Dp8gYxKb5kWtt9w4IxPdMocpWPeNBmjctX+7QWwvIJZstWDjOtt8BhDufiTTHeiZR3FTNy5ykJXnFXn/WMsZoxI/a3GAgSzV4wKu0OzZYYk3GY20inJKUwxwWgyNbzQXecWlup6REd+YYrq0h4hJESyrtTQwHsijssCuy+mH8fxPpPZuF2WtHAqej0zGVKyUU/xmpCgdIHbjsheIyEE1leYn+USuoWqiHf0gjx2sYvBJP4HizJNPgj67UUNcUfGXzjegy6cDZE8iXW5clDRDkmWgx53VFlVgWV0TgdxtmOFK3IpQVOoaWc4q0e4T+/38rmUiBFnbbEep2q1wwjZojYhyaIrsEG9jOhtLW2rTADc5fdXURYqfPpubM1SCTseT43T86sRfOH3x8o9MSDnVejZ317oPfoZyqrL/FwpW45M68v/RAfvb6mY/5vM7Jw+70trgz4RhLQS55nuqnZ7NwK8ooX27YnvVnx3YisADz1Cf/A/bNixj8pVkJ7TTw/TnR/gTzat2wfsgPTWRtC8hhOyLSFnZh3J5OyeHQQMmdPspilE=
+
+
+stages:
+  - name: publish
+    if: tag IS present OR ( branch IN (master) AND type IN (push) )
+  - name: test
+
 before_install:
-  - nvm install 14; nvm use 14
-  - pip install --upgrade pip setuptools
-install:
-  - export PATH=$PWD/bin:$PATH
-  - ./ci/install.sh
-  - pip install --upgrade -r dev-requirements.txt
-  - npm install
-  - npm run webpack
-  - pip install --upgrade . -r helm-chart/images/binderhub/requirements.txt
+  # Exit immediately if a command exits with a non-zero status.
+  - set -e
+  - . ci/common
+
   # NOTE: The latest docker python package (4.3.0) requires a more modern docker
   #       version (newer than 18.06.0-ce / API version: 1.38) which is not yet
   #       available on travis.
-  # ref: https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1743
-  - pip install -U "docker~=4.2.0"
+  - pip install "docker~=4.2.0"
+
+  # Fix to avoid rebuilding images already built
+  - remove_docker_mirror_on_travis
+
+
+install:
+  # Setup our minikube based Kubernetes cluster
+  - setup_minikube
+  - setup_kubectl
+  - setup_helm
+  - helm repo add jupyterhub https://jupyterhub.github.io/helm-chart/
+  - helm repo update
+
+  # Install node/npm, which the BinderHub Python package uses as a build step to
+  # generate static HTML/JS to serve.
+  - nvm install 14; nvm use 14
+  # Install NodeJS dependencies and build HTML/CSS/JS in binderhub/static into
+  # a distributable dist/bundle.js, a static resource required by the binderhub
+  # Python package.
+  - npm install
+  - npm run webpack
+
+  # Install development requirements, package requirements, and frozen image
+  # requirements, with an increasing priority.
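+  # ("Increasing priority" means later installs win: if pins conflict, the
+  # frozen image requirements installed last are the versions that stick.)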
+  - pip install -r dev-requirements.txt
+  - pip install .
+  - pip install -r helm-chart/images/binderhub/requirements.txt
+
+  # Print information about the Python environment
   - pip freeze

-script:
-  - export BINDER_TEST_NAMESPACE=binder-test-$TEST
-  - ./ci/test-$TEST
+
+jobs:
+  ## don't wait for the jobs that are allowed to fail to report success
+  ##
+  ## ref: https://docs.travis-ci.com/user/customizing-the-build/#rows-that-are-allowed-to-fail
+  ## ref: https://docs.travis-ci.com/user/customizing-the-build/#fast-finishing
+  ##
+  allow_failures: []
+  fast_finish: true
+
+  ## define individual jobs that can rely on defaults from above
+  include:
+    - stage: test
+      name: main
+      script:
+        - ./testing/local-binder-k8s-hub/install-jupyterhub-chart
+        - await_jupyterhub
+        - pytest -m "not auth" --log-cli-level=10 -vsx --cov binderhub
+
+    - stage: test
+      name: auth
+      script:
+        - ./testing/local-binder-k8s-hub/install-jupyterhub-chart --auth
+        - await_jupyterhub
+        - pytest -m "auth" --log-cli-level=10 -vsx --cov binderhub
+
+    - stage: test
+      name: helm
+      script:
+        # Prepare: builds images and updates default values.yaml to reference
+        # the images
+        - echo "Preparing local BinderHub Helm chart"
+        - helm dependency update ./helm-chart/binderhub
+        - (cd helm-chart && chartpress)
+        - git --no-pager diff
+
+        # Validate: renders Helm chart templates and validates the rendered k8s
+        # manifests against the k8s api-server.
+        - |
+          helm template --validate binderhub-test helm-chart/binderhub \
+            --values testing/k8s-binder-k8s-hub/binderhub-chart-config.yaml \
+            --set config.BinderHub.hub_url=http://$(minikube ip):30902 \
+            --set config.BinderHub.access_token=$GITHUB_ACCESS_TOKEN
+
+        # Install: installs the local BinderHub Helm chart and waits for it to
+        # become ready.
+        - echo "Installing local BinderHub Helm chart with JupyterHub as a dependency into k8s cluster"
+        - |
+          helm upgrade --install binderhub-test helm-chart/binderhub \
+            --values testing/k8s-binder-k8s-hub/binderhub-chart-config.yaml \
+            --set config.BinderHub.hub_url=http://$(minikube ip):30902 \
+            --set config.BinderHub.access_token=$GITHUB_ACCESS_TOKEN
+        - await_binderhub
+
+        # Test
+        - export BINDER_URL=http://$(minikube ip):30901
+        - echo "Running tests against $BINDER_URL"
+        - pytest -vsx -m "remote" --log-cli-level=10 --cov binderhub
+
+
+    - stage: publish
+      name: publish:helm-chart
+      install: # overrides default
+        - setup_helm
+        - pip install chartpress
+      script: # overrides default
+        - ./ci/publish
+      after_failure:
+        - echo "Required dummy override of default 'after_failure' in .travis.yml."
+      after_success:
+        - echo "Required dummy override of default 'after_success' in .travis.yml."
+      env:
+        # DOCKER_USERNAME and DOCKER_PASSWORD are represented as the secure
+        # entries below as encrypted environment variables used in the publish
+        # script to push built docker images to docker hub.
+        #
+        # ref: https://docs.travis-ci.com/user/environment-variables/#encrypting-environment-variables
+        #
+        # encrypted_d8355cc3d845_key and encrypted_d8355cc3d845_iv are also
+        # available as environment variables because they are set in the
+        # TravisCI UI. They are used to decrypt a private SSH key registered
+        # with push rights to our GitHub repo acting as a Helm chart repository.
+ # + # ref: https://travis-ci.com/github/jupyterhub/binderhub/settings + # + - secure: BuO4oUz5YZvWhH919Tk8h3McM60NybLImIwB+0C4cmcDC/Z4uS1Jh8R8UbD3vIkJyjPgd+WuoaYGzzgJfCiS5i5TZZfi5yI0URu67fc044vaS7zSegZWsuN40mP4QNYTb+VdYniav8pqqyPyUpKNbOQ+/YJ+BWrC/ncqSL+P+UR6PE9T3TQ4XDuB45z0B2hhfDWBMpP6KtGae7YOWstIPi8ufiS76jjRzE4ziLqsOSwJRGhRbjJXqdcZeH2d54jUJSzCEGMSo5lxrFL27YOJ6Vuzr0V208AiQe60CyxtAzyiamVoE9U8pYOnv9KRDMeROSdz2HJkGetedNgCHpf0mNUWLZzQ6udzXtKeG2pxKeLDYKSm8Y3GGGa87nWRaWS1dYwCTHLe1r6Jwt6QwuBqqIa0oOMujStTEgbUOLLw5e80kSWqTxb52XnDi5SgOMGNzcylAYJLHFCL5U9ShAyZWGRAy0p7tFycXy44/k7RiqXr6Xur7NuB9bzXkmsDa7qS1t2DXoUA8FlpxfvqaSqVuFSrN16JHQeBOZGEYondQ2NPvi2vNT68Us4saWUxBf8oxF5pctbJYDDOxWpYFq/pza2+Elset9dv5KjCb61qtotLpo4PER4zgvde5/5HRa1CAOJlVfZB1Au3+b1NoVWNnPcwlb2UGut1+bcaDWyPf20= + - secure: uqQrm6T6oGloQIHs1tLtQm3iIZFUtv1lNgtT+oCfYw3MC1CJ5AkKRaClAcVlFVOoCAE1GeCbnkob0i+LkwE+FQ1uw0fH9QyGx0PiEBrT3ilOXtkHvLXzcB/8PqPnIRR+vQnqigX2wsfiy0po99gBeaKiQjfybZ8UVp03M7+peQ4Y/Dxoj0G0nYZPsCo4F5plfdalJQX+ZVi1QlkglTotVxvb8QjrxXUrJ3OgXyVgW5fmkrRY6rWlb7RkSPlxmOhOZ7KM0BghMvox2VAD2jTatW/IEjqqaek8VqyKe1Dhw8wRUwQIwAH0VV1tUslHGcPcpbosGTPsfH7pTSUIa6ZRWd2dymMCpp7NwmFPm38QOZS/psiYNFPM6FvQnyqmtiUiU9MfcwMNJxiAUyTSslficd1Lt/aTTgBJQnK9Zd+/J6uuSc1aMGSgVNaM6eXxyNITXqz067zn8apCJm6UUEVM5FFs5SdfNXycJeV7qBwT8oe2+lvC4YzB2bHVGlnynK4IBPZsSFiBxPTBhcHVZp2FIMoC7cm/2dKW0gKPMho2Glhb7Zxn0mFej7XrT+IllD4yCjYSuSG2hpNMgE0+pc/89F2eLtey+oRAH0p4+K8DXsVCHsb7rSq+i7WDtFhFR0DBRWPG8B//CyLSbr4UoKumShbXYCI1dpSYzATmVyphQc8= + after_failure: - - | - # get pod logs - kubectl get pod --all-namespaces - for pod in $(kubectl get pod --no-headers --namespace=$BINDER_TEST_NAMESPACE | awk '{print $1}'); do - echo $pod - kubectl logs --namespace=$BINDER_TEST_NAMESPACE $pod || echo 'no logs' - done + - full_namespace_report + + after_success: # make sure we are back at the top of the checkout directory no matter # what previous commands might have done or not - cd $TRAVIS_BUILD_DIR - codecov - -env: - matrix: - - TEST=main - - TEST=auth - - TEST=helm - global: - # minikube version: https://github.com/kubernetes/minikube/releases - - MINIKUBE_VERSION=1.14.2 - # kubernetes version: https://github.com/kubernetes/kubernetes/releases - - KUBE_VERSION=1.19.3 - # helm version: https://github.com/helm/helm/releases - - HELM_VERSION=2.16.12 - - # DOCKER_USERNAME and DOCKER_PASSWORD - - secure: BuO4oUz5YZvWhH919Tk8h3McM60NybLImIwB+0C4cmcDC/Z4uS1Jh8R8UbD3vIkJyjPgd+WuoaYGzzgJfCiS5i5TZZfi5yI0URu67fc044vaS7zSegZWsuN40mP4QNYTb+VdYniav8pqqyPyUpKNbOQ+/YJ+BWrC/ncqSL+P+UR6PE9T3TQ4XDuB45z0B2hhfDWBMpP6KtGae7YOWstIPi8ufiS76jjRzE4ziLqsOSwJRGhRbjJXqdcZeH2d54jUJSzCEGMSo5lxrFL27YOJ6Vuzr0V208AiQe60CyxtAzyiamVoE9U8pYOnv9KRDMeROSdz2HJkGetedNgCHpf0mNUWLZzQ6udzXtKeG2pxKeLDYKSm8Y3GGGa87nWRaWS1dYwCTHLe1r6Jwt6QwuBqqIa0oOMujStTEgbUOLLw5e80kSWqTxb52XnDi5SgOMGNzcylAYJLHFCL5U9ShAyZWGRAy0p7tFycXy44/k7RiqXr6Xur7NuB9bzXkmsDa7qS1t2DXoUA8FlpxfvqaSqVuFSrN16JHQeBOZGEYondQ2NPvi2vNT68Us4saWUxBf8oxF5pctbJYDDOxWpYFq/pza2+Elset9dv5KjCb61qtotLpo4PER4zgvde5/5HRa1CAOJlVfZB1Au3+b1NoVWNnPcwlb2UGut1+bcaDWyPf20= - - secure: 
uqQrm6T6oGloQIHs1tLtQm3iIZFUtv1lNgtT+oCfYw3MC1CJ5AkKRaClAcVlFVOoCAE1GeCbnkob0i+LkwE+FQ1uw0fH9QyGx0PiEBrT3ilOXtkHvLXzcB/8PqPnIRR+vQnqigX2wsfiy0po99gBeaKiQjfybZ8UVp03M7+peQ4Y/Dxoj0G0nYZPsCo4F5plfdalJQX+ZVi1QlkglTotVxvb8QjrxXUrJ3OgXyVgW5fmkrRY6rWlb7RkSPlxmOhOZ7KM0BghMvox2VAD2jTatW/IEjqqaek8VqyKe1Dhw8wRUwQIwAH0VV1tUslHGcPcpbosGTPsfH7pTSUIa6ZRWd2dymMCpp7NwmFPm38QOZS/psiYNFPM6FvQnyqmtiUiU9MfcwMNJxiAUyTSslficd1Lt/aTTgBJQnK9Zd+/J6uuSc1aMGSgVNaM6eXxyNITXqz067zn8apCJm6UUEVM5FFs5SdfNXycJeV7qBwT8oe2+lvC4YzB2bHVGlnynK4IBPZsSFiBxPTBhcHVZp2FIMoC7cm/2dKW0gKPMho2Glhb7Zxn0mFej7XrT+IllD4yCjYSuSG2hpNMgE0+pc/89F2eLtey+oRAH0p4+K8DXsVCHsb7rSq+i7WDtFhFR0DBRWPG8B//CyLSbr4UoKumShbXYCI1dpSYzATmVyphQc8=
-  # GITHUB_ACCESS_TOKEN
-  - secure: ugOe1mIyit6BmzuTLtzVM7+edswAES1TfAnGST0EkYv341z0NnqRGdRJYtH1q1rRoZ6ciSnjHdVe8zmentxDwRJSi50888ro8EG7NnvAjG31cdwEgm7Vdv4sEbt7Dp8gYxKb5kWtt9w4IxPdMocpWPeNBmjctX+7QWwvIJZstWDjOtt8BhDufiTTHeiZR3FTNy5ykJXnFXn/WMsZoxI/a3GAgSzV4wKu0OzZYYk3GY20inJKUwxwWgyNbzQXecWlup6REd+YYrq0h4hJESyrtTQwHsijssCuy+mH8fxPpPZuF2WtHAqej0zGVKyUU/xmpCgdIHbjsheIyEE1leYn+USuoWqiHf0gjx2sYvBJP4HizJNPgj67UUNcUfGXzjegy6cDZE8iXW5clDRDkmWgx53VFlVgWV0TgdxtmOFK3IpQVOoaWc4q0e4T+/38rmUiBFnbbEep2q1wwjZojYhyaIrsEG9jOhtLW2rTADc5fdXURYqfPpubM1SCTseT43T86sRfOH3x8o9MSDnVejZ317oPfoZyqrL/FwpW45M68v/RAfvb6mY/5vM7Jw+70trgz4RhLQS55nuqnZ7NwK8ooX27YnvVnx3YisADz1Cf/A/bNixj8pVkJ7TTw/TnR/gTzat2wfsgPTWRtC8hhOyLSFnZh3J5OyeHQQMmdPspilE=
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 9335b2497..dd827aac3 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -3,24 +3,33 @@
 Welcome! As a [Jupyter](https://jupyter.org) project, we follow the [Jupyter contributor guide](https://jupyter.readthedocs.io/en/latest/contributing/content-contributor.html).

-There are several different setups for developing BinderHub, depending on which
-parts of it you want to change: the [documentation](#documentation-changes),
-the [user interface](#user-innterface-changes), or the
-[kubernetes integration](#Kubernetes-integration-changes).
+Depending on what you want to develop, you can set up BinderHub in different ways.
+- [Develop documentation](#develop-documentation).
+- [Develop user interface](#develop-user-interface). A BinderHub webserver is running locally and
+  JupyterHub is mocked; this setup doesn't involve Kubernetes.
+- [Develop Kubernetes integration](#develop-kubernetes-integration). A BinderHub webserver is running locally,
+  and JupyterHub is installed in a Kubernetes cluster.
+- [Develop Helm chart](#develop-helm-chart). The BinderHub Helm chart, with JupyterHub as a
+  dependency, is installed in a Kubernetes cluster.
+
+This document also contains information on [how to run tests](#running-tests) and
+[common maintainer tasks](#common-maintainer-tasks).

-## Documentation changes
+## Develop documentation

-Work on the documentation requires the least amount of setup. You will need
-to have a modern version of [Python](https://www.python.org/). The documentation
-uses the [reStructuredText markup language](http://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html).
+You are assumed to have a modern version of [Python](https://www.python.org/).
+The documentation uses the [reStructuredText markup
+language](http://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html).
+
+1. Clone the BinderHub repository to your local computer and `cd` into it.
-1. Clone the BinderHub repository to your local computer and ```cd``` into it.

   ```bash
   git clone https://github.com/jupyterhub/binderhub
   cd binderhub
   ```
-1. Install BinderHub and the documentation tools:
+
+1. Install BinderHub and the documentation tools:

   ```bash
   python3 -m pip install -r doc/doc-requirements.txt
   ```

@@ -43,234 +52,261 @@
   terminal with `open _build/html/index.html`.

-## User interface changes
+## Develop user interface
+
+Developing BinderHub's user interface can be done without Kubernetes and
+JupyterHub, by mocking that interaction. You are assumed to have a modern version
+of [Python](https://www.python.org/) and [node / npm](https://nodejs.org/)
+installed.

-Work on the user interface requires a medium amount of setup. You will need
-to have a modern version of [Python](https://www.python.org/) and
-[npm](https://www.npmjs.com) installed.
+1. Clone the BinderHub git repository to your local computer and `cd` into it.

-1. Clone the BinderHub repository to your local computer and ```cd``` into it.
   ```bash
   git clone https://github.com/jupyterhub/binderhub
   cd binderhub
   ```

-1. Install BinderHub:
+1. Install BinderHub, the Python package.

   ```bash
   python3 -m pip install -e .
   ```

-1. Install the JavaScript dependencies:
+1. Install the NodeJS dependencies from package.json.

   ```bash
   npm install
   ```

-1. Create the JS and CSS bundles with:
+1. Create the JS and CSS bundles that BinderHub serves to its visitors.

   ```bash
-   npm run webpack
+   npm run webpack:watch
   ```
-   Note: you need to run this every time you make a change to the CSS or JS
-   for it to take effect.

-1. Run it!
+1. Start the BinderHub webserver locally.

   ```bash
-   python3 -m binderhub -f testing/localonly/binderhub_config.py
+   python3 -m binderhub -f testing/local-binder-mocked-hub/binderhub_config.py
   ```

-1. Visit http://localhost:8585 to see it in action.
+1. Visit the BinderHub webserver at http://localhost:8585.

 Building and launching repositories will not work. You can still work on the
-user interface of those parts as BinderHub is configured to fake those
-actions. You can tell you are using the fake builder and launcher from the fact
-that the build will never complete.
+user interface of those parts as BinderHub is configured to fake those actions.
+You can tell you are using the fake builder and launcher from the fact that the
+build will never complete.

-To learn how to set yourself with a BinderHub development environment that
-lets you modify the builder and launcher refer to
-[Kubernetes integration changes](#Kubernetes-integration-changes).
+To learn how to set yourself up with a BinderHub development environment that lets
+you modify the builder and launcher, refer to [Develop Kubernetes
+integration](#develop-kubernetes-integration) or [Develop Helm
+chart](#develop-helm-chart).

+## Develop Kubernetes integration

-## Kubernetes integration changes
+This requires `helm` and a functional Kubernetes cluster. Please do the
+[preliminary Kubernetes setup](#kubernetes-setup) first if you haven't already,
+before continuing here.

-Setting yourself up to make changes to the kubernetes integration of BinderHub
-requires a few one-time setup steps. These steps are described in the
-"One-time installation" section below. Follow those first then return here for
-day to day development procedures.
+With a Kubernetes cluster running, which you can verify with `kubectl version`,
+you can continue.

+1. Install BinderHub as a Python package, along with its development
+   requirements.

-### Day to day development tasks
+   ```bash
+   python3 -m pip install -e . -r dev-requirements.txt
+   ```

-After having setup minikube and helm once, these are the tasks you need for
-every day development.
+1. Install the JupyterHub Helm chart by itself into your Kubernetes cluster's
+   current namespace.

-* Start and stop minikube with `minikube start` and `minikube stop`.
-* Install JupyterHub in minikube with helm `./testing/minikube/install-hub`
-* Setup `docker` to use the same Docker daemon as your minikube cluster `eval $(minikube docker-env)`
-* Start BinderHub `python3 -m binderhub -f testing/minikube/binderhub_config.py`
-* Visit your BinderHub at[http://localhost:8585](http://localhost:8585)
+   ```bash
+   # Append --auth here if you want to develop against a non-public BinderHub
+   # that relies on JupyterHub's configured Authenticator to decide if the users
+   # are allowed access or not.
+   ./testing/local-binder-k8s-hub/install-jupyterhub-chart
+   ```

-To execute most of our test suite you need a running minikube cluster.
-It does not need to have anything installed on it though:
+1. Configure `docker` using environment variables to use the same Docker daemon
+   as your `minikube` cluster. This means images you build are directly
+   available to the cluster.

-```bash
-minikube start
-pytest -svx -m "not auth_test"
-```
+   ```bash
+   eval $(minikube docker-env)
+   ```

-The tests should generate familiar pytest like output and complete in a few
-seconds.
+1. Start BinderHub with the testing config file.

-To execute all the main tests use `./ci/test-main` which will setup a
-JupyterHub on minikube for you. These tests will generate a lot of output and
-take a few minutes to run. The tests will attempt to clean up after themselves
-on your minikube cluster.
+   ```bash
+   python3 -m binderhub -f testing/local-binder-k8s-hub/binderhub_config.py
+   ```

-To execute the tests related to authentication use `./ci/test-auth` which will
-setup a JupyterHub on minikube for you and use configuration files to configure
-your BinderHub to require authentication. These tests will generate a lot of
-output and take a few minutes to run. The tests will attempt to clean up after
-themselves on your minikube cluster.
+1. Visit [http://localhost:8585](http://localhost:8585)

-To manually test changes to the Helm chart you will have to build the chart,
-all images involved and deploy it locally. Steps to do this:
+1. Congratulations, you can now make changes and see how they influence the
+   deployment. You may be required to restart the BinderHub depending on what
+   you change. You can also start running `pytest` tests to verify the
+   deployment functions as it should.

-1. start minikube
-1. setup docker to user the minikube dockerd `eval $(minikube docker-env)`
-1. build the helm chart `cd helm-chart && chartpress && cd ..`
-1. install the BinderHub chart with
-```
-helm install \
-    --name binder-test \
-    --namespace binder-test-helm \
-    helm-chart/binderhub \
-    -f helm-chart/minikube-binder.yaml
-```
-You can now access your BinderHub at: `http://192.168.99.100:30901`. If your
-minikube instance has a different IP use `minikube ip` to find it. You will
-have to use that IP in two places. Add `--set config.BinderHub.hub_url: http://$IP:30902`
-to your `helm install` command and access your BinderHub at `http://$IP:30901`.
-Replace `$IP` with the output of `minikube ip`.
+### Cleanup resources

-To remove the deployment again: `helm delete --purge binder-test`.
+1. To clean up the JupyterHub Helm chart you have installed in Kubernetes...
+   ```bash
+   helm delete binderhub-test
+   ```

-### One-time installation
+1. To stop running the Kubernetes cluster...

-To run the full BinderHub it needs to have access to a kubernetes cluster
-with a JupyterHub installed on it. This is what we will setup in this section.
-All the steps are given as command-line commands for Ubuntu systems. They are
-used as a common denominator that can be translated into the correct commands
-on your local system.
+   ```bash
+   minikube stop
+   ```

-Before you begin, there are a few utilities that need to be installed:
-```bash
-sudo apt install python3 python3-pip npm curl
-```
+## Develop Helm chart

-If you a on linux, you may additionally need to install socat for port forwarding:
+This requires `helm` and a functional Kubernetes cluster. Please do the
+[preliminary Kubernetes setup](#kubernetes-setup) first if you haven't already,
+before continuing here.

-```bash
-sudo apt install socat
-```
+With a Kubernetes cluster running, which you can verify with `kubectl version`,
+you can continue.
+
+1. Install development requirements, including `pytest` and `chartpress`.

-1. Clone the binderhub repository to your local computer and ```cd``` into it.
   ```bash
-   git clone https://github.com/jupyterhub/binderhub
-   cd binderhub
+   python3 -m pip install -r dev-requirements.txt
   ```

-1. [Install Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/)
-   to run Kubernetes locally.
+1. Configure `docker` using environment variables to use the same Docker daemon
+   as your `minikube` cluster. This means images you build are directly
+   available to the cluster.
+
+   ```bash
+   eval $(minikube docker-env)
+   ```

-   To start your minikube cluster , run the command: `minikube start`. This
-   starts a local kubernetes cluster inside a virtual machine. This command
-   assumes that you have already installed one of the VM drivers: virtualbox,
-   xhyve or KVM2.
-1. Install helm to manage installing JupyterHub and BinderHub on your cluster,
+1. Build the docker images referenced by the Helm chart and update its default
+   values to reference these images.

   ```bash
-   curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
+   (cd helm-chart && chartpress)
   ```

-   [Alternative methods](https://docs.helm.sh/using_helm/#installing-the-helm-client)
-   for helm installation exist if you prefer installing without using the script.
+1. Validate, and then install, the Helm chart defined in helm-chart/binderhub.

-1. Initialize helm in minikube. This command initializes the local CLI and
-   installs Tiller on your kubernetes cluster in one step:
+   This validation step does not modify your Kubernetes cluster; it only uses
+   the cluster's API server to check that the Helm chart's rendered resources
+   are valid Kubernetes resources.

   ```bash
-   helm init
+   helm template --validate binderhub-test helm-chart/binderhub \
+     --values testing/k8s-binder-k8s-hub/binderhub-chart-config.yaml \
+     --set config.BinderHub.hub_url=http://$(minikube ip):30902 \
+     --set config.BinderHub.access_token=$GITHUB_ACCESS_TOKEN
   ```

-1. Add the JupyterHub helm charts repo:
+   If the validation succeeds, go ahead and upgrade/install the Helm chart.
   ```bash
   helm upgrade --install binderhub-test helm-chart/binderhub \
     --values testing/k8s-binder-k8s-hub/binderhub-chart-config.yaml \
     --set config.BinderHub.hub_url=http://$(minikube ip):30902 \
     --set config.BinderHub.access_token=$GITHUB_ACCESS_TOKEN
+
+   echo "BinderHub inside the Minikube based Kubernetes cluster is starting up at http://$(minikube ip):30901"
   ```

-1. Install BinderHub and its development requirements:
+1. Congratulations, you can now make changes and run the step above again to see
+   how they influence the deployment. You can also start running `pytest` tests
+   to verify the deployment functions as it should.

-   ```bash
-   python3 -m pip install -e . -r dev-requirements.txt
-   ```
+### Cleanup resources

-1. Install JupyterHub in minikube with helm
+1. To clean up resources you have installed and start fresh...

   ```bash
-   ./testing/minikube/install-hub
+   helm delete binderhub-test
   ```

-1. Before starting your local deployment run:
+1. To stop running the Kubernetes cluster...

   ```bash
-   eval $(minikube docker-env)
+   minikube stop
   ```

-   This command sets up `docker` to use the same Docker daemon as your minikube
-   cluster does. This means images you build are directly available to the
-   cluster. Note: when you no longer wish to use the minikube host, you can
-   undo this change by running:
+## Kubernetes setup
+
+A fully functional BinderHub needs to have access to a Kubernetes cluster with a
+JupyterHub installed on it. You can either run BinderHub as a Python webserver
+locally and install JupyterHub on its own in the Kubernetes cluster, or install
+the entire BinderHub Helm chart, which installs the JupyterHub Helm chart as a
+dependency.
+
+All the steps are given as command-line commands for Ubuntu systems. They are
+used as a common denominator that can be translated into the correct commands on
+your local system.
+
+You are assumed to have a modern version of [Python](https://www.python.org/)
+and [node / npm](https://nodejs.org/) installed, as well as the command line
+tool `curl`.
+
+1. [Install Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/).
+
+1. Start a minikube Kubernetes cluster inside a virtual machine (virtualbox,
+   xhyve, or KVM2).

   ```bash
-   eval $(minikube docker-env -u)
+   minikube start
   ```

-1. Start BinderHub with the testing config file:
+1. Install `kubectl` - the CLI to interact with a Kubernetes cluster.

-   ```bash
-   python3 -m binderhub -f testing/minikube/binderhub_config.py
-   ```
+   ```bash
+   curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl"
+   chmod +x kubectl
+   sudo mv kubectl /usr/local/bin/
+   ```

-1. Visit [http://localhost:8585](http://localhost:8585)
+   Here are the [official installation instructions](https://kubernetes.io/docs/tasks/tools/install-kubectl/).

-All features should work, including building and launching of repositories.
+1. Install `helm` - the Kubernetes package manager.

+   ```bash
+   curl -sf https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
+   ```

+   Here are the [official installation instructions](https://helm.sh/docs/intro/install/).

+1. Let `helm` know about the official JupyterHub Helm chart repository.

+   ```bash
+   helm repo add --force-update jupyterhub https://jupyterhub.github.io/helm-chart/
+   helm repo update
+   ```

+1. Clone the binderhub git repository to your local computer and `cd` into it.

+   ```bash
+   git clone https://github.com/jupyterhub/binderhub
+   cd binderhub
+   ```

-### Tip: Use local repo2docker version

-BinderHub runs repo2docker in a container.
-For testing the combination of an unreleased repo2docker feature with BinderHub, you can use a locally build
-You can configure the image in the file `testing/minikube/binderhub_config.py` with the following line:

-```python
-c.BinderHub.build_image = 'jupyter-repo2docker:my_image_tag'
-```

-**Important**: the image must be build using the same Docker daemon as the minikube cluster, otherwise you get an error _"Failed to pull image [...] repository does not exist or may require 'docker login'"_.

-### Tip: Enable debug logging

-In the file `testing/minikube/binderhub_config.py` add the following line:

-```python
-c.BinderHub.debug = True
-```

+### Tip: Use local repo2docker version
+
+BinderHub runs repo2docker in a container. For testing the combination of an
+unreleased repo2docker feature with BinderHub, you can use a locally built
+repo2docker image. You can configure the image in the file
+`testing/local-binder-k8s-hub/binderhub_config.py` with the following line:

```python
c.BinderHub.build_image = 'jupyter-repo2docker:my_image_tag'
```

+**Important**: the image must be built using the same Docker daemon as the
+minikube cluster, otherwise you get an error _"Failed to pull image [...]
+repository does not exist or may require 'docker login'"_.

 ### Tip: Increase your GitHub API limit

 By default, GitHub has a limit of 60 API requests per hour. We recommend
@@ -286,35 +322,52 @@
 underlying VM, which might be too low to run the builder successfully. You may
 run `minikube start --memory 8192` to start Minikube with an 8GiB VM
 underneath.

-## Common maintainer tasks

-These are tasks that BinderHub maintainers perform.

-### Bumping the JupyterHub Helm Chart version
+## Running tests

-BinderHub uses the [JupyterHub Helm Chart](https://jupyterhub.github.io/helm-chart/)
-to install the proper version of JupyterHub. The version that is used is specified
-in the BinderHub Meta Chart, `helm-chart/binderhub/requirements.yaml`.
+This git repository contains `pytest` based tests that you can run locally.
+Depending on your development setup, you may want to run different kinds of
+tests. You can get some hints on what tests to run, and how, by inspecting
+`.travis.yml`.

-To bump the version of JupyterHub that BinderHub uses, go to the [JupyterHub Helm Chart](https://jupyterhub.github.io/helm-chart/) version page, find the release
-hash that you want (e.g. `0.6.0-2c53640`) and update the following field in
-the `requirements.yaml` file:
+### Environment variables influencing tests
+
+- `BINDER_URL`: An address of an already running BinderHub as reachable from the
+  tests. If this is set, the test suite will not start temporary local BinderHub
+  servers, but will instead interact with the remote BinderHub.
+- `GITHUB_ACCESS_TOKEN`: A GitHub access token to help avoid quota limitations
+  for anonymous users. It is used to enable certain tests making many calls to
+  the GitHub API.

-   ```yaml
-   dependencies:
-     version: ""
-   ```
+### Pytest marks labelling tests
+
+- `remote`: Tests for which the BinderHub being tested is already running
+  somewhere.
+- `github_api`: Tests that communicate with the GitHub API a lot.
+- `auth`: Tests related to BinderHub's usage of JupyterHub as an OAuth2 Identity
+  Provider (IdP) for non-public access.
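As an illustration of how these marks combine with the environment variables (a
sketch, not part of the test suite; adjust the URL to wherever your BinderHub is
reachable):

```bash
# Point the test suite at an already running BinderHub, then select tests by
# mark, skipping the GitHub API heavy tests when no access token is available.
export BINDER_URL=http://$(minikube ip):30901
pytest -vsx -m "remote and not github_api" --log-cli-level=10
```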
-
-**Make sure to double-check that there are no breaking changes in JupyterHub**.
-Sometimes JupyterHub introduces breaking changes to its helm chart (such as the
-structure of particular fields). Make sure that none of these changes have been
-introduced, particularly when bumping major versions of JupyterHub.
+
+## Common maintainer tasks
+
+These are tasks that BinderHub maintainers perform.
+
+
+### Bumping the JupyterHub Helm chart version
+
+The BinderHub Helm chart depends on the [JupyterHub Helm
+chart](https://jupyterhub.github.io/helm-chart/), and its version is pinned
+within `helm-chart/binderhub/requirements.yaml`. It is straightforward to update
+it with another version from the [JupyterHub Helm chart
+repository](https://jupyterhub.github.io/helm-chart/).
+
+Use the [JupyterHub Helm chart's
+changelog](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/master/CHANGELOG.md)
+to prepare for breaking changes associated with the version bump.

 ### Releasing

-Checklist for creating BinderHub releases. For PyPI packaging read https://packaging.python.org/guides/distributing-packages-using-setuptools/#uploading-your-project-to-pypi
+#### BinderHub Python package release checklist

 * update/close the `CHANGES.md` for this release (see below)
 * create a git tag for the release
@@ -327,7 +380,10 @@ Checklist for creating BinderHub releases. For PyPI packaging read https://packa
 * create a new release on https://github.com/jupyterhub/binderhub/releases
 * add a new section at the top of the change log for future releases

-### Bumping the changelog
+For more details, see this [guide on uploading a package to
+PyPI](https://packaging.python.org/guides/distributing-packages-using-setuptools/#uploading-your-project-to-pypi).
+
+#### Updating the changelog

 As BinderHub does not have a typical semver release schedule, we try to update
 the changelog in `CHANGES.md` every three months.
A useful tool diff --git a/binderhub/tests/conftest.py b/binderhub/tests/conftest.py index 451686733..11a2f851f 100644 --- a/binderhub/tests/conftest.py +++ b/binderhub/tests/conftest.py @@ -5,6 +5,7 @@ import inspect import json import os +import subprocess import time from urllib.parse import urlparse from unittest import mock @@ -23,17 +24,21 @@ here = os.path.abspath(os.path.dirname(__file__)) root = os.path.join(here, os.pardir, os.pardir) -minikube_testing_config = os.path.join(root, 'testing', 'minikube', 'binderhub_config.py') -minikube_testing_auth_config = os.path.join(root, 'testing', 'minikube', 'binderhub_auth_config.py') +binderhub_config_path = os.path.join(root, 'testing/local-binder-k8s-hub/binderhub_config.py') +binderhub_config_auth_additions_path = os.path.join(root, 'testing/local-binder-k8s-hub/binderhub_config_auth_additions.py') -TEST_NAMESPACE = os.environ.get('BINDER_TEST_NAMESPACE') or 'binder-test' -KUBERNETES_AVAILABLE = False +# These are automatically determined +K8S_AVAILABLE = False +# get the current context's namespace or assume it is "default" +K8S_NAMESPACE = subprocess.check_output([ + "kubectl", "config", "view", "--minify", "--output", "jsonpath={..namespace}" +], text=True).strip() or "default" ON_TRAVIS = os.environ.get('TRAVIS') -# set BINDER_TEST_URL to run tests against an already-running binderhub +# set BINDER_URL to run tests against an already-running binderhub # this will skip launching BinderHub internally in the app fixture -BINDER_URL = os.environ.get('BINDER_TEST_URL') +BINDER_URL = os.environ.get('BINDER_URL') REMOTE_BINDER = bool(BINDER_URL) @@ -43,7 +48,7 @@ def pytest_configure(config): """ # register our custom markers config.addinivalue_line( - "markers", "auth_test: mark test to run only on auth environments" + "markers", "auth: mark test to run only on auth environments" ) config.addinivalue_line( "markers", "remote: mark test to run only on remote environments" @@ -119,18 +124,17 @@ def _binderhub_config(): Currently separate from the app fixture so that it can have a different scope (only once per session). """ - cfg = PyFileConfigLoader(minikube_testing_config).load_config() - cfg.BinderHub.build_namespace = TEST_NAMESPACE - global KUBERNETES_AVAILABLE + cfg = PyFileConfigLoader(binderhub_config_path).load_config() + global K8S_AVAILABLE try: kubernetes.config.load_kube_config() except Exception: cfg.BinderHub.builder_required = False - KUBERNETES_AVAILABLE = False + K8S_AVAILABLE = False if ON_TRAVIS: pytest.fail("Kubernetes should be available on Travis") else: - KUBERNETES_AVAILABLE = True + K8S_AVAILABLE = True if REMOTE_BINDER: return @@ -150,7 +154,7 @@ def _binderhub_config(): class RemoteBinderHub(object): """Mock class for the app fixture when Binder is remote - Has a URL for the binder location and a configured BinnderHub instance + Has a URL for the binder location and a configured BinderHub instance so tests can look at the configuration of the hub. Note: this only gives back the default configuration. 
It could be that the @@ -202,7 +206,7 @@ def app(request, io_loop, _binderhub_config): if hasattr(request, 'param') and request.param is True: # load conf for auth test - cfg = PyFileConfigLoader(minikube_testing_auth_config).load_config() + cfg = PyFileConfigLoader(binderhub_config_auth_additions_path).load_config() _binderhub_config.merge(cfg) bhub = BinderHub.instance(config=_binderhub_config) bhub.initialize([]) @@ -221,14 +225,14 @@ def cleanup(): return bhub -def cleanup_pods(namespace, labels): - """Cleanup pods in a namespace that match the given labels""" +def cleanup_pods(labels): + """Cleanup pods in current namespace that match the given labels""" kube = kubernetes.client.CoreV1Api() def get_pods(): - """Return list of pods matching given labels""" + """Return list of pods matching given labels""" return [ - pod for pod in kube.list_namespaced_pod(namespace).items + pod for pod in kube.list_namespaced_pod(namespace=K8S_NAMESPACE).items if all( pod.metadata.labels.get(key) == value for key, value in labels.items() @@ -240,8 +244,8 @@ def get_pods(): print(f"deleting pod {pod.metadata.name}") try: kube.delete_namespaced_pod( + namespace=K8S_NAMESPACE, name=pod.metadata.name, - namespace=namespace, body=kubernetes.client.V1DeleteOptions(grace_period_seconds=0), ) except kubernetes.client.rest.ApiException as e: @@ -260,50 +264,33 @@ def get_pods(): @pytest.fixture(scope='session') def cleanup_binder_pods(request): - """Cleanup running binders. - - Fires at beginning and end of session - """ - if not KUBERNETES_AVAILABLE: - # kubernetes not available, nothing to do + """Cleanup running user sessions at the beginning and end of a session.""" + if not K8S_AVAILABLE: return - def cleanup(): - return cleanup_pods(TEST_NAMESPACE, - {'component': 'singleuser-server'}) + return cleanup_pods({'component': 'singleuser-server'}) cleanup() request.addfinalizer(cleanup) -@pytest.fixture -def needs_launch(app, cleanup_binder_pods): - """Fixture to skip tests if launch is unavailable""" - if not BINDER_URL and not app.hub_url: - raise pytest.skip("test requires launcher (jupyterhub)") - - @pytest.fixture(scope='session') def cleanup_build_pods(request): - if not KUBERNETES_AVAILABLE: - # kubernetes not available, nothing to do + """Cleanup running build pods at the beginning and end of a session.""" + if not K8S_AVAILABLE: return - kube = kubernetes.client.CoreV1Api() - try: - kube.create_namespace( - kubernetes.client.V1Namespace(metadata={'name': TEST_NAMESPACE}) - ) - except kubernetes.client.rest.ApiException as e: - # ignore 409: already exists - if e.status != 409: - raise - def cleanup(): - return cleanup_pods(TEST_NAMESPACE, - {'component': 'binderhub-build'}) + return cleanup_pods({'component': 'binderhub-build'}) cleanup() request.addfinalizer(cleanup) +@pytest.fixture +def needs_launch(app, cleanup_binder_pods): + """Fixture to skip tests if launch is unavailable""" + if not BINDER_URL and not app.hub_url: + raise pytest.skip("test requires launcher (jupyterhub)") + + @pytest.fixture def needs_build(app, cleanup_build_pods): """Fixture to skip tests if build is unavailable""" diff --git a/binderhub/tests/test_auth.py b/binderhub/tests/test_auth.py index 243a00b75..51462eab2 100644 --- a/binderhub/tests/test_auth.py +++ b/binderhub/tests/test_auth.py @@ -25,7 +25,7 @@ def use_session(): ], indirect=['app'] # send param True to app fixture, so that it loads authentication configuration ) -@pytest.mark.auth_test +@pytest.mark.auth async def test_auth(app, path, authenticated, 
use_session): url = f'{app.url}{path}' r = await async_requests.get(url) diff --git a/ci/common b/ci/common new file mode 100755 index 000000000..229459412 --- /dev/null +++ b/ci/common @@ -0,0 +1,90 @@ +#!/bin/sh +# Use https://www.shellcheck.net/ to reduce mistakes if you make changes to this file. + +remove_docker_mirror_on_travis() { + # This is a workaround to an issue caused by the existence of a docker + # registry mirror in our CI environment. Without this fix that removes the + # mirror, chartpress fails to realize the existence of already built images + # and rebuilds them. + # + # ref: https://github.com/moby/moby/issues/39120 + sudo cat /etc/docker/daemon.json + echo '{"mtu": 1460}' | sudo dd of=/etc/docker/daemon.json + sudo systemctl restart docker + docker ps -a +} + +setup_kubectl () { + echo "setup kubectl" + curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v"${KUBE_VERSION}"/bin/linux/amd64/kubectl + chmod +x kubectl + sudo mv kubectl /usr/local/bin/ +} + +setup_helm () { + echo "setup helm ${HELM_VERSION}" + curl -sf https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | DESIRED_VERSION=v${HELM_VERSION} bash +} + +setup_minikube () { + # install conntrack for minikube with k8s 1.18.2 + # install libgnutls28-dev for pycurl + sudo apt-get update + sudo apt-get -y install conntrack libgnutls28-dev + + echo "setup minikube" + curl -Lo minikube https://storage.googleapis.com/minikube/releases/v"${MINIKUBE_VERSION}"/minikube-linux-amd64 + chmod +x minikube + sudo mv minikube /usr/local/bin/ + + echo "start minikube" + sudo CHANGE_MINIKUBE_NONE_USER=true minikube start --vm-driver=none --kubernetes-version=v"${KUBE_VERSION}" + minikube update-context +} + +await_jupyterhub() { + kubectl rollout status --watch --timeout 300s deployment/proxy \ + && kubectl rollout status --watch --timeout 300s deployment/hub \ + && ( + if kubectl get deploy/autohttps > /dev/null 2>&1; then + kubectl rollout status --watch --timeout 300s deployment/autohttps || exit 1 + fi + ) +} + +await_binderhub() { + await_jupyterhub + kubectl rollout status --watch --timeout 300s deployment/binder +} + +full_namespace_report () { + # list config (secret,configmap) + kubectl get secret,cm + # list networking (service,ingress) + kubectl get svc,ing + # list workloads (deployment,statefulset,daemonset,pod) + kubectl get deploy,sts,ds,pod + + # if any pod has any non-ready -> show its containers' logs + kubectl get pods -o json \ + | jq ' + .items[] + | select( + any(.status.containerStatuses[]?; .ready == false) + ) + | .metadata.name' \ + | xargs --max-args 1 --no-run-if-empty \ + sh -c 'printf "\nPod with non-ready container detected\n - Logs of $0:\n"; kubectl logs --all-containers $0' + + # if any pods that should be scheduled by the user-scheduler are pending -> + # show user-scheduler's logs + ( + kubectl get pods -l "component in (user-placeholder,singleuser-server)" -o json \ + | jq -r ' + .items[] + | select(.status.phase == "Pending") + | .metadata.name + ' + ) | xargs --max-args 1 --no-run-if-empty --max-lines \ + sh -c 'printf "\nPending user pod detected ($0)\n - Logs of deploy/user-scheduler:\n"; kubectl logs --all-containers deploy/user-scheduler' +} diff --git a/ci/install.sh b/ci/install.sh deleted file mode 100755 index 77633f60e..000000000 --- a/ci/install.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash -set -ex - -mkdir -p bin - -# install conntrack for minikube with k8s 1.18.2 -# install libgnutls28-dev for pycurl -# install socat for helm2 -sudo 
apt-get update -sudo apt-get -y install conntrack libgnutls28-dev socat - -# install kubectl, minikube -# based on https://blog.travis-ci.com/2017-10-26-running-kubernetes-on-travis-ci-with-minikube -echo "installing kubectl" -curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v${KUBE_VERSION}/bin/linux/amd64/kubectl -chmod +x kubectl -mv kubectl bin/ - -echo "installing minikube" -curl -Lo minikube https://storage.googleapis.com/minikube/releases/v${MINIKUBE_VERSION}/minikube-linux-amd64 -chmod +x minikube -mv minikube bin/ - -echo "starting minikube with RBAC" -sudo CHANGE_MINIKUBE_NONE_USER=true $PWD/bin/minikube start --vm-driver=none --kubernetes-version=v${KUBE_VERSION} -minikube update-context - -echo "waiting for kubernetes" -JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}' -until kubectl get nodes -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do - sleep 1 -done -kubectl get nodes - -# create clusterrolebinding needed for RBAC -kubectl create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default - -echo "installing helm" -curl -ssL https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz \ - | tar -xz -C bin --strip-components 1 linux-amd64/helm -chmod +x bin/helm - -kubectl --namespace kube-system create sa tiller -kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller -helm init --service-account tiller - -echo "waiting for tiller" -kubectl --namespace=kube-system rollout status --watch deployment/tiller-deploy - -echo "installing git-crypt" -curl -L https://github.com/minrk/git-crypt-bin/releases/download/0.5.0/git-crypt > bin/git-crypt -echo "46c288cc849c23a28239de3386c6050e5c7d7acd50b1d0248d86e6efff09c61b bin/git-crypt" | shasum -a 256 -c - -chmod +x bin/git-crypt diff --git a/ci/publish b/ci/publish new file mode 100755 index 000000000..39864ed76 --- /dev/null +++ b/ci/publish @@ -0,0 +1,42 @@ +#!/bin/bash +# This script publishes the Helm chart to the JupyterHub Helm chart repo and +# pushes associated built docker images to Docker hub using chartpress. +# -------------------------------------------------------------------------- + +# Exit on error and raise an error for unset environment variables +set -eu + +# Decrypt a private SSH key having its public key registered on GitHub. It will +# be used to establish an identity with rights to push to the git repository +# hosting our Helm charts: https://github.com/jupyterhub/helm-chart +openssl aes-256-cbc -K $encrypted_d8355cc3d845_key -iv $encrypted_d8355cc3d845_iv -in ci/publish-id_rsa.enc -out ci/id_rsa -d +chmod 0400 ci/id_rsa + +# Configure docker with credentials to let chartpress push images to docker hub. +docker login -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}" + +# Activate logging of bash commands now that the sensitive stuff is done +set -x + +# As chartpress uses git to push to our Helm chart repository, we configure +# git ahead of time to use the identity we decrypted earlier. +export GIT_SSH_COMMAND="ssh -i ${PWD}/ci/id_rsa" + +if [ "${TRAVIS_TAG:-}" == "" ]; then + # Using --long, we are ensured to get a build suffix, which ensures we don't + # build the same tag twice. Using --extra-message, we help automation like + # henchbot to submit update PRs to jupyterhub/mybinder.org-deploy. 
+  #
+  # ref: https://github.com/jupyterhub/chartpress#usage
+  # ref: https://github.com/henchbot/mybinder.org-upgrades
+  LATEST_COMMIT_TITLE=$(git log -1 --pretty=%B | head -n1 | sed 's/^.*\(#[0-9]*\).*/\1/')
+  chartpress --push --publish-chart --long --extra-message "${TRAVIS_REPO_SLUG}${LATEST_COMMIT_TITLE}"
+else
+  # Setting a tag explicitly enforces a rebuild if this tag had already been
+  # built and we wanted to override it.
+  chartpress --push --publish-chart --tag "${TRAVIS_TAG}"
+fi
+
+# Let us log the changes chartpress did; they should include replacements for
+# fields in values.yaml, such as what tag for various images we are using.
+git --no-pager diff
diff --git a/ci/test-auth b/ci/test-auth
deleted file mode 100755
index 2db0a4054..000000000
--- a/ci/test-auth
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-set -ex
-./testing/minikube/install-hub --auth
-# DEBUG: give the hub a chance to wake up
-sleep 30
-export ASYNC_TEST_TIMEOUT=15
-pytest -m "auth_test" --log-cli-level=10 -vsx --cov binderhub
-helm delete --purge binder-test-hub
diff --git a/ci/test-helm b/ci/test-helm
deleted file mode 100755
index c27c45cc7..000000000
--- a/ci/test-helm
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/bin/bash
-# test helm deployment
-# - build and install helm chart
-# - run tests marked with 'remote'
-
-set -ex
-
-# Is there a standard interface name?
-for iface in eth0 ens4 enp0s3; do
-    IP=$(ifconfig $iface | grep 'inet' | cut -d: -f2 | awk '{print $2}');
-    if [ -n "$IP" ]; then
-        echo "IP: $IP"
-        break
-    fi
-done
-if [ -z "$IP" ]; then
-    echo "Failed to get IP, current interfaces:"
-    ifconfig -a
-    exit 2
-fi
-export BINDER_TEST_URL=http://$IP:30901
-export HUB_URL=http://$IP:30902
-
-cat <<EOF > helm-chart/travis-binder.yaml
-config:
-  BinderHub:
-    hub_url: http://$IP:30902
-EOF
-
-if [[ ! -z "$GITHUB_ACCESS_TOKEN" ]]; then
-    cat <<EOF >> helm-chart/travis-binder.yaml
-  GitHubRepoProvider:
-    access_token: '$GITHUB_ACCESS_TOKEN'
-EOF
-fi
-
-# FIXME: Move to a dedicated build stage in travis like in z2jh
-#
-# ref: https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/master/.travis.yml
-if [[ "$TRAVIS_PULL_REQUEST" == "false" && "$TRAVIS_BRANCH" == "master" ]]; then
-    # Decrypt a private SSH key having its public key registered on GitHub. It will
-    # be used to establish an identity with rights to push to the git repository
-    # hosting our Helm charts: https://github.com/jupyterhub/helm-chart
-    openssl aes-256-cbc -K $encrypted_d8355cc3d845_key -iv $encrypted_d8355cc3d845_iv -in ci/publish-id_rsa.enc -out ci/id_rsa -d
-    chmod 0400 ci/id_rsa
-    # As chartpress uses git to push to our Helm chart repository, we configure
-    # git ahead of time to use the identity we decrypted earlier.
-    export GIT_SSH_COMMAND="ssh -i ${PWD}/ci/id_rsa"
-
-    # As chartpress will push images to docker hub we need to authenticate
-    # ourselves.
-    docker login -u ${DOCKER_USERNAME} -p "${DOCKER_PASSWORD}"
-
-    if [ "${TRAVIS_TAG:-}" == "" ]; then
-        # Using --long, we are ensured to get a build suffix, which ensures we don't
-        # build the same tag twice. The --extra-message influences the additional
-        # text to the commit message on the gh-pages branch of
-        # jupyterhub/helm-chart.
-        #
-        # ref: https://github.com/jupyterhub/helm-chart
-        PUSH="--push --publish-chart --long --extra-message \"${TRAVIS_REPO_SLUG}$(git log -1 --pretty=%B | head -n1 | sed 's/^.*\(#[0-9]*\).*/\1/')\""
-    else
-        # Setting a tag explicitly enforces a rebuild if this tag had already been
-        # built and we wanted to override it.
- PUSH="--push --publish-chart --tag ${TRAVIS_TAG}" - fi -else - PUSH="" -fi - - - -echo "building helm chart" -cd helm-chart -helm repo add jupyterhub https://jupyterhub.github.io/helm-chart -helm dependency update binderhub -chartpress ${PUSH} -cd .. -# git diff will show us the result of the chartpress render. -# This should only include the tags for chartpress images. -git diff -docker images | sort - -# smoke test helm install -echo "installing binderhub helm chart" - -cat helm-chart/travis-binder.yaml - -helm install \ - --name binder-test \ - --namespace binder-test-helm \ - helm-chart/binderhub \ - -f helm-chart/minikube-binder.yaml \ - -f helm-chart/travis-binder.yaml - -# wait for helm deploy to come up -echo "waiting for pods to become ready" -for i in {1..100}; do - kubectl get pod --all-namespaces - kubectl --namespace=$BINDER_TEST_NAMESPACE get deployment -o name | xargs -L1 kubectl --namespace=$BINDER_TEST_NAMESPACE rollout status && break - sleep 3 -done -kubectl --namespace=$BINDER_TEST_NAMESPACE get deployment -o name | xargs -L1 kubectl --namespace=$BINDER_TEST_NAMESPACE rollout status - - -echo "waiting for servers to become responsive" -until curl --fail $BINDER_TEST_URL > /dev/null; do - sleep 1 -done -until curl --fail $HUB_URL> /dev/null; do - sleep 1 -done - -echo "running tests against $BINDER_TEST_URL" -# run a few tests again, this time against the helm-installed binder -sleep 10 -export ASYNC_TEST_TIMEOUT=15 -mark="remote" -if [[ -z "$GITHUB_ACCESS_TOKEN" ]]; then - mark="$mark and not github_api" -fi -pytest -vsx -m "$mark" --log-cli-level=10 --cov binderhub -helm delete --purge binder-test diff --git a/ci/test-main b/ci/test-main deleted file mode 100755 index 7be60a129..000000000 --- a/ci/test-main +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -# main test script: runs the basic Python tests, -# with JupyterHub launched via helm - -# The tests do not do well if there are pods from a previous test run. -# The 'helm delete' line at the end of this script is not guaranteed -# to execute if there are failures. So do it here too just to be sure. -x=$(helm ls --namespace binder-test-hub) -if [ ! -z "$x" ]; then - helm delete --purge binder-test-hub -fi -set -ex -./testing/minikube/install-hub -# DEBUG: give the hub a chance to wake up -sleep 10 -export ASYNC_TEST_TIMEOUT=15 -pytest -m "not auth_test" --log-cli-level=10 -vsx --cov binderhub -helm delete --purge binder-test-hub diff --git a/dev-requirements.txt b/dev-requirements.txt index d6a135a26..cd0363586 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -1,5 +1,5 @@ beautifulsoup4 -chartpress==0.6.* +chartpress==0.7.* click codecov html5lib diff --git a/doc/zero-to-binderhub/setup-prerequisites.rst b/doc/zero-to-binderhub/setup-prerequisites.rst index beffb4ec2..7cb1780bd 100644 --- a/doc/zero-to-binderhub/setup-prerequisites.rst +++ b/doc/zero-to-binderhub/setup-prerequisites.rst @@ -3,152 +3,62 @@ Set up the prerequisites ======================== -BinderHub is built to run on top of Kubernetes, a distributed cluster manager. -It uses a JupyterHub to launch and manage user servers, as well as a -docker registry to cache images. +BinderHub is built to run in a `Kubernetes cluster `_. It +relies on JupyterHub to launch and manage user servers, as well as a docker +registry to cache docker images it builds. -To create your own BinderHub, you'll first need to set up a Kubernetes Cluster -and then configure its various components correctly. The following instructions -will assist you in doing so. - -.. 
note:: - - BinderHub uses a JupyterHub running on Kubernetes for much of its functionality. - For information on setting up and customizing your JupyterHub, we recommend reading - the `Zero to JupyterHub Guide `_. +To deploy your own BinderHub, you'll first need to set up a Kubernetes cluster. +The following instructions will assist you in doing so. Setting up a Kubernetes cluster ------------------------------- -.. note:: - - BinderHub is built to be cloud agnostic, and can run on various cloud - providers (as well as bare metal). If you would like to help with adding - instructions for other cloud providers or improving the current instructions, - `please contact us `_! - -First, deploy a Kubernetes cluster by following the -`instructions in the Zero to JupyterHub guide `_. +First, deploy a Kubernetes cluster by following the `instructions in the Zero to +JupyterHub guide +`_. When you're done, move on to the next section. - Installing Helm --------------- `Helm `_, the package manager for Kubernetes, is a useful tool for: installing, upgrading and managing applications on a Kubernetes cluster. -Helm packages are called *charts*. -We will be installing and managing JupyterHub on -our Kubernetes cluster using a Helm chart. - -Charts are abstractions describing how to install packages onto a Kubernetes -cluster. When a chart is deployed, it works as a templating engine to populate -multiple `yaml` files for package dependencies with the required variables, and -then runs `kubectl apply` to apply the configuration to the resource and install -the package. - -Helm has two parts: a client (`helm`) and a server (`tiller`). Tiller runs -inside of your Kubernetes cluster as a pod in the kube-system namespace. Tiller -manages both, the *releases* (installations) and *revisions* (versions) of charts deployed -on the cluster. When you run `helm` commands, your local Helm client sends -instructions to `tiller` in the cluster that in turn make the requested changes. - - -.. note:: +Helm packages are called *charts*. We will be installing and managing JupyterHub +on our Kubernetes cluster using a Helm chart. - These instructions are for Helm 2. - Helm 3 includes several major breaking changes and is not yet officially - supported +A Helm *chart* is mostly Helm *templates* and default *values* that are used to +render the templates into valid k8s resources. Each installation of a chart is +called a *release*, and each version of the release is called a *revision*. Several `methods to install Helm -`_ exist, the -simplest way to install Helm is to run Helm's installer script in a terminal: - -.. code:: bash - - curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash - -Initializing Helm -~~~~~~~~~~~~~~~~~ - -After installing helm on your machine, initialize Helm on your Kubernetes -cluster: - -1. Set up a `ServiceAccount - `_ - for use by `tiller`. - - .. code-block:: bash - - kubectl --namespace kube-system create serviceaccount tiller - -2. Give the `ServiceAccount` full permissions to manage the cluster. - - .. code-block:: bash - - kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller - -3. Initialize `helm` and `tiller`. - - .. code-block:: bash - - helm init --service-account tiller --history-max 100 --wait - - This command only needs to run once per Kubernetes cluster, it will create a - `tiller` deployment in the kube-system namespace and setup your local `helm` - client. 
-   This command installs and configures the `tiller` part of Helm (the whole
-   project, not the CLI) on the remote kubernetes cluster. Later when you want
-   to deploy changes with `helm` (the local CLI), it will talk to `tiller`
-   and tell it what to do. `tiller` then executes these instructions from
-   within the cluster.
-   We limit the history to 100 previous installs as very long histories slow
-   down helm commands a lot.
-
-   .. note::
-
-      If you wish to install `helm` on another computer, you won't need to setup
-      `tiller` again but you still need to initialize `helm`:
-
-      .. code-block:: bash
-
-         helm init --client-only
-
-Securing Helm
-~~~~~~~~~~~~~
-
-Ensure that `tiller` is secured against access from inside the cluster:
+<https://helm.sh/docs/intro/install/>`_ exist, the simplest
+way to install Helm is to run Helm's installer script in a terminal.

 .. code:: bash

-   kubectl patch deployment tiller-deploy --namespace=kube-system --type=json --patch='[{"op": "add", "path": "/spec/template/spec/containers/0/command", "value": ["/tiller", "--listen=localhost:44134"]}]'
-
-By default `tiller`'s port is exposed in the cluster without authentication and
-if you probe this port directly (i.e. by bypassing `helm`) then `tiller`'s
-permissions can be exploited. This step forces `tiller` to listen to commands
-from localhost (i.e. `helm`) *only* so that other pods inside the cluster cannot
-ask `tiller` to install a new chart granting them arbitrary, elevated RBAC
-privileges which they could then exploit.
-`More details here. `_
+   curl -sf https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash

 Verifying the setup
 ~~~~~~~~~~~~~~~~~~~

-You can verify that you have the correct version and that it installed properly
-by running:
+Verify that you have installed helm and kubectl, and that you are able to
+communicate with your Kubernetes cluster.

 .. code:: bash

    helm version

-It should provide output like below. If you just installed everything for the
-first time it might take one or two minutes to show the output. Make sure you
-have at least version 2.11.0 and that the client (`helm`) and server
-version (`tiller`) match!
+.. code-block:: bash
+
+   version.BuildInfo{Version:"v3.4.0", GitCommit:"7090a89efc8a18f3d8178bf47d2462450349a004", GitTreeState:"clean", GoVersion:"go1.14.10"}
+
+.. code-block:: bash
+
+   kubectl version

 .. code-block:: bash

-   Client: &version.Version{SemVer:"v2.11.0", GitCommit:"2e55dbe1fdb5fdb96b75ff144a339489417b146b", GitTreeState:"clean"}
-   Server: &version.Version{SemVer:"v2.11.0", GitCommit:"2e55dbe1fdb5fdb96b75ff144a339489417b146b", GitTreeState:"clean"}
+   Client Version: version.Info{Major:"1", Minor:"19", GitVersion:"v1.19.3", GitCommit:"1e11e4a2108024935ecfcb2912226cedeafd99df", GitTreeState:"clean", BuildDate:"2020-10-14T12:50:19Z", GoVersion:"go1.15.2", Compiler:"gc", Platform:"linux/amd64"}
+   Server Version: version.Info{Major:"1", Minor:"19", GitVersion:"v1.19.2", GitCommit:"f5743093fd1c663cb0cbc89748f730662345d44d", GitTreeState:"clean", BuildDate:"2020-09-16T13:32:58Z", GoVersion:"go1.15", Compiler:"gc", Platform:"linux/amd64"}

 Now that you've installed Kubernetes and Helm, it's time to :ref:`setup-registry`.
diff --git a/helm-chart/chartpress.yaml b/helm-chart/chartpress.yaml
index 305cc2a0a..e6a0de970 100644
--- a/helm-chart/chartpress.yaml
+++ b/helm-chart/chartpress.yaml
@@ -15,15 +15,22 @@ charts:
     image-cleaner:
       valuesPath: imageCleaner.image
     binderhub:
+      # We will not use the default build contextPath, and must therefore
+      # specify the dockerfilePath explicitly.
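+      # (By default chartpress assumes the Dockerfile resides in the
+      # contextPath folder, which we override below.)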
diff --git a/helm-chart/chartpress.yaml b/helm-chart/chartpress.yaml
index 305cc2a0a..e6a0de970 100644
--- a/helm-chart/chartpress.yaml
+++ b/helm-chart/chartpress.yaml
@@ -15,15 +15,22 @@ charts:
      image-cleaner:
        valuesPath: imageCleaner.image
      binderhub:
+       # We will not use the default build contextPath, and must therefore
+       # specify the dockerfilePath explicitly.
+       dockerfilePath: images/binderhub/Dockerfile
        # Context to send to docker build for use by the Dockerfile. We pass the
        # root folder in order to allow the image to access and build the python
-       # package. Since we do that, chartpress will always react to changes in
-       # documentation and other things, and always consider the chart version
-       # to change along with the image version.
+       # package.
        contextPath: ..
-       # Since we changed the contextPath, we must also change the
-       # dockerfilePath. This is because chartpress assume the Dockerfile will
-       # reside in the contextPath folder, and since we overrode the default of
-       # images/binderhub it will be the wrong folder.
-       dockerfilePath: images/binderhub/Dockerfile
+       # To keep chartpress from reacting to changes in documentation and other
+       # unrelated files, we tell it not to trigger on changes within the
+       # contextPath.
+       rebuildOnContextPathChanges: false
+       # Instead, we manually specify the paths which chartpress should monitor
+       # for changes that should trigger a rebuild of this image.
+       paths:
+         - images/binderhub/requirements.txt
+         - ../setup.py
+         - ../package.json
+         - ../binderhub
        valuesPath: image
diff --git a/helm-chart/minikube-binder.yaml b/helm-chart/minikube-binder.yaml
deleted file mode 100644
index 2ce02058f..000000000
--- a/helm-chart/minikube-binder.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-rbac:
-  enabled: true
-
-resources:
-  requests:
-    cpu: 0.2
-    memory: 100Mi
-
-cors: &cors
-  allowOrigin: '*'
-
-service:
-  type: NodePort
-  nodePort: 30901
-
-config:
-  BinderHub:
-    hub_url: http://192.168.99.100:30902
-    use_registry: false
-    log_level: 10
-
-jupyterhub:
-  custom:
-    cors: *cors
-  rbac:
-    enabled: true
-  hub:
-    resources:
-      requests:
-        cpu: 0.2
-        memory: 100Mi
-    cookieSecret: 1470700e01f77171c2c67b12130c25081dfbdf2697af8c2f2bd05621b31100bf
-    db:
-      type: sqlite-memory
-    services:
-      binder:
-        apiToken: 0c18e3dcb7c55b8c7740f1d7ee6977510ce3cb22221669278ee03f3c2259ab6b
-
-proxy:
-  secretToken: f89ddee5ba10f2268fcefcd4e353235c255493095cd65addf29ebebf8df86255
-  service:
-    type: NodePort
-    nodePorts:
-      http: 30902
-
-singleuser:
-  storage:
-    type: none
-  memory:
-    guarantee: null
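With `rebuildOnContextPathChanges: false`, only the explicitly listed `paths`
influence the image tag and chart version. A quick way to check locally what
chartpress would do, sketched under the assumption that chartpress and Docker
are available:

.. code-block:: bash

   pip install chartpress
   cd helm-chart
   chartpress --skip-build   # update tags/versions from git history without building images
   git --no-pager diff       # inspect the values chartpress rewrote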
diff --git a/testing/k8s-binder-k8s-hub/binderhub-chart-config.yaml b/testing/k8s-binder-k8s-hub/binderhub-chart-config.yaml
new file mode 100644
index 000000000..bbc32a6eb
--- /dev/null
+++ b/testing/k8s-binder-k8s-hub/binderhub-chart-config.yaml
@@ -0,0 +1,45 @@
+cors:
+  allowOrigin: '*'
+
+service:
+  type: NodePort
+  nodePort: 30901
+
+config:
+  BinderHub:
+    hub_url: http://192.168.99.100:30902
+    use_registry: false
+    log_level: 10
+
+# NOTE: This is a mirror of the jupyterhub section in
+#       jupyterhub-chart-config.yaml in testing/local-binder-k8s-hub; please
+#       keep these two files in sync.
+jupyterhub:
+  debug:
+    enabled: true
+
+  custom:
+    cors:
+      allowOrigin: "*"
+
+  hub:
+    # cookieSecret must be a hex-encoded string of even length
+    cookieSecret: "cccccccccc"
+    db:
+      type: "sqlite-memory"
+    services:
+      binder:
+        apiToken: "dummy-binder-secret-token"
+
+  proxy:
+    secretToken: "dummy-proxy-secret-token"
+    service:
+      type: NodePort
+      nodePorts:
+        http: 30902
+
+  singleuser:
+    storage:
+      type: none
+    memory:
+      guarantee: null
diff --git a/testing/local-binder-k8s-hub/binderhub_config.py b/testing/local-binder-k8s-hub/binderhub_config.py
new file mode 100644
index 000000000..7de832a11
--- /dev/null
+++ b/testing/local-binder-k8s-hub/binderhub_config.py
@@ -0,0 +1,18 @@
+# A development config to test a BinderHub deployment generally. It can be
+# combined with the auth-specific config.
+
+# Deployment assumptions:
+# - BinderHub: standalone local installation
+# - JupyterHub: standalone k8s installation
+
+import subprocess
+
+try:
+    minikube_ip = subprocess.check_output(['minikube', 'ip'], text=True).strip()
+except (subprocess.SubprocessError, FileNotFoundError):
+    minikube_ip = '192.168.1.100'
+
+c.BinderHub.debug = True
+c.BinderHub.hub_url = 'http://{}:30902'.format(minikube_ip)
+c.BinderHub.hub_api_token = 'dummy-binder-secret-token'
+c.BinderHub.use_registry = False
diff --git a/testing/local-binder-k8s-hub/binderhub_config_auth_additions.py b/testing/local-binder-k8s-hub/binderhub_config_auth_additions.py
new file mode 100644
index 000000000..2595a4a60
--- /dev/null
+++ b/testing/local-binder-k8s-hub/binderhub_config_auth_additions.py
@@ -0,0 +1,28 @@
+# A development config to test a BinderHub deployment that relies on JupyterHub
+# as an OAuth2-based Identity Provider (IdP) for authentication and
+# authorization. JupyterHub is configured with its own Authenticator.
+
+# Deployment assumptions:
+# - BinderHub: standalone local installation
+# - JupyterHub: standalone k8s installation
+
+import os
+from urllib.parse import urlparse
+
+# As this config file references traitlet values (c.BinderHub.hub_api_token,
+# c.BinderHub.hub_url) set in the more general config file, it must load the
+# more general config file first.
+here = os.path.abspath(os.path.dirname(__file__))
+load_subconfig(os.path.join(here, 'binderhub_config.py'))
+
+# Additional auth related configuration
+c.BinderHub.base_url = '/'
+c.BinderHub.auth_enabled = True
+url = urlparse(c.BinderHub.hub_url)
+c.HubOAuth.hub_host = f'{url.scheme}://{url.netloc}'
+c.HubOAuth.api_token = c.BinderHub.hub_api_token
+c.HubOAuth.api_url = c.BinderHub.hub_url + '/hub/api/'
+c.HubOAuth.base_url = c.BinderHub.base_url
+c.HubOAuth.hub_prefix = c.BinderHub.base_url + 'hub/'
+c.HubOAuth.oauth_redirect_uri = 'http://127.0.0.1:8585/oauth_callback'
+c.HubOAuth.oauth_client_id = 'binder-oauth-client-test'
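These two config files are consumed through BinderHub's `-f` config-file flag.
A sketch of the development loop they enable, assuming the binderhub package is
installed (for example via `pip install -e .`) and a minikube cluster with the
JupyterHub chart is already running:

.. code-block:: bash

   # Plain deployment test: local BinderHub against the in-cluster JupyterHub.
   python -m binderhub -f testing/local-binder-k8s-hub/binderhub_config.py

   # Auth-enabled variant; this config load_subconfig()s the one above first.
   python -m binderhub -f testing/local-binder-k8s-hub/binderhub_config_auth_additions.py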
+""" +from subprocess import check_call +import os +import sys + +from ruamel import yaml +from tempfile import NamedTemporaryFile + +here = os.path.abspath(os.path.dirname(__file__)) +helm_chart = os.path.join(here, os.pardir, os.pardir, 'helm-chart') + + +def _get_jupyterhub_dependency_version(): + """ + Extract JupyterHub Helm chart version from the BinderHub chart's + requirements.yaml + """ + requirements_yaml = os.path.join(helm_chart, 'binderhub', 'requirements.yaml') + + with open(requirements_yaml) as f: + requirements = yaml.safe_load(f) + for dep in requirements['dependencies']: + if dep['name'] == 'jupyterhub': + return dep['version'] + else: + raise ValueError(f"JupyterHub as a Helm chart dependency not found in {requirements_yaml}:\n{requirements}") + + +with NamedTemporaryFile(mode="w") as tmp: + with open(os.path.join(helm_chart, 'binderhub', 'values.yaml')) as values_in: + yaml.safe_dump(yaml.safe_load(values_in)['jupyterhub'], tmp.file) + + cmd = ['helm', 'upgrade', '--install', 'binderhub-test'] + cmd.extend([ + 'jupyterhub/jupyterhub', + f'--version={_get_jupyterhub_dependency_version()}', + f'--values={tmp.name}', + f'--values={os.path.join(here, "jupyterhub-chart-config.yaml")}', + ]) + if '--auth' in sys.argv: + cmd.extend([ + f'--values={os.path.join(here, "jupyterhub-chart-config-auth-additions.yaml")}' + ]) + print("Installing the JupyterHub Helm chart by itself") + print(" ".join(cmd)) + check_call(cmd) diff --git a/testing/minikube/jupyterhub-helm-auth-config.yaml b/testing/local-binder-k8s-hub/jupyterhub-chart-config-auth-additions.yaml similarity index 61% rename from testing/minikube/jupyterhub-helm-auth-config.yaml rename to testing/local-binder-k8s-hub/jupyterhub-chart-config-auth-additions.yaml index 85a502ff1..4aa4948f6 100644 --- a/testing/minikube/jupyterhub-helm-auth-config.yaml +++ b/testing/local-binder-k8s-hub/jupyterhub-chart-config-auth-additions.yaml @@ -1,19 +1,17 @@ -cull: - users: false +# A JupyterHub Helm chart config containing only auth relevant config, and is +# meant to be used alongside another configuration. + +custom: + binderauth_enabled: true + hub: services: binder: oauth_no_confirm: true oauth_redirect_uri: "http://127.0.0.1:8585/oauth_callback" oauth_client_id: "binder-oauth-client-test" -custom: - binderauth_enabled: true - -singleuser: - # to make notebook servers aware of hub - cmd: jupyterhub-singleuser auth: type: dummy dummy: - password: 'dummy' + password: dummy diff --git a/testing/local-binder-k8s-hub/jupyterhub-chart-config.yaml b/testing/local-binder-k8s-hub/jupyterhub-chart-config.yaml new file mode 100644 index 000000000..863c4d2a3 --- /dev/null +++ b/testing/local-binder-k8s-hub/jupyterhub-chart-config.yaml @@ -0,0 +1,34 @@ +# A JupyterHub Helm chart config for use whenever JupyterHub is deployed by +# itself. + +# NOTE: This is a mirror of the jupyterhub section in +# binderhub-chart-config.yaml in testing/k8s-binder-k8s-hub, keep these +# two files synced please. 
diff --git a/testing/local-binder-k8s-hub/jupyterhub-chart-config.yaml b/testing/local-binder-k8s-hub/jupyterhub-chart-config.yaml
new file mode 100644
index 000000000..863c4d2a3
--- /dev/null
+++ b/testing/local-binder-k8s-hub/jupyterhub-chart-config.yaml
@@ -0,0 +1,34 @@
+# A JupyterHub Helm chart config for use whenever JupyterHub is deployed by
+# itself.
+
+# NOTE: This is a mirror of the jupyterhub section in
+#       binderhub-chart-config.yaml in testing/k8s-binder-k8s-hub; please
+#       keep these two files in sync.
+debug:
+  enabled: true
+
+custom:
+  cors:
+    allowOrigin: "*"
+
+hub:
+  # cookieSecret must be a hex-encoded string of even length
+  cookieSecret: "cccccccccc"
+  db:
+    type: "sqlite-memory"
+  services:
+    binder:
+      apiToken: "dummy-binder-secret-token"
+
+proxy:
+  secretToken: "dummy-proxy-secret-token"
+  service:
+    type: NodePort
+    nodePorts:
+      http: 30902
+
+singleuser:
+  storage:
+    type: none
+  memory:
+    guarantee: null
diff --git a/testing/local-binder-mocked-hub/binderhub_config.py b/testing/local-binder-mocked-hub/binderhub_config.py
new file mode 100644
index 000000000..ae1324118
--- /dev/null
+++ b/testing/local-binder-mocked-hub/binderhub_config.py
@@ -0,0 +1,18 @@
+# A development config to test BinderHub's UI. The image build and the
+# subsequent launch of the built image in a JupyterHub are mocked, so users
+# wait forever for a build that never completes.
+
+# Deployment assumptions:
+# - BinderHub: standalone local installation
+# - JupyterHub: mocked
+
+from binderhub.repoproviders import FakeProvider
+
+c.BinderHub.debug = True
+c.BinderHub.use_registry = False
+c.BinderHub.builder_required = False
+c.BinderHub.repo_providers = {'gh': FakeProvider}
+c.BinderHub.tornado_settings.update({'fake_build': True})
+
+c.BinderHub.about_message = "Hello world."
+c.BinderHub.banner_message = 'This is headline news.'
diff --git a/testing/localonly/binderhub_config.py b/testing/localonly/binderhub_config.py
deleted file mode 100644
index 2e3bfeca3..000000000
--- a/testing/localonly/binderhub_config.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from binderhub.repoproviders import FakeProvider
-
-c.BinderHub.use_registry = False
-c.BinderHub.builder_required = False
-c.BinderHub.repo_providers = {'gh': FakeProvider}
-c.BinderHub.tornado_settings.update({'fake_build':True})
-
-c.BinderHub.about_message = "Hello world."
-
-c.BinderHub.banner_message = 'This is headline news.'
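The mocked-hub config replaces the deleted `testing/localonly` variant and
needs no cluster at all. A sketch of the UI-development loop it supports,
assuming the binderhub package is installed and BinderHub's default port of
8585 (the same port used by the OAuth callback URLs in these configs):

.. code-block:: bash

   python -m binderhub -f testing/local-binder-mocked-hub/binderhub_config.py
   # Then open http://127.0.0.1:8585; builds are faked and never finish,
   # which is exactly what you want when iterating on the UI.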
diff --git a/testing/minikube/binderhub_auth_config.py b/testing/minikube/binderhub_auth_config.py
deleted file mode 100644
index 249840d23..000000000
--- a/testing/minikube/binderhub_auth_config.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from urllib.parse import urlparse
-import os
-here = os.path.abspath(os.path.dirname(__file__))
-load_subconfig(os.path.join(here, 'binderhub_config.py'))
-
-c.BinderHub.base_url = '/'
-c.BinderHub.auth_enabled = True
-# configuration for authentication
-hub_url = urlparse(c.BinderHub.hub_url)
-c.HubOAuth.hub_host = '{}://{}'.format(hub_url.scheme, hub_url.netloc)
-c.HubOAuth.api_token = c.BinderHub.hub_api_token
-c.HubOAuth.api_url = c.BinderHub.hub_url + '/hub/api/'
-c.HubOAuth.base_url = c.BinderHub.base_url
-c.HubOAuth.hub_prefix = c.BinderHub.base_url + 'hub/'
-c.HubOAuth.oauth_redirect_uri = 'http://127.0.0.1:8585/oauth_callback'
-c.HubOAuth.oauth_client_id = 'binder-oauth-client-test'
diff --git a/testing/minikube/binderhub_config.py b/testing/minikube/binderhub_config.py
deleted file mode 100644
index 19ba44cf6..000000000
--- a/testing/minikube/binderhub_config.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# config file for testing with minikube-config.yaml
-import subprocess
-try:
-    minikube_ip = subprocess.check_output(['minikube', 'ip']).decode('utf-8').strip()
-except (subprocess.SubprocessError, FileNotFoundError):
-    minikube_ip = '192.168.1.100'
-
-c.BinderHub.hub_url = 'http://{}:30123'.format(minikube_ip)
-c.BinderHub.hub_api_token = 'aec7d32df938c0f55e54f09244a350cb29ea612907ed4f07be13d9553d18a8e4'
-c.BinderHub.use_registry = False
-c.BinderHub.build_namespace = 'binder-test'
diff --git a/testing/minikube/install-hub b/testing/minikube/install-hub
deleted file mode 100755
index 246973133..000000000
--- a/testing/minikube/install-hub
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env python3
-"""
-Script to install jupyterhub helm chart with minikube
-
-for testing binderhub
-
-Gets the jupyterhub chart version from the binderhub helm chart
-to ensure we are testing against a reasonable version.
-"""
-import sys
-import os
-import pipes
-from subprocess import check_call, check_output
-import time
-
-from kubernetes import client, config
-from ruamel import yaml
-from tempfile import NamedTemporaryFile
-
-auth_enabled = '--auth' in sys.argv
-
-namespace = os.environ.get('BINDER_TEST_NAMESPACE') or 'binder-test'
-name = 'binder-test-hub'
-
-here = os.path.abspath(os.path.dirname(__file__))
-helm_chart = os.path.join(here, os.pardir, os.pardir, 'helm-chart')
-requirements_yaml = os.path.join(helm_chart, 'binderhub', 'requirements.yaml')
-values_yaml = os.path.join(helm_chart, 'binderhub', 'values.yaml')
-
-def get_hub_chart_dependency():
-    """Get the JupyterHub chart info from the binderhub chart requirements.yaml"""
-    with open(requirements_yaml) as f:
-        requirements = yaml.safe_load(f)
-    for dep in requirements['dependencies']:
-        if dep['name'] == 'jupyterhub':
-            return dep
-    else:
-        raise ValueError("Couldn't find JupyterHub in %s:\n%s" %
-            (requirements_yaml, requirements)
-        )
-
-jupyterhub = get_hub_chart_dependency()
-
-# update the helm repo
-check_call(['helm', 'repo', 'add', 'jupyterhub', jupyterhub['repository']])
-check_call(['helm', 'repo', 'update', 'jupyterhub'])
-
-# Deploying BinderHub normally automatically deploys JupyterHub from the same
-# configuration file.
-# In the CI tests JupyterHub is configured independently, so extract the
-# JupyterHub config from the BinderHub helm chart values.yaml
-tmp = NamedTemporaryFile(suffix='.yaml', delete=False)
-tmp.close()
-jupyterhub_values_yaml = tmp.name
-with open(values_yaml) as valuesin:
-    jupyterhub_values = yaml.safe_load(valuesin)['jupyterhub']
-with open(jupyterhub_values_yaml, 'w') as valuesout:
-    yaml.safe_dump(jupyterhub_values, valuesout)
-
-# launch with helm install (or upgrade, if already installed)
-args = [
-    'jupyterhub/jupyterhub',
-    f'--version={jupyterhub["version"]}',
-    f'--namespace={namespace}',
-    '-f', jupyterhub_values_yaml,
-    '-f', os.path.join(here, 'jupyterhub-helm-config.yaml'),
-]
-if auth_enabled:
-    print('\nAuthentication is enabled')
-    auth_conf_file = os.path.join(here, 'jupyterhub-helm-auth-config.yaml')
-    args.extend(['-f', auth_conf_file])
-
-is_running = name in check_output(['helm', 'list', '-q']).decode('utf8', 'replace').split()
-if is_running:
-    cmd = ['helm', 'upgrade', name]
-else:
-    cmd = ['helm', 'install', f'--name={name}']
-
-cmd.extend(args)
-print("\n  %s\n" % ' '.join(map(pipes.quote, cmd)))
-
-check_call(cmd)
-
-
-# wait for deployment to be running via kube API
-
-config.load_kube_config()
-kube = client.CoreV1Api()
-
-def wait_for(f, msg, timeout=300):
-    """Wait for f() to return True"""
-    print(f"Waiting until {msg}")
-    for i in range(int(timeout)):
-        if f():
-            break
-        time.sleep(1)
-    else:
-        raise TimeoutError(f"{msg} did not occur in {timeout} seconds")
-    print(msg)
-
-def pods_ready():
-    """
-    Return whether all pods in our test namespace are ready, which is a tougher
-    criteria than running.
-
-    ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-conditions
-    ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.16/#podstatus-v1-core
-    """
-    pods = kube.list_namespaced_pod(namespace).items
-
-    # FIXME: we could perhaps delegate this waiting for readiness to kubernetes
-    # api and do like in z2jh:
-    # https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/5703a8de9017d83242f8d4dd1ac00887c162629b/dev#L261-L266
-    all_pods_ready = True
-    for pod in pods:
-        if not any(
-            condition.type == "Ready" and condition.status == "True"
-            for condition in pod.status.conditions
-        ):
-            all_pods_ready = False
-            print(f"{pod.status.phase}\t{pod.metadata.name}")
-    return all_pods_ready
-
-# wait until all of our pods are running and ready
-try:
-    wait_for(pods_ready, "Hub is up")
-except TimeoutError:
-    # show pods on timeout, in case there's a hint about what's wrong
-    check_call(['kubectl', 'get', 'pod', '--all-namespaces'])
-    raise
-
-os.remove(jupyterhub_values_yaml)
diff --git a/testing/minikube/jupyterhub-helm-config.yaml b/testing/minikube/jupyterhub-helm-config.yaml
deleted file mode 100644
index af0f54009..000000000
--- a/testing/minikube/jupyterhub-helm-config.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-# This has secrets! Do not use this config for anything but local testing
-# with minikube.
-
-# the binder apiToken bit requires jupyterhub helm-chart v0.5.x
-
-custom:
-  cors:
-    allowOrigin: "*"
-
-hub:
-  cookieSecret: "36091e950f98f033aeae2520e2fe4c8599bc391598f910e510ad670765fbc1ff"
-  db:
-    type: "sqlite-memory"
-  services:
-    binder:
-      apiToken: "aec7d32df938c0f55e54f09244a350cb29ea612907ed4f07be13d9553d18a8e4"
-  allowNamedServers: false
-  extraConfig:
-    10-binder-customisations: |
-      # debug!
-      c.JupyterHub.log_level = 10
-
-proxy:
-  # Another secret!
-  secretToken: "443fa28905c209eaf5803f911de7748f443c78062767d9d28d514dc4fbefd843"
-  service:
-    type: NodePort
-    nodePorts:
-      http: 30123
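The deleted `install-hub` script above polled pod conditions by hand through
the Kubernetes Python client. If you need the same readiness gate without that
dependency, kubectl can express it directly; a hedged equivalent, with the
namespace and timeout as illustrative placeholders:

.. code-block:: bash

   kubectl wait pods --all \
     --namespace=binder-test \
     --for=condition=Ready \
     --timeout=300s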