diff --git a/Docker/python-nginx/README.md b/Docker/python-nginx/README.md index 02c86054e..cec5bf186 100644 --- a/Docker/python-nginx/README.md +++ b/Docker/python-nginx/README.md @@ -1,3 +1,5 @@ The `python2.7-alpine3.7` image is at https://quay.io/repository/cdis/py27base The `python3.6-alpine3.7` image is at https://quay.io/repository/cdis/python-nginx + +The `python3.6-buster/Dockerfile` triggers an image build here: https://quay.io/repository/cdis/python \ No newline at end of file diff --git a/Docker/python-nginx/python3.6-alpine3.7/Dockerfile b/Docker/python-nginx/python3.6-alpine3.7/Dockerfile index 580386728..06eface8f 100755 --- a/Docker/python-nginx/python3.6-alpine3.7/Dockerfile +++ b/Docker/python-nginx/python3.6-alpine3.7/Dockerfile @@ -1,8 +1,6 @@ # Dockerfile written by Sebastian Ramirez at https://github.com/tiangolo/uwsgi-nginx-docker -FROM quay.io/pcdc/python_3.8-alpine3_11:latest -# FROM python:3.8-alpine3.11 -# FROM quay.io/cdis/python:3.6-alpine3.7 +FROM quay.io/cdis/python:3.6-alpine3.7 # Standard set up Nginx Alpine # https://github.com/nginxinc/docker-nginx/blob/f3fc4d5753f0ebb9107738183b9c5cea1bf3f618/mainline/alpine/Dockerfile diff --git a/Docker/python-nginx/python3.6-alpine3.7/nginx.conf b/Docker/python-nginx/python3.6-alpine3.7/nginx.conf index b30885033..52976c8e8 100755 --- a/Docker/python-nginx/python3.6-alpine3.7/nginx.conf +++ b/Docker/python-nginx/python3.6-alpine3.7/nginx.conf @@ -33,7 +33,6 @@ http { '"http_useragent": "$http_user_agent", ' '"message": "$request"}'; - access_log /dev/stdout json; log_format main '$remote_addr - $remote_user [$time_local] "$request" ' '$status $body_bytes_sent "$http_referer" ' diff --git a/Docker/python-nginx/python3.6-buster/Dockerfile b/Docker/python-nginx/python3.6-buster/Dockerfile new file mode 100644 index 000000000..e2d658038 --- /dev/null +++ b/Docker/python-nginx/python3.6-buster/Dockerfile @@ -0,0 +1,168 @@ +# 
https://github.com/tiangolo/uwsgi-nginx-docker/blob/master/docker-images/python3.6.dockerfile +FROM quay.io/cdis/python:3.6.15-buster + +# https://github.com/nginxinc/docker-nginx/blob/f958fbacada447737319e979db45a1da49123142/mainline/debian/Dockerfile +ENV NGINX_VERSION 1.21.1 +ENV NJS_VERSION 0.6.1 +ENV PKG_RELEASE 1~buster + +RUN set -x \ +# create nginx user/group first, to be consistent throughout docker variants + && addgroup --system --gid 102 nginx \ + && adduser --system --disabled-login --ingroup nginx --no-create-home --home /nonexistent --gecos "nginx user" --shell /bin/false --uid 102 nginx \ +# also add nginx user to gid 101 (ssh) groups + && adduser nginx ssh \ + && apt-get update \ + && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 ca-certificates \ + && \ + NGINX_GPGKEY=573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62; \ + found=''; \ + for server in \ + ha.pool.sks-keyservers.net \ + hkp://keyserver.ubuntu.com:80 \ + hkp://p80.pool.sks-keyservers.net:80 \ + pgp.mit.edu \ + ; do \ + echo "Fetching GPG key $NGINX_GPGKEY from $server"; \ + apt-key adv --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \ + done; \ + test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \ + apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \ + && dpkgArch="$(dpkg --print-architecture)" \ + && nginxPackages=" \ + nginx=${NGINX_VERSION}-${PKG_RELEASE} \ + nginx-module-xslt=${NGINX_VERSION}-${PKG_RELEASE} \ + nginx-module-geoip=${NGINX_VERSION}-${PKG_RELEASE} \ + nginx-module-image-filter=${NGINX_VERSION}-${PKG_RELEASE} \ + nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${PKG_RELEASE} \ + " \ + && case "$dpkgArch" in \ + amd64|i386|arm64) \ +# arches officialy built by upstream + echo "deb https://nginx.org/packages/mainline/debian/ buster nginx" >> /etc/apt/sources.list.d/nginx.list \ + && apt-get update \ + ;; \ + *) \ +# 
we're on an architecture upstream doesn't officially build for +# let's build binaries from the published source packages + echo "deb-src https://nginx.org/packages/mainline/debian/ buster nginx" >> /etc/apt/sources.list.d/nginx.list \ + \ +# new directory for storing sources and .deb files + && tempDir="$(mktemp -d)" \ + && chmod 777 "$tempDir" \ +# (777 to ensure APT's "_apt" user can access it too) + \ +# save list of currently-installed packages so build dependencies can be cleanly removed later + && savedAptMark="$(apt-mark showmanual)" \ + \ +# build .deb files from upstream's source packages (which are verified by apt-get) + && apt-get update \ + && apt-get build-dep -y $nginxPackages \ + && ( \ + cd "$tempDir" \ + && DEB_BUILD_OPTIONS="nocheck parallel=$(nproc)" \ + apt-get source --compile $nginxPackages \ + ) \ +# we don't remove APT lists here because they get re-downloaded and removed later + \ +# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies +# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies) + && apt-mark showmanual | xargs apt-mark auto > /dev/null \ + && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \ + \ +# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be) + && ls -lAFh "$tempDir" \ + && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \ + && grep '^Package: ' "$tempDir/Packages" \ + && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \ +# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes") +# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied) +# ... 
+# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied) + && apt-get -o Acquire::GzipIndexes=false update \ + ;; \ + esac \ + \ + && apt-get install --no-install-recommends --no-install-suggests -y \ + $nginxPackages \ + gettext-base \ + curl \ + && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \ + \ +# if we have leftovers from building, let's purge them (including extra, unnecessary build deps) + && if [ -n "$tempDir" ]; then \ + apt-get purge -y --auto-remove \ + && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \ + fi \ +# forward request and error logs to docker log collector + && ln -sf /dev/stdout /var/log/nginx/access.log \ + && ln -sf /dev/stderr /var/log/nginx/error.log \ +# create a docker-entrypoint.d directory + && mkdir /docker-entrypoint.d + +EXPOSE 80 + +# # Expose 443, in case of LTS / HTTPS +EXPOSE 443 + +# install uwsgi +# https://uwsgi-docs.readthedocs.io/en/latest/Install.html +RUN python -m pip install --upgrade pip +RUN pip install uwsgi + +# Remove default configuration from Nginx +RUN rm /etc/nginx/conf.d/default.conf +# Copy the base uWSGI ini file to enable default dynamic uwsgi process number +COPY uwsgi.ini /etc/uwsgi/ + +COPY uwsgi.conf /etc/nginx/sites-available/ + +RUN ln -s /etc/nginx/sites-available/uwsgi.conf /etc/nginx/conf.d/uwsgi.conf + +# Install Supervisord +RUN apt-get update && apt-get install -y supervisor \ +&& rm -rf /var/lib/apt/lists/* +# Custom Supervisord config +COPY supervisord.ini /etc/supervisor.d/supervisord.ini + +# Which uWSGI .ini file should be used, to make it customizable +ENV UWSGI_INI /app/uwsgi.ini + +# By default, disable uwsgi cheaper mode and run 2 processes. +# If UWSGI_CHEAPER=N and UWSGI_PROCESSES=M, N is the min and M is the max +# number of processes. 
UWSGI_CHEAPER must be lower than UWSGI_PROCESSES. +# We set them here instead of in uwsgi.ini so that they can be overwritten. +ENV UWSGI_CHEAPER= +ENV UWSGI_PROCESSES=2 + +# By default, allow unlimited file sizes, modify it to limit the file sizes +# To have a maximum of 1 MB (Nginx's default) change the line to: +# ENV NGINX_MAX_UPLOAD 1m +ENV NGINX_MAX_UPLOAD 0 + +# By default, Nginx will run a single worker process, setting it to auto +# will create a worker for each CPU core +ENV NGINX_WORKER_PROCESSES 1 + +# By default, Nginx listens on port 80. +# To modify this, change LISTEN_PORT environment variable. +# (in a Dockerfile or with an option for `docker run`) +ENV LISTEN_PORT 80 + +# Copy the entrypoint that will generate Nginx additional configs +COPY entrypoint.sh /entrypoint.sh +COPY logrotate-nginx.conf /etc/logrotate.d/nginx +RUN chmod +x /entrypoint.sh + +ENV PATH="/root/.cargo/bin:${PATH}" + +COPY dockerrun.sh /dockerrun.sh +RUN mkdir -p /var/www/metrics/ && chmod +x /dockerrun.sh + +ENTRYPOINT ["sh", "/entrypoint.sh"] + +# Add demo app +COPY ./app /app +WORKDIR /app + +CMD ["/usr/bin/supervisord"] \ No newline at end of file diff --git a/Docker/python-nginx/python3.6-buster/app/main.py b/Docker/python-nginx/python3.6-buster/app/main.py new file mode 100755 index 000000000..216c258f4 --- /dev/null +++ b/Docker/python-nginx/python3.6-buster/app/main.py @@ -0,0 +1,4 @@ +def application(env, start_response): + start_response('200 OK', [('Content-Type', 'text/html')]) + return [b"Hello World from a default Nginx uWSGI Python 3.6 app in a\ + Docker container (default)"] diff --git a/Docker/python-nginx/python3.6-buster/app/uwsgi.ini b/Docker/python-nginx/python3.6-buster/app/uwsgi.ini new file mode 100755 index 000000000..8a29f7350 --- /dev/null +++ b/Docker/python-nginx/python3.6-buster/app/uwsgi.ini @@ -0,0 +1,2 @@ +[uwsgi] +wsgi-file=/app/main.py diff --git a/Docker/python-nginx/python3.6-buster/dockerrun.sh 
b/Docker/python-nginx/python3.6-buster/dockerrun.sh new file mode 100644 index 000000000..91f79d302 --- /dev/null +++ b/Docker/python-nginx/python3.6-buster/dockerrun.sh @@ -0,0 +1,102 @@ +#!/bin/sh +# +# Note: base alpine Linux image may not include bash shell, +# and we probably want to move to that for service images, +# so just use bourn shell ... + +# +# Update certificate authority index - +# environment may have mounted more authorities +# - ex: /usr/local/share/ca-certificates/cdis-ca.crt into system bundle +# + +GEN3_DEBUG="${GEN3_DEBUG:-False}" +GEN3_DRYRUN="${GEN3_DRYRUN:-False}" +GEN3_UWSGI_TIMEOUT="${GEN3_UWSGI_TIMEOUT:-45s}" + +run() { + if [ "$GEN3_DRYRUN" = True ]; then + echo "DRY RUN - not running: $@" + else + echo "Running $@" + "$@" + fi +} + +help() { + cat - <> ./wsgi.py +fi + +if [[ -z $DD_ENABLED ]]; then +( + run uwsgi --ini /etc/uwsgi/uwsgi.ini +) & +else +pip install ddtrace +echo "import=ddtrace.bootstrap.sitecustomize" >> /etc/uwsgi/uwsgi.ini +( + ddtrace-run uwsgi --enable-threads --ini /etc/uwsgi/uwsgi.ini +) & +fi + +run nginx -g 'daemon off;' +wait diff --git a/Docker/python-nginx/python3.6-buster/entrypoint.sh b/Docker/python-nginx/python3.6-buster/entrypoint.sh new file mode 100755 index 000000000..2a6d72f6f --- /dev/null +++ b/Docker/python-nginx/python3.6-buster/entrypoint.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env sh +set -e + +rate_limit="" +if [ ! -z $NGINX_RATE_LIMIT ]; then + echo "Found NGINX_RATE_LIMIT environment variable..." + if [ ! -z $OVERRIDE_NGINX_RATE_LIMIT ]; then + rate_limit=$OVERRIDE_NGINX_RATE_LIMIT + echo "Overriding Nginx rate limit with new value ${rate_limit}..." + else + rate_limit=$NGINX_RATE_LIMIT + echo "Applying Nginx rate limit from k8s deployment descriptor..." 
+ fi + + # Add rate_limit config + rate_limit_conf="\ \ \ \ limit_req_zone \$binary_remote_addr zone=one:10m rate=${rate_limit}r/s;" + sed -i "/http\ {/a ${rate_limit_conf}" /etc/nginx/nginx.conf + if [ -f /etc/nginx/sites-available/uwsgi.conf ]; then + limit_req_config="\ \ \ \ \ \ \ \ limit_req zone=one;" + sed -i "/location\ \/\ {/a ${limit_req_config}" /etc/nginx/sites-available/uwsgi.conf + fi +fi + +# Get the maximum upload file size for Nginx, default to 0: unlimited +USE_NGINX_MAX_UPLOAD=${NGINX_MAX_UPLOAD:-0} +# Generate Nginx config for maximum upload file size +echo "client_max_body_size $USE_NGINX_MAX_UPLOAD;" > /etc/nginx/conf.d/upload.conf + +# Explicitly add installed Python packages and uWSGI Python packages to PYTHONPATH +# Otherwise uWSGI can't import Flask +export PYTHONPATH=$PYTHONPATH:/usr/local/lib/python3.6/site-packages:/usr/lib/python3.6/site-packages + +# Get the number of workers for Nginx, default to 1 +USE_NGINX_WORKER_PROCESSES=${NGINX_WORKER_PROCESSES:-1} +# Modify the number of worker processes in Nginx config +sed -i "/worker_processes\s/c\worker_processes ${USE_NGINX_WORKER_PROCESSES};" /etc/nginx/nginx.conf + +# Set the max number of connections per worker for Nginx, if requested +# Cannot exceed worker_rlimit_nofile, see NGINX_WORKER_OPEN_FILES below +if [ -n "$NGINX_WORKER_CONNECTIONS" ] ; then + sed -i "/worker_connections\s/c\ worker_connections ${NGINX_WORKER_CONNECTIONS};" /etc/nginx/nginx.conf +fi + +# Set the max number of open file descriptors for Nginx workers, if requested +if [ -n "$NGINX_WORKER_OPEN_FILES" ] ; then + echo "worker_rlimit_nofile ${NGINX_WORKER_OPEN_FILES};" >> /etc/nginx/nginx.conf +fi + +# Get the listen port for Nginx, default to 80 +USE_LISTEN_PORT=${LISTEN_PORT:-80} +# Modify Nignx config for listen port +if ! 
grep -q "listen ${USE_LISTEN_PORT};" /etc/nginx/nginx.conf ; then + sed -i -e "/server {/a\ listen ${USE_LISTEN_PORT};" /etc/nginx/nginx.conf +fi +exec "$@" diff --git a/Docker/python-nginx/python3.6-buster/logrotate-nginx.conf b/Docker/python-nginx/python3.6-buster/logrotate-nginx.conf new file mode 100644 index 000000000..fc6b7e3c3 --- /dev/null +++ b/Docker/python-nginx/python3.6-buster/logrotate-nginx.conf @@ -0,0 +1,9 @@ +# nginx log rotation +/var/log/nginx { + weekly + size 10M + postrotate + [ ! -f /var/run/nginx.pid ] || kill -USR1 `cat /var/run/nginx.pid` + endscript + rotate 5 +} diff --git a/Docker/python-nginx/python3.6-buster/nginx.conf b/Docker/python-nginx/python3.6-buster/nginx.conf new file mode 100755 index 000000000..52976c8e8 --- /dev/null +++ b/Docker/python-nginx/python3.6-buster/nginx.conf @@ -0,0 +1,52 @@ +user nginx; +worker_processes 1; + +error_log /var/log/nginx/error.log warn; +pid /var/run/nginx.pid; + + +events { + worker_connections 1024; +} + + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + ## + # Logging Settings + ## + log_format json '{"gen3log": "nginx", ' + '"date_access": "$time_iso8601", ' + '"user_id": "$http_x_userid", ' + '"request_id": "$http_x_reqid", ' + '"session_id": "$http_x_sessionid", ' + '"visitor_id": "$http_x_visitorid", ' + '"network_client_ip": "$http_x_forwarded_for", ' + '"network_bytes_write": $body_bytes_sent, ' + '"response_secs": $request_time, ' + '"http_status_code": $status, ' + '"http_request": "$request_uri", ' + '"http_verb": "$request_method", ' + '"http_referer": "$http_referer", ' + '"http_useragent": "$http_user_agent", ' + '"message": "$request"}'; + + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access_not_json.log main; + access_log /var/log/nginx/access.log json; + + sendfile on; + #tcp_nopush on; + + 
keepalive_timeout 65; + + #gzip on; + + include /etc/nginx/conf.d/*.conf; +} diff --git a/Docker/python-nginx/python3.6-buster/supervisord.ini b/Docker/python-nginx/python3.6-buster/supervisord.ini new file mode 100755 index 000000000..3a6323227 --- /dev/null +++ b/Docker/python-nginx/python3.6-buster/supervisord.ini @@ -0,0 +1,18 @@ +[supervisord] +nodaemon=true + +[program:uwsgi] +command=/usr/sbin/uwsgi --ini /etc/uwsgi/uwsgi.ini --die-on-term --need-app --plugin python3 +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 + +[program:nginx] +command=/usr/sbin/nginx +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +# Graceful stop, see http://nginx.org/en/docs/control.html +stopsignal=QUIT diff --git a/Docker/python-nginx/python3.6-buster/uwsgi.conf b/Docker/python-nginx/python3.6-buster/uwsgi.conf new file mode 100644 index 000000000..97c53335d --- /dev/null +++ b/Docker/python-nginx/python3.6-buster/uwsgi.conf @@ -0,0 +1,68 @@ +server { + listen 6567; + + root /var/www/metrics; + + location /aggregated_metrics { + types {} + default_type text/plain; + try_files $uri $uri/ /metrics.txt; + autoindex on; + access_log off; + } +} + +server { + listen 80; + + large_client_header_buffers 4 64k; + + location / { + uwsgi_param REMOTE_ADDR $http_x_forwarded_for if_not_empty; + uwsgi_param REMOTE_USER $http_x_userid if_not_empty; + uwsgi_param REMOTE_REQID $http_x_reqid if_not_empty; + uwsgi_param REMOTE_SESSIONID $http_x_sessionid if_not_empty; + uwsgi_param REMOTE_VISITORID $http_x_visitorid if_not_empty; + uwsgi_param GEN3_REQUEST_TIMESTAMP $msec; + uwsgi_param GEN3_TIMEOUT_SECONDS GEN3_UWSGI_TIMEOUT; + + include uwsgi_params; + uwsgi_pass unix:/var/run/gen3/uwsgi.sock; + uwsgi_read_timeout GEN3_UWSGI_TIMEOUT; + uwsgi_send_timeout GEN3_UWSGI_TIMEOUT; + } + + location /_status { + include uwsgi_params; + uwsgi_pass unix:/var/run/gen3/uwsgi.sock; + 
uwsgi_param GEN3_REQUEST_TIMESTAMP $msec; + uwsgi_param GEN3_TIMEOUT_SECONDS GEN3_UWSGI_TIMEOUT; + uwsgi_read_timeout GEN3_UWSGI_TIMEOUT; + uwsgi_ignore_client_abort on; + access_log off; + } + + location /nginx_status { + stub_status; + allow 127.0.0.1; + deny all; + access_log off; + } + + location /uwsgi_status { + proxy_pass "http://127.0.0.1:9191"; + allow 127.0.0.1; + deny all; + access_log off; + } + + error_page 502 /502.html; + location /502.html { + return 504 '{"error": "Request Timeout or Service Unavailable"}'; + } + + error_page 504 /504.html; + location /504.html { + return 504 '{"error": "Request Timeout"}'; + } +} diff --git a/Docker/python-nginx/python3.6-buster/uwsgi.ini b/Docker/python-nginx/python3.6-buster/uwsgi.ini new file mode 100755 index 000000000..0cb4e6360 --- /dev/null +++ b/Docker/python-nginx/python3.6-buster/uwsgi.ini @@ -0,0 +1,6 @@ +[uwsgi] +socket = /tmp/uwsgi.sock +chown-socket = nginx:nginx +chmod-socket = 664 +# Graceful shutdown on SIGTERM, see https://github.com/unbit/uwsgi/issues/849#issuecomment-118869386 +hook-master-start = unix_signal:15 gracefully_kill_them_all diff --git a/Docker/python-nginx/python3.8-alpine3.11/Dockerfile b/Docker/python-nginx/python3.8-alpine3.11/Dockerfile new file mode 100755 index 000000000..0115dbf1d --- /dev/null +++ b/Docker/python-nginx/python3.8-alpine3.11/Dockerfile @@ -0,0 +1,209 @@ +# Dockerfile written by Sebastian Ramirez at https://github.com/tiangolo/uwsgi-nginx-docker + +FROM quay.io/pcdc/python_3.8-alpine3_11:latest + + +# Standard set up Nginx Alpine +# https://github.com/nginxinc/docker-nginx/blob/f3fc4d5753f0ebb9107738183b9c5cea1bf3f618/mainline/alpine/Dockerfile + +ENV NGINX_VERSION 1.15.3 + +RUN GPG_KEYS=B0F4253373F8F6F510D42178520A9993A1C052F8 \ + && CONFIG="\ + --prefix=/etc/nginx \ + --sbin-path=/usr/sbin/nginx \ + --modules-path=/usr/lib/nginx/modules \ + --conf-path=/etc/nginx/nginx.conf \ + --error-log-path=/var/log/nginx/error.log \ + 
--http-log-path=/var/log/nginx/access.log \ + --pid-path=/var/run/nginx.pid \ + --lock-path=/var/run/nginx.lock \ + --http-client-body-temp-path=/var/cache/nginx/client_temp \ + --http-proxy-temp-path=/var/cache/nginx/proxy_temp \ + --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp \ + --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp \ + --http-scgi-temp-path=/var/cache/nginx/scgi_temp \ + --user=nginx \ + --group=nginx \ + --with-http_ssl_module \ + --with-http_realip_module \ + --with-http_addition_module \ + --with-http_sub_module \ + --with-http_dav_module \ + --with-http_flv_module \ + --with-http_mp4_module \ + --with-http_gunzip_module \ + --with-http_gzip_static_module \ + --with-http_random_index_module \ + --with-http_secure_link_module \ + --with-http_stub_status_module \ + --with-http_auth_request_module \ + --with-http_xslt_module=dynamic \ + --with-http_image_filter_module=dynamic \ + --with-http_geoip_module=dynamic \ + --with-threads \ + --with-stream \ + --with-stream_ssl_module \ + --with-stream_ssl_preread_module \ + --with-stream_realip_module \ + --with-stream_geoip_module=dynamic \ + --with-http_slice_module \ + --with-mail \ + --with-mail_ssl_module \ + --with-compat \ + --with-file-aio \ + --with-http_v2_module \ + " \ + && addgroup -S nginx \ + && adduser -D -S -h /var/cache/nginx -s /sbin/nologin -G nginx nginx \ + && apk add --no-cache --virtual .build-deps \ + gcc \ + libc-dev \ + make \ + openssl-dev \ + pcre-dev \ + zlib-dev \ + linux-headers \ + curl \ + gnupg1 \ + libxslt-dev \ + gd-dev \ + geoip-dev \ + logrotate \ + && curl -fSL https://nginx.org/download/nginx-$NGINX_VERSION.tar.gz -o nginx.tar.gz \ + && curl -fSL https://nginx.org/download/nginx-$NGINX_VERSION.tar.gz.asc -o nginx.tar.gz.asc \ + # install Rust + && curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=1.51 \ + && export GNUPGHOME="$(mktemp -d)" \ + && found=''; \ + for server in \ + ha.pool.sks-keyservers.net \ + 
hkp://keyserver.ubuntu.com:80 \ + hkp://p80.pool.sks-keyservers.net:80 \ + pgp.mit.edu \ + ; do \ + echo "Fetching GPG key $GPG_KEYS from $server"; \ + gpg --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$GPG_KEYS" && found=yes && break; \ + done; \ + test -z "$found" && echo >&2 "error: failed to fetch GPG key $GPG_KEYS" && exit 1; \ + gpg --batch --verify nginx.tar.gz.asc nginx.tar.gz \ + && rm -rf "$GNUPGHOME" nginx.tar.gz.asc \ + && mkdir -p /usr/src \ + && tar -zxC /usr/src -f nginx.tar.gz \ + && rm nginx.tar.gz \ + && cd /usr/src/nginx-$NGINX_VERSION \ + && ./configure $CONFIG --with-debug \ + && make -j$(getconf _NPROCESSORS_ONLN) \ + && mv objs/nginx objs/nginx-debug \ + && mv objs/ngx_http_xslt_filter_module.so objs/ngx_http_xslt_filter_module-debug.so \ + && mv objs/ngx_http_image_filter_module.so objs/ngx_http_image_filter_module-debug.so \ + && mv objs/ngx_http_geoip_module.so objs/ngx_http_geoip_module-debug.so \ + && mv objs/ngx_stream_geoip_module.so objs/ngx_stream_geoip_module-debug.so \ + && ./configure $CONFIG \ + && make -j$(getconf _NPROCESSORS_ONLN) \ + && make install \ + && rm -rf /etc/nginx/html/ \ + && mkdir /etc/nginx/conf.d/ \ + && mkdir -p /usr/share/nginx/html/ \ + && install -m644 html/index.html /usr/share/nginx/html/ \ + && install -m644 html/50x.html /usr/share/nginx/html/ \ + && install -m755 objs/nginx-debug /usr/sbin/nginx-debug \ + && install -m755 objs/ngx_http_xslt_filter_module-debug.so /usr/lib/nginx/modules/ngx_http_xslt_filter_module-debug.so \ + && install -m755 objs/ngx_http_image_filter_module-debug.so /usr/lib/nginx/modules/ngx_http_image_filter_module-debug.so \ + && install -m755 objs/ngx_http_geoip_module-debug.so /usr/lib/nginx/modules/ngx_http_geoip_module-debug.so \ + && install -m755 objs/ngx_stream_geoip_module-debug.so /usr/lib/nginx/modules/ngx_stream_geoip_module-debug.so \ + && ln -s ../../usr/lib/nginx/modules /etc/nginx/modules \ + && strip /usr/sbin/nginx* \ + && strip 
/usr/lib/nginx/modules/*.so \ + && rm -rf /usr/src/nginx-$NGINX_VERSION \ + \ + # Bring in gettext so we can get `envsubst`, then throw + # the rest away. To do this, we need to install `gettext` + # then move `envsubst` out of the way so `gettext` can + # be deleted completely, then move `envsubst` back. + && apk add --no-cache --virtual .gettext gettext \ + && mv /usr/bin/envsubst /tmp/ \ + \ + && runDeps="$( \ + scanelf --needed --nobanner --format '%n#p' /usr/sbin/nginx /usr/lib/nginx/modules/*.so /tmp/envsubst \ + | tr ',' '\n' \ + | sort -u \ + | awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' \ + )" \ + && apk add --no-cache --virtual .nginx-rundeps $runDeps \ + && apk del .build-deps \ + && apk del .gettext \ + && mv /tmp/envsubst /usr/local/bin/ \ + \ + # Bring in tzdata so users could set the timezones through the environment + # variables + && apk add --no-cache tzdata \ + \ + # forward request and error logs to docker log collector + && ln -sf /dev/stdout /var/log/nginx/access.log \ + && ln -sf /dev/stderr /var/log/nginx/error.log + +COPY nginx.conf /etc/nginx/nginx.conf +COPY uwsgi.conf /etc/nginx/sites-available/ + +# Standard set up Nginx finished + +EXPOSE 80 + + +# # Expose 443, in case of LTS / HTTPS +EXPOSE 443 + +# Install uWSGI +RUN apk add --no-cache uwsgi-python3 + +# Copy the base uWSGI ini file to enable default dynamic uwsgi process number +COPY uwsgi.ini /etc/uwsgi/ +RUN ln -s /etc/nginx/sites-available/uwsgi.conf /etc/nginx/conf.d/uwsgi.conf + +# Install Supervisord +RUN apk add --no-cache supervisor +# Custom Supervisord config +COPY supervisord.ini /etc/supervisor.d/supervisord.ini + +# Which uWSGI .ini file should be used, to make it customizable +ENV UWSGI_INI /app/uwsgi.ini + +# By default, disable uwsgi cheaper mode and run 2 processes. +# If UWSGI_CHEAPER=N and UWSGI_PROCESSES=M, N is the min and M is the max +# number of processes. UWSGI_CHEAPER must be lower than UWSGI_PROCESSES. 
+# We set them here instead of in uwsgi.ini so that they can be overwritten. +ENV UWSGI_CHEAPER= +ENV UWSGI_PROCESSES=2 + +# By default, allow unlimited file sizes, modify it to limit the file sizes +# To have a maximum of 1 MB (Nginx's default) change the line to: +ENV NGINX_MAX_UPLOAD 1m +ENV NGINX_MAX_UPLOAD 0 + +# By default, Nginx will run a single worker process, setting it to auto +# will create a worker for each CPU core +ENV NGINX_WORKER_PROCESSES 1 + +# By default, Nginx listens on port 80. +# To modify this, change LISTEN_PORT environment variable. +# (in a Dockerfile or with an option for `docker run`) +ENV LISTEN_PORT 80 + +# Copy the entrypoint that will generate Nginx additional configs +COPY entrypoint.sh /entrypoint.sh +COPY logrotate-nginx.conf /etc/logrotate.d/nginx +RUN chmod +x /entrypoint.sh + +ENV PATH="/root/.cargo/bin:${PATH}" + +COPY dockerrun.sh /dockerrun.sh +RUN mkdir -p /var/www/metrics/ && chmod +x /dockerrun.sh + +ENTRYPOINT ["sh", "/entrypoint.sh"] + +# Add demo app +COPY ./app /app +WORKDIR /app + +CMD ["/usr/bin/supervisord"] diff --git a/Docker/python-nginx/python3.8-alpine3.11/app/main.py b/Docker/python-nginx/python3.8-alpine3.11/app/main.py new file mode 100755 index 000000000..216c258f4 --- /dev/null +++ b/Docker/python-nginx/python3.8-alpine3.11/app/main.py @@ -0,0 +1,4 @@ +def application(env, start_response): + start_response('200 OK', [('Content-Type', 'text/html')]) + return [b"Hello World from a default Nginx uWSGI Python 3.6 app in a\ + Docker container (default)"] diff --git a/Docker/python-nginx/python3.8-alpine3.11/app/uwsgi.ini b/Docker/python-nginx/python3.8-alpine3.11/app/uwsgi.ini new file mode 100755 index 000000000..8a29f7350 --- /dev/null +++ b/Docker/python-nginx/python3.8-alpine3.11/app/uwsgi.ini @@ -0,0 +1,2 @@ +[uwsgi] +wsgi-file=/app/main.py diff --git a/Docker/python-nginx/python3.8-alpine3.11/dockerrun.sh b/Docker/python-nginx/python3.8-alpine3.11/dockerrun.sh new file mode 100644 index 
000000000..e73de6697 --- /dev/null +++ b/Docker/python-nginx/python3.8-alpine3.11/dockerrun.sh @@ -0,0 +1,102 @@ +#!/bin/sh +# +# Note: base alpine Linux image may not include bash shell, +# and we probably want to move to that for service images, +# so just use bourn shell ... + +# +# Update certificate authority index - +# environment may have mounted more authorities +# - ex: /usr/local/share/ca-certificates/cdis-ca.crt into system bundle +# + +GEN3_DEBUG="${GEN3_DEBUG:-False}" +GEN3_DRYRUN="${GEN3_DRYRUN:-False}" +GEN3_UWSGI_TIMEOUT="${GEN3_UWSGI_TIMEOUT:-45s}" + +run() { + if [ "$GEN3_DRYRUN" = True ]; then + echo "DRY RUN - not running: $@" + else + echo "Running $@" + "$@" + fi +} + +help() { + cat - <> ./wsgi.py +fi + +if [[ -z $DD_ENABLED ]]; then +( + run uwsgi --ini /etc/uwsgi/uwsgi.ini +) & +else +pip install ddtrace +echo "import=ddtrace.bootstrap.sitecustomize" >> /etc/uwsgi/uwsgi.ini +( + ddtrace-run uwsgi --enable-threads --ini /etc/uwsgi/uwsgi.ini +) & +fi + +run nginx -g 'daemon off;' +wait diff --git a/Docker/python-nginx/python3.8-alpine3.11/entrypoint.sh b/Docker/python-nginx/python3.8-alpine3.11/entrypoint.sh new file mode 100755 index 000000000..2a6d72f6f --- /dev/null +++ b/Docker/python-nginx/python3.8-alpine3.11/entrypoint.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env sh +set -e + +rate_limit="" +if [ ! -z $NGINX_RATE_LIMIT ]; then + echo "Found NGINX_RATE_LIMIT environment variable..." + if [ ! -z $OVERRIDE_NGINX_RATE_LIMIT ]; then + rate_limit=$OVERRIDE_NGINX_RATE_LIMIT + echo "Overriding Nginx rate limit with new value ${rate_limit}..." + else + rate_limit=$NGINX_RATE_LIMIT + echo "Applying Nginx rate limit from k8s deployment descriptor..." 
+ fi + + # Add rate_limit config + rate_limit_conf="\ \ \ \ limit_req_zone \$binary_remote_addr zone=one:10m rate=${rate_limit}r/s;" + sed -i "/http\ {/a ${rate_limit_conf}" /etc/nginx/nginx.conf + if [ -f /etc/nginx/sites-available/uwsgi.conf ]; then + limit_req_config="\ \ \ \ \ \ \ \ limit_req zone=one;" + sed -i "/location\ \/\ {/a ${limit_req_config}" /etc/nginx/sites-available/uwsgi.conf + fi +fi + +# Get the maximum upload file size for Nginx, default to 0: unlimited +USE_NGINX_MAX_UPLOAD=${NGINX_MAX_UPLOAD:-0} +# Generate Nginx config for maximum upload file size +echo "client_max_body_size $USE_NGINX_MAX_UPLOAD;" > /etc/nginx/conf.d/upload.conf + +# Explicitly add installed Python packages and uWSGI Python packages to PYTHONPATH +# Otherwise uWSGI can't import Flask +export PYTHONPATH=$PYTHONPATH:/usr/local/lib/python3.6/site-packages:/usr/lib/python3.6/site-packages + +# Get the number of workers for Nginx, default to 1 +USE_NGINX_WORKER_PROCESSES=${NGINX_WORKER_PROCESSES:-1} +# Modify the number of worker processes in Nginx config +sed -i "/worker_processes\s/c\worker_processes ${USE_NGINX_WORKER_PROCESSES};" /etc/nginx/nginx.conf + +# Set the max number of connections per worker for Nginx, if requested +# Cannot exceed worker_rlimit_nofile, see NGINX_WORKER_OPEN_FILES below +if [ -n "$NGINX_WORKER_CONNECTIONS" ] ; then + sed -i "/worker_connections\s/c\ worker_connections ${NGINX_WORKER_CONNECTIONS};" /etc/nginx/nginx.conf +fi + +# Set the max number of open file descriptors for Nginx workers, if requested +if [ -n "$NGINX_WORKER_OPEN_FILES" ] ; then + echo "worker_rlimit_nofile ${NGINX_WORKER_OPEN_FILES};" >> /etc/nginx/nginx.conf +fi + +# Get the listen port for Nginx, default to 80 +USE_LISTEN_PORT=${LISTEN_PORT:-80} +# Modify Nignx config for listen port +if ! 
grep -q "listen ${USE_LISTEN_PORT};" /etc/nginx/nginx.conf ; then + sed -i -e "/server {/a\ listen ${USE_LISTEN_PORT};" /etc/nginx/nginx.conf +fi +exec "$@" diff --git a/Docker/python-nginx/python3.8-alpine3.11/logrotate-nginx.conf b/Docker/python-nginx/python3.8-alpine3.11/logrotate-nginx.conf new file mode 100644 index 000000000..fc6b7e3c3 --- /dev/null +++ b/Docker/python-nginx/python3.8-alpine3.11/logrotate-nginx.conf @@ -0,0 +1,9 @@ +# nginx log rotation +/var/log/nginx { + weekly + size 10M + postrotate + [ ! -f /var/run/nginx.pid ] || kill -USR1 `cat /var/run/nginx.pid` + endscript + rotate 5 +} diff --git a/Docker/python-nginx/python3.8-alpine3.11/nginx.conf b/Docker/python-nginx/python3.8-alpine3.11/nginx.conf new file mode 100755 index 000000000..52976c8e8 --- /dev/null +++ b/Docker/python-nginx/python3.8-alpine3.11/nginx.conf @@ -0,0 +1,52 @@ +user nginx; +worker_processes 1; + +error_log /var/log/nginx/error.log warn; +pid /var/run/nginx.pid; + + +events { + worker_connections 1024; +} + + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + ## + # Logging Settings + ## + log_format json '{"gen3log": "nginx", ' + '"date_access": "$time_iso8601", ' + '"user_id": "$http_x_userid", ' + '"request_id": "$http_x_reqid", ' + '"session_id": "$http_x_sessionid", ' + '"visitor_id": "$http_x_visitorid", ' + '"network_client_ip": "$http_x_forwarded_for", ' + '"network_bytes_write": $body_bytes_sent, ' + '"response_secs": $request_time, ' + '"http_status_code": $status, ' + '"http_request": "$request_uri", ' + '"http_verb": "$request_method", ' + '"http_referer": "$http_referer", ' + '"http_useragent": "$http_user_agent", ' + '"message": "$request"}'; + + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access_not_json.log main; + access_log /var/log/nginx/access.log json; + + sendfile 
on; + #tcp_nopush on; + + keepalive_timeout 65; + + #gzip on; + + include /etc/nginx/conf.d/*.conf; +} diff --git a/Docker/python-nginx/python3.8-alpine3.11/supervisord.ini b/Docker/python-nginx/python3.8-alpine3.11/supervisord.ini new file mode 100755 index 000000000..3a6323227 --- /dev/null +++ b/Docker/python-nginx/python3.8-alpine3.11/supervisord.ini @@ -0,0 +1,18 @@ +[supervisord] +nodaemon=true + +[program:uwsgi] +command=/usr/sbin/uwsgi --ini /etc/uwsgi/uwsgi.ini --die-on-term --need-app --plugin python3 +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 + +[program:nginx] +command=/usr/sbin/nginx +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +# Graceful stop, see http://nginx.org/en/docs/control.html +stopsignal=QUIT diff --git a/Docker/python-nginx/python3.8-alpine3.11/uwsgi.conf b/Docker/python-nginx/python3.8-alpine3.11/uwsgi.conf new file mode 100644 index 000000000..97c53335d --- /dev/null +++ b/Docker/python-nginx/python3.8-alpine3.11/uwsgi.conf @@ -0,0 +1,68 @@ +server { + listen 6567; + + root /var/www/metrics; + + location /aggregated_metrics { + types {} + default_type text/plain; + try_files $uri $uri/ /metrics.txt; + autoindex on; + access_log off; + } +} + +server { + listen 80; + + large_client_header_buffers 4 64k; + + location / { + uwsgi_param REMOTE_ADDR $http_x_forwarded_for if_not_empty; + uwsgi_param REMOTE_USER $http_x_userid if_not_empty; + uwsgi_param REMOTE_REQID $http_x_reqid if_not_empty; + uwsgi_param REMOTE_SESSIONID $http_x_sessionid if_not_empty; + uwsgi_param REMOTE_VISITORID $http_x_visitorid if_not_empty; + uwsgi_param GEN3_REQUEST_TIMESTAMP $msec; + uwsgi_param GEN3_TIMEOUT_SECONDS GEN3_UWSGI_TIMEOUT; + + include uwsgi_params; + uwsgi_pass unix:/var/run/gen3/uwsgi.sock; + uwsgi_read_timeout GEN3_UWSGI_TIMEOUT; + uwsgi_send_timeout GEN3_UWSGI_TIMEOUT; + } + + location /_status { + include uwsgi_params; 
+ uwsgi_pass unix:/var/run/gen3/uwsgi.sock; + uwsgi_param GEN3_REQUEST_TIMESTAMP $msec; + uwsgi_param GEN3_TIMEOUT_SECONDS GEN3_UWSGI_TIMEOUT; + uwsgi_read_timeout GEN3_UWSGI_TIMEOUT; + uwsgi_ignore_client_abort on; + access_log off; + } + + location /nginx_status { + stub_status; + allow 127.0.0.1; + deny all; + access_log off; + } + + location /uwsgi_status { + proxy_pass "http://127.0.0.1:9191"; + allow 127.0.0.1; + deny all; + access_log off; + } + + error_page 502 /502.html; + location /502.html { + return 504 '{"error": "Request Timeout or Service Unavailable"}'; + } + + error_page 504 /504.html; + location /504.html { + return 504 '{"error": "Request Timeout"}'; + } +} diff --git a/Docker/python-nginx/python3.8-alpine3.11/uwsgi.ini b/Docker/python-nginx/python3.8-alpine3.11/uwsgi.ini new file mode 100755 index 000000000..0cb4e6360 --- /dev/null +++ b/Docker/python-nginx/python3.8-alpine3.11/uwsgi.ini @@ -0,0 +1,6 @@ +[uwsgi] +socket = /tmp/uwsgi.sock +chown-socket = nginx:nginx +chmod-socket = 664 +# Graceful shutdown on SIGTERM, see https://github.com/unbit/uwsgi/issues/849#issuecomment-118869386 +hook-master-start = unix_signal:15 gracefully_kill_them_all diff --git a/Docker/python-nginx/python3.8-buster/Dockerfile b/Docker/python-nginx/python3.8-buster/Dockerfile new file mode 100644 index 000000000..01b2d7cf5 --- /dev/null +++ b/Docker/python-nginx/python3.8-buster/Dockerfile @@ -0,0 +1,169 @@ +# https://github.com/tiangolo/uwsgi-nginx-docker/blob/master/docker-images/python3.6.dockerfile +FROM quay.io/pcdc/python_3.8.12-buster:1.0.0 + + +# https://github.com/nginxinc/docker-nginx/blob/f958fbacada447737319e979db45a1da49123142/mainline/debian/Dockerfile +ENV NGINX_VERSION 1.21.1 +ENV NJS_VERSION 0.6.1 +ENV PKG_RELEASE 1~buster + +RUN set -x \ +# create nginx user/group first, to be consistent throughout docker variants + && addgroup --system --gid 102 nginx \ + && adduser --system --disabled-login --ingroup nginx --no-create-home 
--home /nonexistent --gecos "nginx user" --shell /bin/false --uid 102 nginx \ +# also add nginx user to gid 101 (ssh) groups + && adduser nginx ssh \ + && apt-get update \ + && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 ca-certificates \ + && \ + NGINX_GPGKEY=573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62; \ + found=''; \ + for server in \ + ha.pool.sks-keyservers.net \ + hkp://keyserver.ubuntu.com:80 \ + hkp://p80.pool.sks-keyservers.net:80 \ + pgp.mit.edu \ + ; do \ + echo "Fetching GPG key $NGINX_GPGKEY from $server"; \ + apt-key adv --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$NGINX_GPGKEY" && found=yes && break; \ + done; \ + test -z "$found" && echo >&2 "error: failed to fetch GPG key $NGINX_GPGKEY" && exit 1; \ + apt-get remove --purge --auto-remove -y gnupg1 && rm -rf /var/lib/apt/lists/* \ + && dpkgArch="$(dpkg --print-architecture)" \ + && nginxPackages=" \ + nginx=${NGINX_VERSION}-${PKG_RELEASE} \ + nginx-module-xslt=${NGINX_VERSION}-${PKG_RELEASE} \ + nginx-module-geoip=${NGINX_VERSION}-${PKG_RELEASE} \ + nginx-module-image-filter=${NGINX_VERSION}-${PKG_RELEASE} \ + nginx-module-njs=${NGINX_VERSION}+${NJS_VERSION}-${PKG_RELEASE} \ + " \ + && case "$dpkgArch" in \ + amd64|i386|arm64) \ +# arches officially built by upstream + echo "deb https://nginx.org/packages/mainline/debian/ buster nginx" >> /etc/apt/sources.list.d/nginx.list \ + && apt-get update \ + ;; \ + *) \ +# we're on an architecture upstream doesn't officially build for +# let's build binaries from the published source packages + echo "deb-src https://nginx.org/packages/mainline/debian/ buster nginx" >> /etc/apt/sources.list.d/nginx.list \ + \ +# new directory for storing sources and .deb files + && tempDir="$(mktemp -d)" \ + && chmod 777 "$tempDir" \ +# (777 to ensure APT's "_apt" user can access it too) + \ +# save list of currently-installed packages so build dependencies can be cleanly removed later + && savedAptMark="$(apt-mark showmanual)" \ + 
\ +# build .deb files from upstream's source packages (which are verified by apt-get) + && apt-get update \ + && apt-get build-dep -y $nginxPackages \ + && ( \ + cd "$tempDir" \ + && DEB_BUILD_OPTIONS="nocheck parallel=$(nproc)" \ + apt-get source --compile $nginxPackages \ + ) \ +# we don't remove APT lists here because they get re-downloaded and removed later + \ +# reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies +# (which is done after we install the built packages so we don't have to redownload any overlapping dependencies) + && apt-mark showmanual | xargs apt-mark auto > /dev/null \ + && { [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; } \ + \ +# create a temporary local APT repo to install from (so that dependency resolution can be handled by APT, as it should be) + && ls -lAFh "$tempDir" \ + && ( cd "$tempDir" && dpkg-scanpackages . > Packages ) \ + && grep '^Package: ' "$tempDir/Packages" \ + && echo "deb [ trusted=yes ] file://$tempDir ./" > /etc/apt/sources.list.d/temp.list \ +# work around the following APT issue by using "Acquire::GzipIndexes=false" (overriding "/etc/apt/apt.conf.d/docker-gzip-indexes") +# Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied) +# ... 
+# E: Failed to fetch store:/var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages Could not open file /var/lib/apt/lists/partial/_tmp_tmp.ODWljpQfkE_._Packages - open (13: Permission denied) + && apt-get -o Acquire::GzipIndexes=false update \ + ;; \ + esac \ + \ + && apt-get install --no-install-recommends --no-install-suggests -y \ + $nginxPackages \ + gettext-base \ + curl \ + && apt-get remove --purge --auto-remove -y && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list \ + \ +# if we have leftovers from building, let's purge them (including extra, unnecessary build deps) + && if [ -n "$tempDir" ]; then \ + apt-get purge -y --auto-remove \ + && rm -rf "$tempDir" /etc/apt/sources.list.d/temp.list; \ + fi \ +# forward request and error logs to docker log collector + && ln -sf /dev/stdout /var/log/nginx/access.log \ + && ln -sf /dev/stderr /var/log/nginx/error.log \ +# create a docker-entrypoint.d directory + && mkdir /docker-entrypoint.d + +EXPOSE 80 + +# # Expose 443, in case of LTS / HTTPS +EXPOSE 443 + +# install uwsgi +# https://uwsgi-docs.readthedocs.io/en/latest/Install.html +RUN python -m pip install --upgrade pip +RUN pip install uwsgi + +# Remove default configuration from Nginx +RUN rm /etc/nginx/conf.d/default.conf +# Copy the base uWSGI ini file to enable default dynamic uwsgi process number +COPY uwsgi.ini /etc/uwsgi/ + +COPY uwsgi.conf /etc/nginx/sites-available/ + +RUN ln -s /etc/nginx/sites-available/uwsgi.conf /etc/nginx/conf.d/uwsgi.conf + +# Install Supervisord +RUN apt-get update && apt-get install -y supervisor \ +&& rm -rf /var/lib/apt/lists/* +# Custom Supervisord config +COPY supervisord.ini /etc/supervisor.d/supervisord.ini + +# Which uWSGI .ini file should be used, to make it customizable +ENV UWSGI_INI /app/uwsgi.ini + +# By default, disable uwsgi cheaper mode and run 2 processes. +# If UWSGI_CHEAPER=N and UWSGI_PROCESSES=M, N is the min and M is the max +# number of processes. 
UWSGI_CHEAPER must be lower than UWSGI_PROCESSES. +# We set them here instead of in uwsgi.ini so that they can be overwritten. +ENV UWSGI_CHEAPER= +ENV UWSGI_PROCESSES=2 + +# By default, allow unlimited file sizes, modify it to limit the file sizes +# To have a maximum of 1 MB (Nginx's default) change the line to: +# ENV NGINX_MAX_UPLOAD 1m +ENV NGINX_MAX_UPLOAD 0 + +# By default, Nginx will run a single worker process, setting it to auto +# will create a worker for each CPU core +ENV NGINX_WORKER_PROCESSES 1 + +# By default, Nginx listens on port 80. +# To modify this, change LISTEN_PORT environment variable. +# (in a Dockerfile or with an option for `docker run`) +ENV LISTEN_PORT 80 + +# Copy the entrypoint that will generate Nginx additional configs +COPY entrypoint.sh /entrypoint.sh +COPY logrotate-nginx.conf /etc/logrotate.d/nginx +RUN chmod +x /entrypoint.sh + +ENV PATH="/root/.cargo/bin:${PATH}" + +COPY dockerrun.sh /dockerrun.sh +RUN mkdir -p /var/www/metrics/ && chmod +x /dockerrun.sh + +ENTRYPOINT ["sh", "/entrypoint.sh"] + +# Add demo app +COPY ./app /app +WORKDIR /app + +CMD ["/usr/bin/supervisord"] diff --git a/Docker/python-nginx/python3.8-buster/app/main.py b/Docker/python-nginx/python3.8-buster/app/main.py new file mode 100755 index 000000000..216c258f4 --- /dev/null +++ b/Docker/python-nginx/python3.8-buster/app/main.py @@ -0,0 +1,4 @@ +def application(env, start_response): + start_response('200 OK', [('Content-Type', 'text/html')]) + return [b"Hello World from a default Nginx uWSGI Python 3.6 app in a\ + Docker container (default)"] diff --git a/Docker/python-nginx/python3.8-buster/app/uwsgi.ini b/Docker/python-nginx/python3.8-buster/app/uwsgi.ini new file mode 100755 index 000000000..8a29f7350 --- /dev/null +++ b/Docker/python-nginx/python3.8-buster/app/uwsgi.ini @@ -0,0 +1,2 @@ +[uwsgi] +wsgi-file=/app/main.py diff --git a/Docker/python-nginx/python3.8-buster/dockerrun.sh b/Docker/python-nginx/python3.8-buster/dockerrun.sh new file mode 100644 
index 000000000..91f79d302 --- /dev/null +++ b/Docker/python-nginx/python3.8-buster/dockerrun.sh @@ -0,0 +1,102 @@ +#!/bin/sh +# +# Note: base alpine Linux image may not include bash shell, +# and we probably want to move to that for service images, +# so just use Bourne shell ... + +# +# Update certificate authority index - +# environment may have mounted more authorities +# - ex: /usr/local/share/ca-certificates/cdis-ca.crt into system bundle +# + +GEN3_DEBUG="${GEN3_DEBUG:-False}" +GEN3_DRYRUN="${GEN3_DRYRUN:-False}" +GEN3_UWSGI_TIMEOUT="${GEN3_UWSGI_TIMEOUT:-45s}" + +run() { + if [ "$GEN3_DRYRUN" = True ]; then + echo "DRY RUN - not running: $@" + else + echo "Running $@" + "$@" + fi +} + +help() { + cat - <> ./wsgi.py +fi + +if [[ -z $DD_ENABLED ]]; then +( + run uwsgi --ini /etc/uwsgi/uwsgi.ini +) & +else +pip install ddtrace +echo "import=ddtrace.bootstrap.sitecustomize" >> /etc/uwsgi/uwsgi.ini +( + ddtrace-run uwsgi --enable-threads --ini /etc/uwsgi/uwsgi.ini +) & +fi + +run nginx -g 'daemon off;' +wait diff --git a/Docker/python-nginx/python3.8-buster/entrypoint.sh b/Docker/python-nginx/python3.8-buster/entrypoint.sh new file mode 100755 index 000000000..2a6d72f6f --- /dev/null +++ b/Docker/python-nginx/python3.8-buster/entrypoint.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env sh +set -e + +rate_limit="" +if [ ! -z $NGINX_RATE_LIMIT ]; then + echo "Found NGINX_RATE_LIMIT environment variable..." + if [ ! -z $OVERRIDE_NGINX_RATE_LIMIT ]; then + rate_limit=$OVERRIDE_NGINX_RATE_LIMIT + echo "Overriding Nginx rate limit with new value ${rate_limit}..." + else + rate_limit=$NGINX_RATE_LIMIT + echo "Applying Nginx rate limit from k8s deployment descriptor..." 
+ fi + + # Add rate_limit config + rate_limit_conf="\ \ \ \ limit_req_zone \$binary_remote_addr zone=one:10m rate=${rate_limit}r/s;" + sed -i "/http\ {/a ${rate_limit_conf}" /etc/nginx/nginx.conf + if [ -f /etc/nginx/sites-available/uwsgi.conf ]; then + limit_req_config="\ \ \ \ \ \ \ \ limit_req zone=one;" + sed -i "/location\ \/\ {/a ${limit_req_config}" /etc/nginx/sites-available/uwsgi.conf + fi +fi + +# Get the maximum upload file size for Nginx, default to 0: unlimited +USE_NGINX_MAX_UPLOAD=${NGINX_MAX_UPLOAD:-0} +# Generate Nginx config for maximum upload file size +echo "client_max_body_size $USE_NGINX_MAX_UPLOAD;" > /etc/nginx/conf.d/upload.conf + +# Explicitly add installed Python packages and uWSGI Python packages to PYTHONPATH +# Otherwise uWSGI can't import Flask +export PYTHONPATH=$PYTHONPATH:/usr/local/lib/python3.6/site-packages:/usr/lib/python3.6/site-packages + +# Get the number of workers for Nginx, default to 1 +USE_NGINX_WORKER_PROCESSES=${NGINX_WORKER_PROCESSES:-1} +# Modify the number of worker processes in Nginx config +sed -i "/worker_processes\s/c\worker_processes ${USE_NGINX_WORKER_PROCESSES};" /etc/nginx/nginx.conf + +# Set the max number of connections per worker for Nginx, if requested +# Cannot exceed worker_rlimit_nofile, see NGINX_WORKER_OPEN_FILES below +if [ -n "$NGINX_WORKER_CONNECTIONS" ] ; then + sed -i "/worker_connections\s/c\ worker_connections ${NGINX_WORKER_CONNECTIONS};" /etc/nginx/nginx.conf +fi + +# Set the max number of open file descriptors for Nginx workers, if requested +if [ -n "$NGINX_WORKER_OPEN_FILES" ] ; then + echo "worker_rlimit_nofile ${NGINX_WORKER_OPEN_FILES};" >> /etc/nginx/nginx.conf +fi + +# Get the listen port for Nginx, default to 80 +USE_LISTEN_PORT=${LISTEN_PORT:-80} +# Modify Nginx config for listen port +if !
grep -q "listen ${USE_LISTEN_PORT};" /etc/nginx/nginx.conf ; then + sed -i -e "/server {/a\ listen ${USE_LISTEN_PORT};" /etc/nginx/nginx.conf +fi +exec "$@" diff --git a/Docker/python-nginx/python3.8-buster/logrotate-nginx.conf b/Docker/python-nginx/python3.8-buster/logrotate-nginx.conf new file mode 100644 index 000000000..fc6b7e3c3 --- /dev/null +++ b/Docker/python-nginx/python3.8-buster/logrotate-nginx.conf @@ -0,0 +1,9 @@ +# nginx log rotation +/var/log/nginx { + weekly + size 10M + postrotate + [ ! -f /var/run/nginx.pid ] || kill -USR1 `cat /var/run/nginx.pid` + endscript + rotate 5 +} diff --git a/Docker/python-nginx/python3.8-buster/nginx.conf b/Docker/python-nginx/python3.8-buster/nginx.conf new file mode 100755 index 000000000..52976c8e8 --- /dev/null +++ b/Docker/python-nginx/python3.8-buster/nginx.conf @@ -0,0 +1,52 @@ +user nginx; +worker_processes 1; + +error_log /var/log/nginx/error.log warn; +pid /var/run/nginx.pid; + + +events { + worker_connections 1024; +} + + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + ## + # Logging Settings + ## + log_format json '{"gen3log": "nginx", ' + '"date_access": "$time_iso8601", ' + '"user_id": "$http_x_userid", ' + '"request_id": "$http_x_reqid", ' + '"session_id": "$http_x_sessionid", ' + '"visitor_id": "$http_x_visitorid", ' + '"network_client_ip": "$http_x_forwarded_for", ' + '"network_bytes_write": $body_bytes_sent, ' + '"response_secs": $request_time, ' + '"http_status_code": $status, ' + '"http_request": "$request_uri", ' + '"http_verb": "$request_method", ' + '"http_referer": "$http_referer", ' + '"http_useragent": "$http_user_agent", ' + '"message": "$request"}'; + + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access_not_json.log main; + access_log /var/log/nginx/access.log json; + + sendfile on; + #tcp_nopush on; + + 
keepalive_timeout 65; + + #gzip on; + + include /etc/nginx/conf.d/*.conf; +} diff --git a/Docker/python-nginx/python3.8-buster/supervisord.ini b/Docker/python-nginx/python3.8-buster/supervisord.ini new file mode 100755 index 000000000..3a6323227 --- /dev/null +++ b/Docker/python-nginx/python3.8-buster/supervisord.ini @@ -0,0 +1,18 @@ +[supervisord] +nodaemon=true + +[program:uwsgi] +command=/usr/sbin/uwsgi --ini /etc/uwsgi/uwsgi.ini --die-on-term --need-app --plugin python3 +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 + +[program:nginx] +command=/usr/sbin/nginx +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +# Graceful stop, see http://nginx.org/en/docs/control.html +stopsignal=QUIT diff --git a/Docker/python-nginx/python3.8-buster/uwsgi.conf b/Docker/python-nginx/python3.8-buster/uwsgi.conf new file mode 100644 index 000000000..97c53335d --- /dev/null +++ b/Docker/python-nginx/python3.8-buster/uwsgi.conf @@ -0,0 +1,68 @@ +server { + listen 6567; + + root /var/www/metrics; + + location /aggregated_metrics { + types {} + default_type text/plain; + try_files $uri $uri/ /metrics.txt; + autoindex on; + access_log off; + } +} + +server { + listen 80; + + large_client_header_buffers 4 64k; + + location / { + uwsgi_param REMOTE_ADDR $http_x_forwarded_for if_not_empty; + uwsgi_param REMOTE_USER $http_x_userid if_not_empty; + uwsgi_param REMOTE_REQID $http_x_reqid if_not_empty; + uwsgi_param REMOTE_SESSIONID $http_x_sessionid if_not_empty; + uwsgi_param REMOTE_VISITORID $http_x_visitorid if_not_empty; + uwsgi_param GEN3_REQUEST_TIMESTAMP $msec; + uwsgi_param GEN3_TIMEOUT_SECONDS GEN3_UWSGI_TIMEOUT; + + include uwsgi_params; + uwsgi_pass unix:/var/run/gen3/uwsgi.sock; + uwsgi_read_timeout GEN3_UWSGI_TIMEOUT; + uwsgi_send_timeout GEN3_UWSGI_TIMEOUT; + } + + location /_status { + include uwsgi_params; + uwsgi_pass unix:/var/run/gen3/uwsgi.sock; + 
uwsgi_param GEN3_REQUEST_TIMESTAMP $msec; + uwsgi_param GEN3_TIMEOUT_SECONDS GEN3_UWSGI_TIMEOUT; + uwsgi_read_timeout GEN3_UWSGI_TIMEOUT; + uwsgi_ignore_client_abort on; + access_log off; + } + + location /nginx_status { + stub_status; + allow 127.0.0.1; + deny all; + access_log off; + } + + location /uwsgi_status { + proxy_pass "http://127.0.0.1:9191"; + allow 127.0.0.1; + deny all; + access_log off; + } + + error_page 502 /502.html; + location /502.html { + return 504 '{"error": "Request Timeout or Service Unavailable"}'; + } + + error_page 504 /504.html; + location /504.html { + return 504 '{"error": "Request Timeout"}'; + } +} diff --git a/Docker/python-nginx/python3.8-buster/uwsgi.ini b/Docker/python-nginx/python3.8-buster/uwsgi.ini new file mode 100755 index 000000000..0cb4e6360 --- /dev/null +++ b/Docker/python-nginx/python3.8-buster/uwsgi.ini @@ -0,0 +1,6 @@ +[uwsgi] +socket = /tmp/uwsgi.sock +chown-socket = nginx:nginx +chmod-socket = 664 +# Graceful shutdown on SIGTERM, see https://github.com/unbit/uwsgi/issues/849#issuecomment-118869386 +hook-master-start = unix_signal:15 gracefully_kill_them_all diff --git a/Jenkinsfile b/Jenkinsfile index 9ce47aa1f..ed040b487 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -217,7 +217,7 @@ spec: metricsHelper.writeMetricWithResult(STAGE_NAME, false) pipelineHelper.handleError(ex) } - metricsHelper.writeMetricWithResult(STAGE_NAME, true) + metricsHelper.writeMetricWithResult(STAGE_NAME, true) } } } @@ -305,7 +305,7 @@ spec: } } - stage('python 3 base image dockerrun.sh test') { + stage('python 3 legacy alpine base image dockerrun.sh test') { steps { script { try { @@ -325,6 +325,26 @@ spec: } } + stage('python 3 buster base image dockerrun.sh test') { + steps { + script { + try { + if(!skipUnitTests) { + dir('cloud-automation/Docker/python-nginx/python3.6-buster') { + sh 'sh dockerrun.sh --dryrun=True' + } + } else { + Utils.markStageSkippedForConditional(STAGE_NAME) + } + } catch (ex) { + 
metricsHelper.writeMetricWithResult(STAGE_NAME, false) + pipelineHelper.handleError(ex) + } + metricsHelper.writeMetricWithResult(STAGE_NAME, true) + } + } + } + stage('WaitForQuayBuild') { steps { script { @@ -345,7 +365,7 @@ spec: } } } - + stage('SelectNamespace') { steps { script { @@ -381,7 +401,7 @@ spec: } } catch (ex) { metricsHelper.writeMetricWithResult(STAGE_NAME, false) - pipelineHelper.handleError(ex) + pipelineHelper.handleError(ex) } metricsHelper.writeMetricWithResult(STAGE_NAME, true) } @@ -412,7 +432,7 @@ spec: metricsHelper.writeMetricWithResult(STAGE_NAME, true) } } - } + } stage('VerifyClusterHealth') { steps { @@ -438,7 +458,7 @@ spec: steps { script { try { - if(!doNotRunTests) { + if(!doNotRunTests) { testHelper.simulateData(kubectlNamespace) } else { Utils.markStageSkippedForConditional(STAGE_NAME) diff --git a/README.md b/README.md index db8cafea6..3990c2483 100644 --- a/README.md +++ b/README.md @@ -235,4 +235,3 @@ We have two automation scripts: The latest versions of the customized ubuntu 16.04 AMI's required by the terraform automation are published as public images under the AWS `cdis-test` account. Build new AMIs using [images](https://github.com/uc-cdis/images). 
- diff --git a/doc/gen3-sql-queries.md b/doc/gen3-sql-queries.md index c820c613b..6cca0ec3d 100644 --- a/doc/gen3-sql-queries.md +++ b/doc/gen3-sql-queries.md @@ -2,21 +2,21 @@ ## Fence Database -### Get All User Access by Username and Project.auth_id, include Authorization Source name +### Get All User Access by Username and Project.auth_id, include Authorization Source name and Identity Provider ```sql -select "User".username, project.auth_id, authorization_provider.name from access_privilege INNER JOIN "User" on access_privilege.user_id="User".id INNER JOIN project on access_privilege.project_id=project.id INNER JOIN authorization_provider on access_privilege.provider_id=authorization_provider.id ORDER BY "User".username; +select "User".username, project.auth_id, authorization_provider.name as authz_provider, identity_provider.name as idp from access_privilege INNER JOIN "User" on access_privilege.user_id="User".id INNER JOIN project on access_privilege.project_id=project.id INNER JOIN authorization_provider on access_privilege.provider_id=authorization_provider.id INNER JOIN identity_provider on "User".idp_id=identity_provider.id ORDER BY "User".username; ``` Example output: ```console - username | auth_id | name -----------------------------------+-----------+------- - USER_A | test1 | fence - USER_A | test2 | dbGaP - USER_B | test1 | fence - USER_B | test2 | fence - USER_B | test3 | dbGaP - USER_C | test2 | dbGaP + username | auth_id | authz_provider | idp | +----------------------------------+-----------+------------------+--------+ + USER_A | test1 | fence | ras | + USER_A | test2 | dbGaP | ras | + USER_B | test1 | fence | google | + USER_B | test2 | fence | google | + USER_B | test3 | dbGaP | google | + USER_C | test2 | dbGaP | ras | ``` diff --git a/files/authorized_keys/squid_authorized_keys_user b/files/authorized_keys/squid_authorized_keys_user index 0ff820de4..1fcbf3858 100644 --- a/files/authorized_keys/squid_authorized_keys_user +++ 
b/files/authorized_keys/squid_authorized_keys_user @@ -16,3 +16,5 @@ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXbyRLslcUSRwIxCYVqYsUDlvC0DR2woW95ID9Qkox ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDT5VxB1A2JOc3MurPSVH9U6x49PCZfaHgJD1FbKXgPvCrwvm5mS18Mgawai5SE3nL8KzjTMEUtoY3yl7Y9aHKY4JH3fnUIQfapGTKgzVMLOzRguD5XkZxEn8e2DU5/Tj1QLplAA8cip4mg4dOFVWZSG/0nQl7UI9tnLdoLQz1L37XX0cp4ra4FJN4xFIuE8ISxRHOBeuQ2S9wWuczuF3w17ciRPtkPQnNdSi4rS9o67FtGVTNhIPS5jjqlr6qBqNBz9u+AfzhLHuMTZ3Keb/ZtBoafKnQsU/F/YyxD7hDoGOd9e3orcO2gmKJOb8CC0Uv7aMLpANTvIDQ4nVVPYHyR+cxLH+T9EI20lANK18zJgFxYmiMiLTSaquYS5tK2l8pdNh8C/1bMdpgzdY1X+4UeTZ50Xm3LZMpg2vg1WgAoJkikAhvegRAistqbxDXfhPJOmr7B4JRg1mDPx8RMrc3+lkgbachMmQHQd05inzxCR2q2Y6huLVRW81dddSzILhGeayT2S4sGutCb1/XopvBSf9M1ZTrJtWVqNiWfiJHS6p+ji6DvO8mt6HWOmBcPV5a5icDF4S+FZf1q1MneUv4PksMexNbvd2RXdpcidkDJGgXZOkDGBbr5DZ+o/QC1dCF4zbYIY8DO+9DxjexBTMMYaYnr/ohkZ3OPDNn9P9WoyQ== dev@test.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKJR5N5VIU9qdSfCtlskzuQ7A5kNn8YPeXsoKq0HhYZSd4Aq+7gZ0tY0dFUKtXLpJsQVDTflINc7sLDDXNp3icuSMmxOeNgvBfi8WnzBxcATh3uqidPqE0hcnhVQbpsza1zk8jkOB2o8FfBdDTOSbgPESv/1dnGApfkZj96axERUCMzyyUSEmif2moWJaVv2Iv7O+xjQqIZcMXiAo5BCnTCFFKGVOphy65cOsbcE02tEloiZ3lMAPMamZGV7SMQiD3BusncnVctn/E1vDqeozItgDrTdajKqtW0Mt6JFONVFobzxS8AsqFwaHiikOZhKq2LoqgvbXZvNWH2zRELezP jawadq@Jawads-MacBook-Air.local ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC3vyd6a7tsANi149ylPQYS8Gsp/SxJyhdK/j6arv77KbM0EIzzUiclFLnMKcqUQ263FrPyx3a3UP80R77ayCnwcEHrxlJrYfyFUva8vtmI9mu8VE7oXvuR/jcOyXM9NosxyYacL/p6W5X4r8tqo/gJFjmls1YRfu3JPlTgTT0VzGJu+B6rLEsw53c37VVzSaCtu/jBOjyxI1/UaNg1cd+hcfoQxJ9zSDqqE7ZUNOc3zHP+1AGYCQ/CJsNrDl2OkppIdC9He5jgjLhyD7yvyarI+oF05oHknol/K1hXK+yxIkF2Ou5krfjw7TMBvD+JbQVb35vL9acXFF20+lHLRLbobPU/6ZZTup3q7IRm5OWaL2CJtYZbJvicKW0Ep+vTzaiQjK71L6UxcIvnzvbP9Dnatv1GBMMDaQxAa4Lood8NG2ty1yfLN972akGqBlwJASXMRd/ogzxv2KSH9w6HHYoc2WpDhUtNHmjwX1FSLYPW3qx5ICMW6j9gR2u1tG4Ohzp1CmYVElnRHbnBrTkLde65Vqedk2tQy8fcopH59ZASIuR4GbhCb2SiNkr1PHEvfhLMzg/UCSnnhX9vUNhkPjJRy/bdL3pOt/77lpIQUqQBArOiZmfG8OD0q4+3Nr+c9v5bSSvynjirlKk+wb8sKyOoSAXdFeovL/A0BUKUjCtsXQ== dev@test.com +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQChK/8JjpUeWcF/1Ea2M4mSbLz1tOfpq74xD2USxE54kx7VoN1G7ylV76yqSIeRq1e7PPBEg5ZD1aXUJnlI32RwLJ5kaHnoB82Ta+Fv1B/vVoHCObcALfiHPpwPf1kM2liWEB0EhYcz1OUv3YQriPqjiRoWfnbw60GIyzhpWZhKRq0zlISOaTYdV9kafX+N7M6/gSU0632TgUwwsStYrffEleyrC/Lh+4UaESozWoPFiZLl2eMCKfZNFBB99HTFifImW2yC6Ag1QhCd1i3NpfiYuaSDH7WR3slPRSd8DiUAwGC2DkIuWPp3bhaAv2V4mtLIBAaTZsINIACB2+w7yf9yvCGtdobCmp4AA7ik9rEkRLk/Jff0YBHd6Z4qyIuRht3ZeWXIYSK1zOlPfs4lPUgvbjlPgMVFV2CrvOTnS+YZdW+8AklwRC3HDPD8wv3H/eGxl3K0vHWTBbTb774nVNfRDw81wcezCXFNUn4p2he7fgKcxs/rnMsYUcY8JJNR7Iz+NNIGUCom6HFwCMQdangFMHUW5TxxrlJcwVRaAns1M6g3ilYO+uvN/XsgCpZWYWnv5rBk8qz6dBM7gpc8tSr6Hvr7/vlghF3jpL+mQiW+7vUL+UZrUFNyoacUcQ+NuxKacHtHQKuRDyWofp+CB2b2a744F3mpkxx74HIkiZ72mQ== dev@test.com +ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDDTX+pQvGrQVXmHGDhBP+632tgbb1j+BQWkrsUkDJGzwFiGs4dgqDs2eC+aDVq2LFz4xj0SgussFAKciB45OgmSZKX5yUE3Oo/lqov0Bb5f85iBHGv/X/JiuIYaq8GJklVyyo1sfKLUK1SOal6bE1WofezyTyDsdrHjIU50quzW7nB1CmL6rekIv/+df/seut4b3De1d2uX5WGGtcvQ5yTSgBW5aabMAJ2V9WlP/6Dw040Kq0MyKV01cIJ1HAjFhP58gbf3Eytz3AqqJVT6u0QroxhesCgKTyGcAyYy3airI/N0FHdC5oABVEJ6dKyy1rYvOchuxYeVMVVWn0vS7mZ+vP7dqaDmgEUU2qmTPBQZV2xBWCdpfyUYYARW2JzlEaySbmA+yoxFBsquunVbIgUGNEUbxefsFdM3k5pS6I1uuEM0ATYH5iNz84nKKCcksGlib0i/pEtra6N/mFF7yjHYBRb/E/VCZig0gKezDJWu/DO0emJA+kdQpqp48U+qFrSWkuiO0dCQYl3VCVo8vedgMGPjr8MbUjU7o8W1+DYyjFM8HYMknRNdVAqAoK+cedw9mAWVGpKFrl61caGTFck0634nAVFUmfGTh9XRaZeFdDnivxnqP837gcsdKnEGYnkrxWap97XeXzK0P0Svy1zBfUQyzU5vrHfHt2H7ILDMw== prodv1-usersync-sftp diff --git a/gen3/bin/db.sh b/gen3/bin/db.sh index dc480ec22..ff3e6f6cc 100644 --- a/gen3/bin/db.sh +++ b/gen3/bin/db.sh @@ -588,6 +588,7 @@ gen3_db_encrypt() { gen3 db backup gdcapi > $dumpDir/gdcapidb-backup.sql gen3 db backup arborist > $dumpDir/arborist-backup.sql gen3 db backup metadata > $dumpDir/metadata-backup.sql + gen3 db backup gearbox > $dumpDir/gearbox-backup.sql gen3 db backup wts > $dumpDir/wts-backup.sql gen3 db backup requestor > $dumpDir/requestor-backup.sql gen3 db backup audit > $dumpDir/audit-backup.sql @@ -616,7 +617,7 @@ gen3_db_encrypt() { gen3 tfplan gen3 tfapply - # Use sed to update all secrets, remove the arborist and metadata g3auto folders to recreate those db's then run kube-setup-secrets + # Use sed to update all secrets, remove the arborist, gearbox and metadata g3auto folders to recreate those db's then run kube-setup-secrets local newFenceDbUrl=$(aws rds describe-db-instances --filters '{"Name": "db-instance-id", "Values": ["'$vpc_name'-encrypted-fencedb"]}' | jq -r .DBInstances[0].Endpoint.Address) local newAmanuensisDbUrl=$(aws rds describe-db-instances --filters '{"Name": "db-instance-id", "Values": ["'$vpc_name'-encrypted-amanuensisdb"]}' | jq -r .DBInstances[0].Endpoint.Address) local 
newIndexdDbUrl=$(aws rds describe-db-instances --filters '{"Name": "db-instance-id", "Values": ["'$vpc_name'-encrypted-indexddb"]}' | jq -r .DBInstances[0].Endpoint.Address) @@ -635,6 +636,7 @@ gen3_db_encrypt() { g3kubectl delete cronjob gitops-sync mv "$(gen3_secrets_folder)"/g3auto/arborist "$(gen3_secrets_folder)"/g3auto/arb-backup mv "$(gen3_secrets_folder)"/g3auto/metadata "$(gen3_secrets_folder)"/g3auto/mtdta-backup + mv "$(gen3_secrets_folder)"/g3auto/gearbox "$(gen3_secrets_folder)"/g3auto/gear-backup mv "$(gen3_secrets_folder)"/g3auto/wts "$(gen3_secrets_folder)"/g3auto/wts-backup mv "$(gen3_secrets_folder)"/g3auto/requestor "$(gen3_secrets_folder)"/g3auto/requestor-backup mv "$(gen3_secrets_folder)"/g3auto/audit "$(gen3_secrets_folder)"/g3auto/audit-backup @@ -647,6 +649,10 @@ gen3_db_encrypt() { g3kubectl delete secret metadata-g3auto gen3 db setup metadata fi + if [[ -d "$(gen3_secrets_folder)"/g3auto/gear-backup ]]; then + g3kubectl delete secret gearbox-g3auto + gen3 db setup gearbox + fi if [[ -d "$(gen3_secrets_folder)"/g3auto/wts-backup ]]; then g3kubectl delete secret wts-g3auto gen3 db setup wts @@ -677,6 +683,9 @@ gen3_db_encrypt() { gen3_log_info "restoring metadata db" gen3_db_reset "metadata" gen3 psql metadata < $dumpDir/metadata-backup.sql + gen3_log_info "restoring gearbox db" + gen3_db_reset "gearbox" + gen3 psql gearbox < $dumpDir/gearbox-backup.sql gen3_log_info "restoring wts db" gen3_db_reset "wts" gen3 psql wts < $dumpDir/wts-backup.sql @@ -838,6 +847,9 @@ if [[ -z "$GEN3_SOURCE_ONLY" ]]; then "list") gen3_db_list "$@" ;; + "init") + gen3_db_init "$@" + ;; "namespace") #simplify testing gen3_db_namespace ;; diff --git a/gen3/bin/kube-roll-all.sh b/gen3/bin/kube-roll-all.sh index 84a3098a8..c024b4717 100644 --- a/gen3/bin/kube-roll-all.sh +++ b/gen3/bin/kube-roll-all.sh @@ -227,6 +227,8 @@ fi gen3 kube-setup-metadata & +gen3 kube-setup-gearbox & + if g3k_manifest_lookup .versions.ssjdispatcher 2>&1 /dev/null; then gen3 
kube-setup-ssjdispatcher & fi diff --git a/gen3/bin/kube-setup-access-backend.sh b/gen3/bin/kube-setup-access-backend.sh index 339f45aba..770d1d5c2 100644 --- a/gen3/bin/kube-setup-access-backend.sh +++ b/gen3/bin/kube-setup-access-backend.sh @@ -187,6 +187,8 @@ authz: - name: open - description: commons /mds-admin name: mds_gateway + - description: commons /gearbox-admin + name: gearbox_gateway - name: services subresources: - name: sheepdog @@ -276,6 +278,12 @@ authz: - /mds_gateway role_ids: - mds_user + - description: be able to use gearbox service + id: gearbox_admin + resource_paths: + - /gearbox_gateway + role_ids: + - gearbox_user - description: CRUD access to programs and projects id: services.sheepdog-admin resource_paths: @@ -369,6 +377,12 @@ authz: method: access service: mds_gateway id: mds_access + - id: gearbox_user + permissions: + - action: + method: access + service: gearbox_gateway + id: gearbox_access - description: sheepdog admin role for program project crud id: sheepdog_admin permissions: diff --git a/gen3/bin/kube-setup-gearbox.sh b/gen3/bin/kube-setup-gearbox.sh new file mode 100644 index 000000000..e4ee5455f --- /dev/null +++ b/gen3/bin/kube-setup-gearbox.sh @@ -0,0 +1,85 @@ +#!/bin/bash +# +# Deploy the gearbox service. +# + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/gen3setup" + +setup_database() { + gen3_log_info "setting up gearbox service ..." + + if g3kubectl describe secret gearbox-g3auto > /dev/null 2>&1; then + gen3_log_info "gearbox-g3auto secret already configured" + return 0 + fi + if [[ -n "$JENKINS_HOME" || ! -f "$(gen3_secrets_folder)/creds.json" ]]; then + gen3_log_err "skipping db setup in non-adminvm environment" + return 0 + fi + # Setup .env file that gearbox-service consumes + if [[ ! -f "$secretsFolder/gearbox.env" || ! -f "$secretsFolder/base64Authz.txt" ]]; then + local secretsFolder="$(gen3_secrets_folder)/g3auto/gearbox" + if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then + if !
gen3 db setup gearbox; then + gen3_log_err "Failed setting up database for gearbox service" + return 1 + fi + fi + if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then + gen3_log_err "dbcreds not present in Gen3Secrets/" + return 1 + fi + + # go ahead and rotate the password whenever we regen this file + local password="$(gen3 random)" + cat - > "$secretsFolder/gearbox.env" < "$secretsFolder/base64Authz.txt" + fi + gen3 secrets sync 'setup gearbox-g3auto secrets' +} + +if ! g3k_manifest_lookup .versions.gearbox 2> /dev/null; then + gen3_log_info "kube-setup-gearbox exiting - gearbox service not in manifest" + exit 0 +fi + +if ! setup_database; then + gen3_log_err "kube-setup-gearbox bailing out - database failed setup" + exit 1 +fi + +# The gearbox-config secret is a collection of arbitrary files at /gearbox +# Today, we only care about that secret if the directory exists. See gearbox-deploy and that +# this secret will be marked as optional for the pod, so it is OK if this secret is not created. +if [ -d "$(dirname $(g3k_manifest_path))/gearbox" ]; then + if g3kubectl get secrets gearbox-config > /dev/null 2>&1; then + # We want to re-create this on every setup to pull the latest state. + g3kubectl delete secret gearbox-config + fi +fi + +# Sync the manifest config from manifest.json (or manifests/gearbox.json) to the k8s config map. +# This may not actually create the manifest-gearbox config map if the user did not specify any gearbox +# keys in their manifest configuration. 
+gen3 gitops configmaps + + +gen3 roll gearbox +g3kubectl apply -f "${GEN3_HOME}/kube/services/gearbox/gearbox-service.yaml" + +if [[ -z "$GEN3_ROLL_ALL" ]]; then + gen3 kube-setup-networkpolicy + gen3 kube-setup-revproxy +fi + +gen3_log_info "The gearbox service has been deployed onto the kubernetes cluster" +gen3_log_info "test with: curl https://commons-host/gearbox/_status" diff --git a/gen3/bin/kube-setup-hatchery.sh b/gen3/bin/kube-setup-hatchery.sh index 7a044dd64..e4d537f2f 100644 --- a/gen3/bin/kube-setup-hatchery.sh +++ b/gen3/bin/kube-setup-hatchery.sh @@ -20,12 +20,51 @@ gen3 jupyter j-namespace setup # (g3k_kv_filter ${GEN3_HOME}/kube/services/hatchery/serviceaccount.yaml BINDING_ONE "name: hatchery-binding1-$namespace" BINDING_TWO "name: hatchery-binding2-$namespace" CURRENT_NAMESPACE "namespace: $namespace" | g3kubectl apply -f -) || true -g3kubectl apply -f "${GEN3_HOME}/kube/services/hatchery/hatchery-service.yaml" -gen3 roll hatchery -gen3 job cron hatchery-reaper '@daily' # cron job to distribute licenses if using Stata workspaces if [ "$(g3kubectl get configmaps/manifest-hatchery -o yaml | grep "\"image\": .*stata.*")" ]; then gen3 job cron distribute-licenses '* * * * *' fi + +policy=$( cat < /dev/null 2>&1; then + role_name="${vpc_name}-${saName}-role" + gen3 awsrole create $role_name $saName + policyName="hatchery-role-sts" + policyInfo=$(gen3_aws_run aws iam create-policy --policy-name "$policyName" --policy-document "$policy" --description "Allow hathcery to assume csoc_adminvm role in other accounts, for multi-account workspaces") + if [ -n "$policyInfo" ]; then + policyArn="$(jq -e -r '.["Policy"].Arn' <<< "$policyInfo")" || { echo "Cannot get 'Policy.Arn' from output: $policyInfo"; return 1; } + else + echo "Unable to create policy $policyName. 
Assuming it already exists and continuing" + policyArn=$(gen3_aws_run aws iam list-policies --query "Policies[?PolicyName=='$policyName'].Arn" --output text) + fi + + gen3_log_info "Attaching policy '${policyName}' to role '${role_name}'" + gen3 awsrole attach-policy ${policyArn} --role-name ${role_name} --force-aws-cli || exit 1 + gen3 awsrole attach-policy "arn:aws:iam::aws:policy/AWSResourceAccessManagerFullAccess" --role-name ${role_name} --force-aws-cli || exit 1 +fi + + +g3kubectl apply -f "${GEN3_HOME}/kube/services/hatchery/hatchery-service.yaml" +gen3 roll hatchery +gen3 job cron hatchery-reaper '@daily' \ No newline at end of file diff --git a/gen3/bin/kube-setup-metadata.sh b/gen3/bin/kube-setup-metadata.sh index 0bc802459..db4f6dc49 100644 --- a/gen3/bin/kube-setup-metadata.sh +++ b/gen3/bin/kube-setup-metadata.sh @@ -78,13 +78,14 @@ fi # keys in their manifest configuration. gen3 gitops configmaps -# Check the manifest-metadata configmap to see if the aggregate mds feature is enabled. -if g3kubectl get configmap manifest-metadata -o json | jq -r '.data.json' | jq '.USE_AGG_MDS == true' > /dev/null 2>&1; then - gen3_log_info "kube-setup-metadata setting up aws-es-proxy dependency" - gen3 kube-setup-aws-es-proxy || true - wait_for_esproxy +# Check the manifest-metadata configmap to see if the aggregate mds feature is enabled. Skip aws-es-proxysetup if configmap doesn't exist. 
+if g3kubectl get configmap manifest-metadata > /dev/null 2>&1; then + if g3kubectl get configmap manifest-metadata -o json | jq -r '.data.json' | jq '.USE_AGG_MDS == true' > /dev/null 2>&1; then + gen3_log_info "kube-setup-metadata setting up aws-es-proxy dependency" + gen3 kube-setup-aws-es-proxy || true + wait_for_esproxy + fi fi - gen3 roll metadata g3kubectl apply -f "${GEN3_HOME}/kube/services/metadata/metadata-service.yaml" diff --git a/gen3/bin/kube-setup-secrets.sh b/gen3/bin/kube-setup-secrets.sh index a92b28421..3694f4b5f 100644 --- a/gen3/bin/kube-setup-secrets.sh +++ b/gen3/bin/kube-setup-secrets.sh @@ -137,8 +137,6 @@ fi cd "$(gen3_secrets_folder)" mkdir -p jwt-keys mkdir -p ssh-keys -mkdir -p analysis-jwt-keys -mkdir -p g3auto/analysis-jwt-keys # Create keypairs for fence. Following the requirements from fence, the # keypairs go in subdirectories of the base keys directory, where the @@ -186,32 +184,43 @@ if ! g3kubectl get configmaps/fence > /dev/null 2>&1; then g3kubectl create configmap fence --from-file=apis_configs/user.yaml fi -# Analysis jwt keys +# Generate private and public keys for each service listed (except fence) +services_list=( "analysis" "amanuensis" ) + # make directories for temporary credentials timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ") -# generate private and public key for pcdcanalysistool yearMonth="$(date +%Y-%m)" -if [[ ! -d ./analysis-jwt-keys ]] || ! 
(ls ./analysis-jwt-keys | grep "$yearMonth" > /dev/null 2>&1); then - echo "Generating analysis OAUTH key pairs - analysis-jwt-keys" - mkdir -p analysis-jwt-keys/${timestamp} - - openssl genpkey -algorithm RSA -out analysis-jwt-keys/${timestamp}/jwt_private_key.pem \ - -pkeyopt rsa_keygen_bits:2048 - openssl rsa -pubout -in analysis-jwt-keys/${timestamp}/jwt_private_key.pem \ - -out analysis-jwt-keys/${timestamp}/jwt_public_key.pem - chmod -R a+rx analysis-jwt-keys - - cp analysis-jwt-keys/${timestamp}/jwt_private_key.pem analysis-jwt-keys/jwt_private_key.pem - cp analysis-jwt-keys/${timestamp}/jwt_public_key.pem analysis-jwt-keys/jwt_public_key.pem - - # TODO unclear why these needs to go in the g3auto folder instead of staying in the main directory with the other ? - cp analysis-jwt-keys/jwt_private_key.pem ./g3auto/analysis-jwt-keys/jwt_private_key.pem - cp analysis-jwt-keys/jwt_public_key.pem ./g3auto/analysis-jwt-keys/jwt_public_key.pem - - gen3 secrets sync 'chore(analysis-jwt-keys): initial setup' - # TODO you can probably remove the following line or add an if statment checking if it find somethign or not - gen3 secrets decode analysis-jwt-keys-g3auto -fi +# Create key sets for each service +for service_name in "${services_list[@]}"; do + + service_key_dir="${service_name}-jwt-keys" + gen3auto_service_key_dir="g3auto/${service_key_dir}" + + mkdir -p ${service_key_dir} + mkdir -p ${gen3auto_service_key_dir} + + if [[ ! -d ./$service_key_dir ]] || ! 
(ls ./$service_key_dir | grep "$yearMonth" > /dev/null 2>&1); then + echo "Generating OAUTH key pairs - $service_key_dir" + mkdir -p $service_key_dir/${timestamp} + + openssl genpkey -algorithm RSA -out ${service_key_dir}/${timestamp}/jwt_private_key.pem \ + -pkeyopt rsa_keygen_bits:2048 + openssl rsa -pubout -in ${service_key_dir}/${timestamp}/jwt_private_key.pem \ + -out ${service_key_dir}/${timestamp}/jwt_public_key.pem + chmod -R a+rx ${service_key_dir} + + cp ${service_key_dir}/${timestamp}/jwt_private_key.pem ${service_key_dir}/jwt_private_key.pem + cp ${service_key_dir}/${timestamp}/jwt_public_key.pem ${service_key_dir}/jwt_public_key.pem + + # TODO unclear why these needs to go in the g3auto folder instead of staying in the main directory with the other ? + cp ${service_key_dir}/jwt_private_key.pem ./${gen3auto_service_key_dir}/jwt_private_key.pem + cp ${service_key_dir}/jwt_public_key.pem ./${gen3auto_service_key_dir}/jwt_public_key.pem + + gen3 secrets sync 'chore(${service_key_dir}): initial setup' + # TODO you can probably remove the following line or add an if statment checking if it find something or not + gen3 secrets decode ${service_key_dir}-g3auto + fi +done # old fence cfg method uses fence-secret and fence-json-secret if ! g3kubectl get secrets/fence-secret > /dev/null 2>&1; then diff --git a/gen3/bin/save-failed-pod-logs.sh b/gen3/bin/save-failed-pod-logs.sh index 7013719e0..07e27a6d0 100644 --- a/gen3/bin/save-failed-pod-logs.sh +++ b/gen3/bin/save-failed-pod-logs.sh @@ -40,8 +40,9 @@ for pod in "${array_of_svc_startup_errors[@]}"; do gen3_log_info "storing kubectl logs output into svc_startup_error_${pod_name}.log..." container_name=$(g3kubectl get pod ${pod_name} -o jsonpath='{.spec.containers[0].name}') g3kubectl logs $pod_name -c ${container_name} > svc_startup_error_${pod_name}.log + g3kubectl describe pod $pod_name > describe_pod_${pod_name}.log gen3_log_info "capturing kube events..." 
- g3kubectl get events > kubectl_get_events.log + g3kubectl get events --sort-by=.metadata.creationTimestamp > kubectl_get_events.log done gen3_log_info "looking for pods with restarting containers..." @@ -68,6 +69,7 @@ for pod in "${array_of_pods[@]}"; do g3kubectl logs $pod_name -c ${container_name} | tail -n10 # TODO: this is not being archived by pipelineHelper.teardown for some reason :/ g3kubectl logs $pod_name -c ${container_name} > svc_startup_error_${pod_name}_${container_name}.log + g3kubectl describe pod $pod_name > describe_pod_${pod_name}.log realpath svc_startup_error_${pod_name}.log done fi diff --git a/gen3/lib/manifestDefaults/scaling/scaling.json b/gen3/lib/manifestDefaults/scaling/scaling.json index 70d4aa47a..bd6ddfb7e 100644 --- a/gen3/lib/manifestDefaults/scaling/scaling.json +++ b/gen3/lib/manifestDefaults/scaling/scaling.json @@ -29,6 +29,12 @@ "max": 4, "targetCpu": 40 }, + "gearbox": { + "strategy": "auto", + "min": 1, + "max": 4, + "targetCpu": 40 + }, "peregrine": { "strategy": "auto", "min": 2, diff --git a/gen3/lib/testData/manifest.global.g3k/manifests/scaling/scaling.json b/gen3/lib/testData/manifest.global.g3k/manifests/scaling/scaling.json index 896fe590e..63c76ca49 100644 --- a/gen3/lib/testData/manifest.global.g3k/manifests/scaling/scaling.json +++ b/gen3/lib/testData/manifest.global.g3k/manifests/scaling/scaling.json @@ -23,6 +23,12 @@ "max": 4, "targetCpu": 40 }, + "gearbox": { + "strategy": "auto", + "min": 1, + "max": 4, + "targetCpu": 40 + }, "peregrine": { "strategy": "auto", "min": 2, diff --git a/kube/services/amanuensis/amanuensis-deploy.yaml b/kube/services/amanuensis/amanuensis-deploy.yaml index c92fdfd17..d68e671c1 100644 --- a/kube/services/amanuensis/amanuensis-deploy.yaml +++ b/kube/services/amanuensis/amanuensis-deploy.yaml @@ -58,6 +58,12 @@ spec: - name: amanuensis-volume secret: secretName: "amanuensis-creds" + - name: amanuensis-jwt-keys + secret: + secretName: "amanuensis-jwt-keys-g3auto" + items: + - key: 
jwt_private_key.pem + path: jwt_private_key.pem securityContext: # nginx group in current images fsGroup: 101 @@ -116,6 +122,10 @@ spec: readOnly: true mountPath: "/var/www/amanuensis/creds.json" subPath: creds.json + - name: "amanuensis-jwt-keys" + readOnly: true + mountPath: "/var/www/amanuensis/jwt_private_key.pem" + subPath: "jwt_private_key.pem" resources: requests: cpu: 0.4 diff --git a/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml b/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml index ee19f0a95..da986d117 100644 --- a/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml +++ b/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml @@ -41,12 +41,16 @@ spec: readOnly: true mountPath: "/root/.aws" command: ["/bin/sh"] - # NOTE- NEED TO RUN kube-set-aws-es-proxy TO POPULATE ES_ENDPOINT - ugh! - # NOTE- gen3 roll aws-es-proxy WILL NOT WORK! + # NOTE- NEED TO RUN `gen3 kube-setup-aws-es-proxy` TO POPULATE ES_ENDPOINT - ugh! + # NOTE- `gen3 roll aws-es-proxy` WILL NOT WORK! args: - "-c" - | - if [ -f /usr/local/bin/aws-es-proxy ]; + if [ -f /aws-es-proxy ]; + then + # 1.3 needs this PR: https://github.com/uc-cdis/aws-es-proxy/pull/2 + BINARY=/aws-es-proxy + elif [ -f /usr/local/bin/aws-es-proxy ]; then # 0.9 BINARY=/usr/local/bin/aws-es-proxy @@ -57,6 +61,9 @@ spec: fi ${BINARY} -endpoint "https://$ES_ENDPOINT" -verbose -listen ":9200" resources: - limits: - cpu: 0.3 + requests: + cpu: 250m memory: 256Mi + limits: + cpu: 1 + memory: 2Gi diff --git a/kube/services/fence/fence-deploy.yaml b/kube/services/fence/fence-deploy.yaml index 8804d37b0..5aeba139f 100644 --- a/kube/services/fence/fence-deploy.yaml +++ b/kube/services/fence/fence-deploy.yaml @@ -90,6 +90,12 @@ spec: - name: yaml-merge configMap: name: "fence-yaml-merge" + - name: amanuensis-jwt-keys + secret: + secretName: "amanuensis-jwt-keys-g3auto" + items: + - key: jwt_public_key.pem + path: jwt_public_key.pem securityContext: # nginx group in current images fsGroup: 101 @@ -222,6 +228,10 @@ 
spec: readOnly: true mountPath: "/fence/jwt-keys.tar" subPath: "jwt-keys.tar" + - name: "amanuensis-jwt-keys" + readOnly: true + mountPath: "/amanuensis/jwt_public_key.pem" + subPath: "jwt_public_key.pem" resources: requests: cpu: 0.4 diff --git a/kube/services/gearbox/gearbox-deploy.yaml b/kube/services/gearbox/gearbox-deploy.yaml new file mode 100644 index 000000000..8a11cce0e --- /dev/null +++ b/kube/services/gearbox/gearbox-deploy.yaml @@ -0,0 +1,117 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gearbox-deployment +spec: + selector: + # Only select pods based on the 'app' label + matchLabels: + app: gearbox + release: production + revisionHistoryLimit: 2 + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + template: + metadata: + labels: + app: gearbox + release: production + public: "yes" + # allow access from workspaces + userhelper: "yes" + # for network policy + netnolimit: "yes" + GEN3_DATE_LABEL + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - gearbox + topologyKey: "kubernetes.io/hostname" + automountServiceAccountToken: false + volumes: + - name: config-volume-g3auto + secret: + secretName: gearbox-g3auto + # This volume may or may not be needed or available. See kube-setup-gearbox.sh and note that this + # is only available if a /gearbox directory exists. + - name: config-volume + secret: + secretName: gearbox-config + optional: true + # This volume may or may not be needed or available. See kube-setup-gearbox.sh and note that this + # may not exist if the commons does not have any gearbox manifest configuration. 
+ - name: config-manifest + configMap: + name: manifest-gearbox + optional: true + containers: + - name: gearbox + GEN3_GEARBOX_IMAGE + env: + - name: GEN3_DEBUG + GEN3_DEBUG_FLAG|-value: "False"-| + - name: GEN3_ES_ENDPOINT + value: http://esproxy-service:9200 + imagePullPolicy: Always + livenessProbe: + httpGet: + path: /_status + port: 80 + initialDelaySeconds: 30 + periodSeconds: 60 + timeoutSeconds: 30 + readinessProbe: + httpGet: + path: /_status + port: 80 + ports: + - containerPort: 80 + volumeMounts: + - name: config-volume-g3auto + readOnly: true + mountPath: /src/.env + subPath: gearbox.env + - name: config-volume + readOnly: true + mountPath: /aggregate_config.json + subPath: aggregate_config.json + - name: config-manifest + readOnly: true + mountPath: /gearbox.json + subPath: json + resources: + requests: + cpu: 0.4 + memory: 512Mi + limits: + cpu: 1 + memory: 2048Mi + initContainers: + - name: gearbox-db-migrate + GEN3_GEARBOX_IMAGE + imagePullPolicy: Always + volumeMounts: + - name: config-volume-g3auto + readOnly: true + mountPath: /src/.env + subPath: gearbox.env + resources: + limits: + cpu: 0.8 + memory: 512Mi + command: ["/bin/sh"] + args: + - "-c" + - | + /env/bin/alembic upgrade head diff --git a/kube/services/gearbox/gearbox-service.yaml b/kube/services/gearbox/gearbox-service.yaml new file mode 100644 index 000000000..4a9f0bf47 --- /dev/null +++ b/kube/services/gearbox/gearbox-service.yaml @@ -0,0 +1,15 @@ +kind: Service +apiVersion: v1 +metadata: + name: gearbox-service +spec: + selector: + app: gearbox + release: production + ports: + - protocol: TCP + port: 80 + targetPort: 80 + name: http + type: ClusterIP + diff --git a/kube/services/guppy/guppy-deploy.yaml b/kube/services/guppy/guppy-deploy.yaml index 21a6801bc..2cc6bc0c1 100644 --- a/kube/services/guppy/guppy-deploy.yaml +++ b/kube/services/guppy/guppy-deploy.yaml @@ -18,6 +18,10 @@ spec: labels: app: guppy public: "yes" + netnolimit: "yes" + tags.datadoghq.com/service: "guppy" + 
GEN3_GUPPY_VERSION + GEN3_ENV_LABEL GEN3_DATE_LABEL spec: affinity: @@ -97,6 +101,34 @@ spec: name: manifest-global key: tier_access_limit optional: true + - name: DD_TRACE_ENABLED + valueFrom: + configMapKeyRef: + name: manifest-global + key: dd_enabled + optional: true + - name: DD_ENV + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/env'] + - name: DD_SERVICE + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/service'] + - name: DD_VERSION + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/version'] + - name: DD_LOGS_INJECTION + value: "true" + - name: DD_PROFILING_ENABLED + value: "true" + - name: DD_TRACE_SAMPLE_RATE + value: "1" + - name: DD_TRACE_AGENT_HOSTNAME + valueFrom: + fieldRef: + fieldPath: status.hostIP volumeMounts: - name: guppy-config readOnly: true @@ -115,7 +147,6 @@ spec: readOnly: true mountPath: "/guppy/jwt_public_key.pem" subPath: "jwt_public_key.pem" - imagePullPolicy: Always resources: requests: diff --git a/kube/services/hatchery/hatchery-deploy.yaml b/kube/services/hatchery/hatchery-deploy.yaml index d79bef728..5ac1bb805 100644 --- a/kube/services/hatchery/hatchery-deploy.yaml +++ b/kube/services/hatchery/hatchery-deploy.yaml @@ -21,7 +21,7 @@ spec: netnolimit: "yes" userhelper: "yes" tags.datadoghq.com/service: "hatchery" - tags.datadoghq.com/version: "master" + GEN3_HATCHERY_VERSION GEN3_ENV_LABEL GEN3_DATE_LABEL spec: @@ -62,6 +62,11 @@ spec: ports: - containerPort: 8000 env: + - name: GEN3_VPCID + valueFrom: + configMapKeyRef: + name: global + key: environment - name: GEN3_ENDPOINT valueFrom: configMapKeyRef: diff --git a/kube/services/portal/portal-deploy.yaml b/kube/services/portal/portal-deploy.yaml index db9dfe070..580cfb8a2 100644 --- a/kube/services/portal/portal-deploy.yaml +++ b/kube/services/portal/portal-deploy.yaml @@ -80,7 +80,7 @@ spec: ports: - containerPort: 80 - containerPort: 443 - command: + command: - /bin/bash - ./dockerStart.sh env: @@ -133,6 
+133,12 @@ spec: name: manifest-global key: tier_access_limit optional: true + - name: REACT_APP_GA_TRACKING_ID + valueFrom: + configMapKeyRef: + name: manifest-global + key: react_app_ga_tracking_id + optional: true - name: FENCE_URL valueFrom: configMapKeyRef: diff --git a/kube/services/revproxy/gen3.nginx.conf/gearbox-service.conf b/kube/services/revproxy/gen3.nginx.conf/gearbox-service.conf new file mode 100644 index 000000000..e31d55b96 --- /dev/null +++ b/kube/services/revproxy/gen3.nginx.conf/gearbox-service.conf @@ -0,0 +1,41 @@ + location /gearbox/ { + if ($csrf_check !~ ^ok-\S.+$) { + return 403 "failed csrf check"; + } + + set $proxy_service "gearbox-service"; + set $upstream http://gearbox-service$des_domain; + rewrite ^/gearbox/(.*) /$1 break; + proxy_pass $upstream; + proxy_redirect http://$host/ https://$host/gearbox/; + client_max_body_size 0; + } + + location /gearbox-admin/ { + if ($csrf_check !~ ^ok-\S.+$) { + return 403 "failed csrf check"; + } + set $authz_resource "/gearbox_gateway"; + set $authz_method "access"; + set $authz_service "gearbox_gateway"; + # be careful - sub-request runs in same context as this request + auth_request /gen3-authz; + + set $gearbox_password "Basic ${gearbox_b64}"; + + # For testing: + #add_header Set-Cookie "X-Frickjack=${gearbox_password};Path=/;Max-Age=600"; + set $proxy_service "gearbox-service"; + set $upstream http://gearbox-service$des_domain; + rewrite ^/gearbox-admin/(.*) /$1 break; + proxy_set_header Host $host; + proxy_set_header X-Forwarded-For "$realip"; + proxy_set_header X-UserId "$userid"; + proxy_set_header X-SessionId "$session_id"; + proxy_set_header X-VisitorId "$visitor_id"; + proxy_set_header Authorization "$gearbox_password"; + + proxy_pass $upstream; + proxy_redirect http://$host/ https://$host/gearbox-admin/; + client_max_body_size 0; + } diff --git a/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf b/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf index 
e40c99fcf..db2de5886 100644 --- a/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf +++ b/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf @@ -1,4 +1,9 @@ location /guppy/ { + proxy_connect_timeout 600s; + proxy_send_timeout 600s; + proxy_read_timeout 600s; + send_timeout 600s; + set $proxy_service "guppy"; # upstream is written to logs set $upstream http://guppy-service.$namespace.svc.cluster.local; diff --git a/kube/services/revproxy/nginx.conf b/kube/services/revproxy/nginx.conf index b7e9eb43c..0d65454c5 100644 --- a/kube/services/revproxy/nginx.conf +++ b/kube/services/revproxy/nginx.conf @@ -23,6 +23,7 @@ env DES_NAMESPACE; env MAINTENANCE_MODE; env INDEXD_AUTHZ; env MDS_AUTHZ; +env GEARBOX_AUTHZ; events { worker_connections 768; @@ -165,7 +166,8 @@ perl_set $csrf_cookie_domain 'sub { return $ENV{"COOKIE_DOMAIN"} ? qq{;domain=$E perl_set $indexd_b64 'sub { $_ = $ENV{"INDEXD_AUTHZ"}; chomp; return "$_"; }'; # metadata service password for admin endpoint perl_set $mds_b64 'sub { $_ = $ENV{"MDS_AUTHZ"}; chomp; return "$_"; }'; - +# gearbox service password for admin endpoint +perl_set $gearbox_b64 'sub { $_ = $ENV{"GEARBOX_AUTHZ"}; chomp; return "$_"; }'; server { listen 6567; @@ -320,7 +322,8 @@ server { proxy_buffers 8 16k; proxy_busy_buffers_size 32k; client_body_buffer_size 16k; - + proxy_read_timeout 300; + # # also incoming from client: # * https://fullvalence.com/2016/07/05/cookie-size-in-nginx/ diff --git a/kube/services/revproxy/revproxy-deploy.yaml b/kube/services/revproxy/revproxy-deploy.yaml index 3f4006cb8..6f2b53d28 100644 --- a/kube/services/revproxy/revproxy-deploy.yaml +++ b/kube/services/revproxy/revproxy-deploy.yaml @@ -125,6 +125,12 @@ spec: name: metadata-g3auto key: base64Authz.txt optional: true + - name: GEARBOX_AUTHZ + valueFrom: + secretKeyRef: + name: gearbox-g3auto + key: base64Authz.txt + optional: true volumeMounts: - name: "revproxy-conf" readOnly: true diff --git a/kube/services/thor/thor-deploy.yaml 
b/kube/services/thor/thor-deploy.yaml index bb6928374..dbe46a734 100644 --- a/kube/services/thor/thor-deploy.yaml +++ b/kube/services/thor/thor-deploy.yaml @@ -35,12 +35,32 @@ spec: containers: - name: thor image: quay.io/cdis/thor:master + env: + - name: RUNNING_IN_QAPLANETV1 + value: "true" + - name: JENKINS_USERNAME + value: "PlanXCyborg" + - name: JENKINS_API_TOKEN + valueFrom: + secretKeyRef: + name: thor-g3auto + key: "jenkins_api_token.json" + - name: JENKINS2_API_TOKEN + valueFrom: + secretKeyRef: + name: thor-g3auto + key: "jenkins2_api_token.json" + - name: JENKINS_JOB_TOKEN + valueFrom: + secretKeyRef: + name: thor-g3auto + key: "jenkins_job_token.json" imagePullPolicy: Always volumeMounts: - name: config-volume-g3auto readOnly: true - mountPath: /src/.env - subPath: metadata.env + mountPath: /src/thor.env + subPath: thor.env volumes: - name: config-volume-g3auto secret: diff --git a/kube/services/wts/wts-deploy.yaml b/kube/services/wts/wts-deploy.yaml index b9fe67740..c9a05110d 100644 --- a/kube/services/wts/wts-deploy.yaml +++ b/kube/services/wts/wts-deploy.yaml @@ -3,6 +3,8 @@ apiVersion: apps/v1 kind: Deployment metadata: name: wts-deployment + annotations: + gen3.io/network-ingress: "mariner" spec: selector: # Only select pods based on the 'app' label diff --git a/tf_files/aws/commons/cloud.tf b/tf_files/aws/commons/cloud.tf index 34e062011..6d3f1c561 100644 --- a/tf_files/aws/commons/cloud.tf +++ b/tf_files/aws/commons/cloud.tf @@ -38,6 +38,8 @@ module "cdis_vpc" { squid_extra_vars = "${var.ha-squid_extra_vars}" single_squid_instance_type = "${var.single_squid_instance_type}" network_expansion = "${var.network_expansion}" + activation_id = "${var.activation_id}" + customer_id = "${var.customer_id}" } # logs bucket for elb logs diff --git a/tf_files/aws/commons/variables.tf b/tf_files/aws/commons/variables.tf index 75434e5dd..54093c4fa 100644 --- a/tf_files/aws/commons/variables.tf +++ b/tf_files/aws/commons/variables.tf @@ -483,3 +483,11 @@ 
variable "indexd_max_allocated_storage" { description = "Maximum allocated storage for autosacaling" default = 0 } + +variable "activation_id" { + default = "" +} + +variable "customer_id" { + default = "" +} diff --git a/tf_files/aws/modules/eks-nodepool/cloud.tf b/tf_files/aws/modules/eks-nodepool/cloud.tf index b405605bf..7f58e83f8 100644 --- a/tf_files/aws/modules/eks-nodepool/cloud.tf +++ b/tf_files/aws/modules/eks-nodepool/cloud.tf @@ -110,7 +110,9 @@ resource "aws_iam_policy" "access_to_kernels" { ], "Resource": [ "arn:aws:s3:::gen3-kernels/*", - "arn:aws:s3:::gen3-kernels" + "arn:aws:s3:::gen3-kernels", + "arn:aws:s3:::qualys-agentpackage", + "arn:aws:s3:::qualys-agentpackage/*" ] } ] diff --git a/tf_files/aws/modules/squid_auto/cloud.tf b/tf_files/aws/modules/squid_auto/cloud.tf index acc658049..d72d1e036 100644 --- a/tf_files/aws/modules/squid_auto/cloud.tf +++ b/tf_files/aws/modules/squid_auto/cloud.tf @@ -77,6 +77,14 @@ data "aws_iam_policy_document" "squid_policy_document" { effect = "Allow" resources = ["*"] } + statement { + actions = [ + "s3:Get*", + "s3:List*" + ] + effect = "Allow" + resources = ["arn:aws:s3:::qualys-agentpackage", "arn:aws:s3:::qualys-agentpackage/*"] + } } ################## @@ -142,6 +150,15 @@ CLOUD_AUTOMATION="$USER_HOME/cloud-automation" bash "${var.bootstrap_path}${var.bootstrap_script}" "cwl_group=${var.env_log_group};${join(";",var.extra_vars)}" 2>&1 cd $CLOUD_AUTOMATION git checkout master + # Install qualys agent if the activtion and customer id provided + if [[ ! -z "${var.activation_id}" ]] || [[ ! 
-z "${var.customer_id}" ]]; then + apt install awscli -y + aws s3 cp s3://qualys-agentpackage/QualysCloudAgent.deb ./qualys-cloud-agent.x86_64.deb + dpkg -i ./qualys-cloud-agent.x86_64.deb + # Clean up deb package after install + rm qualys-cloud-agent.x86_64.deb + sudo /usr/local/qualys/cloud-agent/bin/qualys-cloud-agent.sh ActivationId=${var.activation_id} CustomerId=${var.customer_id} + fi ) > /var/log/bootstrapping_script.log EOF diff --git a/tf_files/aws/modules/squid_auto/variables.tf b/tf_files/aws/modules/squid_auto/variables.tf index 896ccda5a..9c7786929 100644 --- a/tf_files/aws/modules/squid_auto/variables.tf +++ b/tf_files/aws/modules/squid_auto/variables.tf @@ -115,4 +115,12 @@ variable "network_expansion" { variable "squid_depends_on" { default = "" -} \ No newline at end of file +} + +variable "activation_id" { + default = "" +} + +variable "customer_id" { + default = "" +} diff --git a/tf_files/aws/modules/utility-vm/cloud.tf b/tf_files/aws/modules/utility-vm/cloud.tf index 309d86e0e..c80abe6a0 100644 --- a/tf_files/aws/modules/utility-vm/cloud.tf +++ b/tf_files/aws/modules/utility-vm/cloud.tf @@ -238,6 +238,13 @@ CLOUD_AUTOMATION="$USER_HOME/cloud-automation" git checkout master ) > /var/log/bootstrapping_script.log - +# Install qualys agent if the activtion and customer id provided +if [[ ! -z "${var.activation_id}" ]] || [[ ! 
-z "${var.customer_id}" ]]; then + aws s3 cp s3://qualys-agentpackage/QualysCloudAgent.rpm ./qualys-cloud-agent.x86_64.rpm + sudo rpm -ivh qualys-cloud-agent.x86_64.rpm + # Clean up rpm package after install + rm qualys-cloud-agent.x86_64.rpm + sudo /usr/local/qualys/cloud-agent/bin/qualys-cloud-agent.sh ActivationId=${var.activation_id} CustomerId=${var.customer_id} +fi EOF } diff --git a/tf_files/aws/modules/utility-vm/variables.tf b/tf_files/aws/modules/utility-vm/variables.tf index 21ac9658f..b8da12cba 100644 --- a/tf_files/aws/modules/utility-vm/variables.tf +++ b/tf_files/aws/modules/utility-vm/variables.tf @@ -116,3 +116,11 @@ variable "user_policy" { } POLICY } + +variable "activation_id" { + default = "" +} + +variable "customer_id" { + default = "" +} diff --git a/tf_files/aws/modules/vpc/cloud.tf b/tf_files/aws/modules/vpc/cloud.tf index 0b4fe2915..18bf4de1d 100644 --- a/tf_files/aws/modules/vpc/cloud.tf +++ b/tf_files/aws/modules/vpc/cloud.tf @@ -43,6 +43,8 @@ module "squid-auto" { cluster_desired_capasity = "${var.squid_cluster_desired_capasity}" network_expansion = "${var.network_expansion}" squid_depends_on = "${aws_nat_gateway.nat_gw.id}" + activation_id = "${var.activation_id}" + customer_id = "${var.customer_id}" } module "data-bucket" { diff --git a/tf_files/aws/modules/vpc/variables.tf b/tf_files/aws/modules/vpc/variables.tf index 5d1ce6867..17803eb51 100644 --- a/tf_files/aws/modules/vpc/variables.tf +++ b/tf_files/aws/modules/vpc/variables.tf @@ -110,3 +110,11 @@ variable "network_expansion" { description = "Let k8s wokers use /22 subnets per AZ" default = false } + +variable "activation_id" { + default = "" +} + +variable "customer_id" { + default = "" +} diff --git a/tf_files/aws/squid_auto/root.tf b/tf_files/aws/squid_auto/root.tf index be4957dec..71690e990 100644 --- a/tf_files/aws/squid_auto/root.tf +++ b/tf_files/aws/squid_auto/root.tf @@ -36,6 +36,8 @@ module "squid_auto" { cluster_min_size = "${var.cluster_min_size}" network_expansion 
= "${var.network_expansion}" branch = "${var.branch}" + activation_id = "${var.activation_id}" + customer_id = "${var.customer_id}" # put other variables here ... } diff --git a/tf_files/aws/squid_auto/variables.tf b/tf_files/aws/squid_auto/variables.tf index 5a53e65e1..2c9dcbb76 100644 --- a/tf_files/aws/squid_auto/variables.tf +++ b/tf_files/aws/squid_auto/variables.tf @@ -111,3 +111,11 @@ variable "network_expansion" { variable "deploy_ha_squid" { default = true } + +variable "activation_id" { + default = "" +} + +variable "customer_id" { + default = "" +} diff --git a/tf_files/aws/utility_vm/root.tf b/tf_files/aws/utility_vm/root.tf index 3767fc001..4babb8b87 100644 --- a/tf_files/aws/utility_vm/root.tf +++ b/tf_files/aws/utility_vm/root.tf @@ -27,6 +27,8 @@ module "utility_vm" { organization_name = "${var.organization_name}" branch = "${var.branch}" user_policy = "${var.user_policy}" + activation_id = "${var.activation_id}" + customer_id = "${var.customer_id}" # put other variables here ... } diff --git a/tf_files/aws/utility_vm/variables.tf b/tf_files/aws/utility_vm/variables.tf index fb48b0bcb..605b7de85 100644 --- a/tf_files/aws/utility_vm/variables.tf +++ b/tf_files/aws/utility_vm/variables.tf @@ -96,3 +96,11 @@ variable "user_policy" { } POLICY } + +variable "activation_id" { + default = "" +} + +variable "customer_id" { + default = "" +}