Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Feature | Merge EKS clusters into one #659

Merged
merged 15 commits into from
Jan 21, 2025
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Change switchboard layout
  • Loading branch information
angelofenoglio committed Nov 26, 2024
commit 4cbdd68ea8885da8bf08b010411c3260973ffa80
Original file line number Diff line number Diff line change
Expand Up @@ -6,22 +6,25 @@ data "aws_secretsmanager_secret_version" "argocd_admin_password" {
}

# Read-only deploy key for the demo-google-microservices repository, fetched
# from AWS Secrets Manager in the shared account (aws.shared provider alias).
# Only looked up when ArgoCD is enabled, since only ArgoCD consumes it.
data "aws_secretsmanager_secret_version" "demo_google_microservices_deploy_key" {
count = var.argocd.enabled ? 1 : 0
provider = aws.shared
secret_id = "/repositories/demo-google-microservices/deploy_key"
}

# Read-only deploy key for the le-demo-apps repository, fetched from AWS
# Secrets Manager in the shared account (aws.shared provider alias).
# Only looked up when ArgoCD is enabled, since only ArgoCD consumes it.
data "aws_secretsmanager_secret_version" "le_demo_deploy_key" {
count = var.argocd.enabled ? 1 : 0
provider = aws.shared
secret_id = "/repositories/le-demo-apps/deploy_key"
}

# OAuth credentials for the ArgoCD Slack notifications app, fetched from AWS
# Secrets Manager in the shared account. Gated on both ArgoCD itself and its
# notifications feature being enabled, as it is only needed by notifications.
data "aws_secretsmanager_secret_version" "argocd_slack_notification_app_oauth" {
count = var.argocd.enabled && var.argocd.enableNotifications ? 1 : 0
provider = aws.shared
secret_id = "/notifications/devstg/argocd"
}

resource "helm_release" "argocd" {
count = var.enable_cicd ? 1 : 0
count = var.argocd.enabled ? 1 : 0

name = "argocd"
namespace = kubernetes_namespace.argocd[0].id
Expand Down Expand Up @@ -78,7 +81,7 @@ resource "helm_release" "argocd" {
# ArgoCD Image Updater
#------------------------------------------------------------------------------
resource "helm_release" "argocd_image_updater" {
count = var.enable_argocd_image_updater ? 1 : 0
count = var.argocd.image_updater.enabled ? 1 : 0
name = "argocd-image-updater"
namespace = kubernetes_namespace.argocd[0].id
repository = "https://argoproj.github.io/argo-helm"
Expand Down Expand Up @@ -113,7 +116,7 @@ resource "helm_release" "argocd_image_updater" {
# Argo Rollouts
#------------------------------------------------------------------------------
resource "helm_release" "argo_rollouts" {
count = var.enable_argo_rollouts ? 1 : 0
count = var.argocd.rollouts.enabled ? 1 : 0

name = "argo-rollouts"
namespace = kubernetes_namespace.argocd[0].id
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
# Service Account & Permissions: External Prometheus
#------------------------------------------------------------------------------
resource "kubernetes_cluster_role" "external_prometheus" {
count = var.enable_prometheus_dependencies ? 1 : 0
count = var.prometheus.external.dependencies.enabled ? 1 : 0

metadata {
name = "external-prometheus"
Expand Down Expand Up @@ -38,7 +38,7 @@ resource "kubernetes_cluster_role" "external_prometheus" {
}

resource "kubernetes_cluster_role_binding" "external_prometheus" {
count = var.enable_prometheus_dependencies ? 1 : 0
count = var.prometheus.external.dependencies.enabled ? 1 : 0

metadata {
name = "external-prometheus"
Expand All @@ -56,7 +56,7 @@ resource "kubernetes_cluster_role_binding" "external_prometheus" {
}

resource "kubernetes_service_account" "external_prometheus" {
count = var.enable_prometheus_dependencies ? 1 : 0
count = var.prometheus.external.dependencies.enabled ? 1 : 0

metadata {
name = "external-prometheus"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
# Service Account & Permissions: Grafana KubeGraf Application
#------------------------------------------------------------------------------
resource "kubernetes_cluster_role" "grafana_kubegraf" {
count = var.enable_grafana_dependencies ? 1 : 0
count = var.prometheus.external.grafana_dependencies.enabled ? 1 : 0

metadata {
name = "grafana-kubegraf"
Expand Down Expand Up @@ -42,7 +42,7 @@ resource "kubernetes_cluster_role" "grafana_kubegraf" {
}

resource "kubernetes_cluster_role_binding" "grafana_kubegraf" {
count = var.enable_grafana_dependencies ? 1 : 0
count = var.prometheus.external.grafana_dependencies.enabled ? 1 : 0

metadata {
name = "grafana-kubegraf"
Expand All @@ -60,7 +60,7 @@ resource "kubernetes_cluster_role_binding" "grafana_kubegraf" {
}

resource "kubernetes_service_account" "grafana_kubegraf" {
count = var.enable_grafana_dependencies ? 1 : 0
count = var.prometheus.external.grafana_dependencies.enabled ? 1 : 0

metadata {
name = "grafana-kubegraf"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ locals {
alb_ingress_to_nginx_ingress_tags_list = [
for k, v in local.alb_ingress_to_nginx_ingress_tags_map : "${k}=${v}"
]
eks_alb_logging_prefix = var.eks_alb_logging_prefix != "" ? var.eks_alb_logging_prefix : data.terraform_remote_state.cluster.outputs.cluster_name
eks_alb_logging_prefix = var.ingress.apps_ingress.logging.prefix != "" ? var.ingress.apps_ingress.logging.prefix : data.terraform_remote_state.cluster.outputs.cluster_name

#------------------------------------------------------------------------------
# Argo Settings
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
# around to immediately check what's wrong.
#------------------------------------------------------------------------------
resource "helm_release" "kwatch" {
count = var.enable_kwatch ? 1 : 0
count = var.kwatch.enabled ? 1 : 0
name = "kwatch"
namespace = kubernetes_namespace.monitoring_alerts[0].id
repository = "https://kwatch.dev/charts"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ resource "helm_release" "kube_resource_report" {
# `http://localhost:9090
#------------------------------------------------------------------------------
resource "helm_release" "cost_analyzer" {
count = var.cost_optimization.cost_analyzer && !var.enable_prometheus_stack ? 1 : 0
count = var.cost_optimization.cost_analyzer && !var.prometheus.kube_stack.enabled ? 1 : 0
name = "cost-analyzer"
namespace = kubernetes_namespace.monitoring_tools[0].id
repository = "https://kubecost.github.io/cost-analyzer/"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
# Kube State Metrics: Expose cluster metrics.
#------------------------------------------------------------------------------
resource "helm_release" "kube_state_metrics" {
count = var.enable_prometheus_dependencies ? 1 : 0
count = var.prometheus.external.dependencies.enabled ? 1 : 0
name = "kube-state-metrics"
namespace = kubernetes_namespace.monitoring_metrics[0].id
repository = "https://charts.bitnami.com/bitnami"
Expand All @@ -15,7 +15,7 @@ resource "helm_release" "kube_state_metrics" {
# Node Exporter: Expose cluster node metrics.
# ------------------------------------------------------------------------------
resource "helm_release" "node_exporter" {
count = var.enable_prometheus_dependencies ? 1 : 0
count = var.prometheus.external.dependencies.enabled ? 1 : 0
name = "node-exporter"
namespace = kubernetes_namespace.monitoring_metrics[0].id
repository = "https://charts.bitnami.com/bitnami"
Expand All @@ -28,7 +28,7 @@ resource "helm_release" "node_exporter" {
# Metrics Server: Expose cluster metrics.
#------------------------------------------------------------------------------
resource "helm_release" "metrics_server" {
count = (var.enable_hpa_scaling || var.enable_vpa_scaling) ? 1 : 0
count = (var.scaling.hpa.enabled || var.scaling.vpa.enabled) ? 1 : 0
name = "metrics-server"
namespace = kubernetes_namespace.monitoring_metrics[0].id
repository = "https://charts.bitnami.com/bitnami"
Expand All @@ -41,7 +41,7 @@ resource "helm_release" "metrics_server" {
# Prometheus Stack: (in-cluster) Prometheus, Grafana, and AlertManager.
#------------------------------------------------------------------------------
resource "helm_release" "kube_prometheus_stack" {
count = var.kube_prometheus_stack.enabled && !var.cost_optimization.cost_analyzer ? 1 : 0
count = var.prometheus.kube_stack.enabled && !var.cost_optimization.cost_analyzer ? 1 : 0
name = "kube-prometheus-stack"
namespace = kubernetes_namespace.prometheus[0].id
repository = "https://prometheus-community.github.io/helm-charts"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
# Datadog Agent
#------------------------------------------------------------------------------
resource "helm_release" "datadog_agent" {
count = var.enable_datadog_agent ? 1 : 0
count = var.datadog_agent.enabled ? 1 : 0
name = "datadog"
namespace = kubernetes_namespace.monitoring_other[0].id
repository = "https://helm.datadoghq.com"
Expand Down Expand Up @@ -37,7 +37,7 @@ resource "helm_release" "datadog_agent" {
# - Back up the volume used by Kuma and define/rehearse the restore procedure.
#------------------------------------------------------------------------------
resource "helm_release" "uptime_kuma" {
count = var.enable_uptime_kuma ? 1 : 0
count = var.uptime_kuma.enabled ? 1 : 0
name = "uptime-kuma"
namespace = kubernetes_namespace.monitoring_other[0].id
repository = "https://helm.irsigler.cloud"
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
resource "kubernetes_namespace" "monitoring_metrics" {
count = var.enable_prometheus_dependencies || var.enable_prometheus_dependencies || var.enable_cluster_autoscaling || var.enable_hpa_scaling || var.enable_vpa_scaling ? 1 : 0
count = var.prometheus.external.dependencies.enabled || var.scaling.cluster_autoscaling.enabled || var.scaling.hpa.enabled || var.scaling.vpa.enabled ? 1 : 0

metadata {
labels = local.labels
Expand All @@ -17,7 +17,7 @@ resource "kubernetes_namespace" "monitoring_logging" {
}

resource "kubernetes_namespace" "monitoring_tools" {
count = var.enable_kubernetes_dashboard || var.enable_vpa_scaling || var.cost_optimization.kube_resource_report || var.cost_optimization.cost_analyzer ? 1 : 0
count = var.scaling.vpa.enabled || var.cost_optimization.kube_resource_report || var.cost_optimization.cost_analyzer ? 1 : 0

metadata {
labels = local.labels
Expand All @@ -26,7 +26,7 @@ resource "kubernetes_namespace" "monitoring_tools" {
}

resource "kubernetes_namespace" "monitoring_other" {
count = var.enable_datadog_agent || var.enable_uptime_kuma ? 1 : 0
count = var.datadog_agent.enabled || var.uptime_kuma.enabled ? 1 : 0

metadata {
labels = local.labels
Expand All @@ -35,7 +35,7 @@ resource "kubernetes_namespace" "monitoring_other" {
}

resource "kubernetes_namespace" "monitoring_alerts" {
count = var.enable_kwatch ? 1 : 0
count = var.kwatch.enabled ? 1 : 0

metadata {
labels = local.labels
Expand All @@ -44,7 +44,7 @@ resource "kubernetes_namespace" "monitoring_alerts" {
}

resource "kubernetes_namespace" "ingress_nginx" {
count = var.enable_nginx_ingress_controller ? 1 : 0
count = var.ingress.nginx_controller.enabled ? 1 : 0

metadata {
labels = local.labels
Expand All @@ -53,7 +53,7 @@ resource "kubernetes_namespace" "ingress_nginx" {
}

resource "kubernetes_namespace" "alb_ingress" {
count = var.enable_alb_ingress_controller ? 1 : 0
count = var.ingress.alb_controller.enabled ? 1 : 0

metadata {
labels = local.labels
Expand All @@ -62,7 +62,7 @@ resource "kubernetes_namespace" "alb_ingress" {
}

resource "kubernetes_namespace" "certmanager" {
count = var.enable_certmanager ? 1 : 0
count = var.certmanager.enabled ? 1 : 0

metadata {
labels = local.labels
Expand All @@ -71,7 +71,7 @@ resource "kubernetes_namespace" "certmanager" {
}

resource "kubernetes_namespace" "externaldns" {
count = var.enable_private_dns_sync || var.enable_public_dns_sync ? 1 : 0
count = var.dns_sync.private.enabled || var.dns_sync.public.enabled ? 1 : 0

metadata {
labels = local.labels
Expand All @@ -80,7 +80,7 @@ resource "kubernetes_namespace" "externaldns" {
}

resource "kubernetes_namespace" "external-secrets" {
count = var.enable_external_secrets ? 1 : 0
count = var.external_secrets.enabled ? 1 : 0

metadata {
labels = local.labels
Expand All @@ -89,7 +89,7 @@ resource "kubernetes_namespace" "external-secrets" {
}

resource "kubernetes_namespace" "argocd" {
count = var.enable_cicd || var.enable_argocd_image_updater || var.enable_argo_rollouts ? 1 : 0
count = var.argocd.enabled || var.argocd.image_updater.enabled || var.argocd.rollouts.enabled ? 1 : 0

metadata {
labels = local.labels
Expand All @@ -98,7 +98,7 @@ resource "kubernetes_namespace" "argocd" {
}

resource "kubernetes_namespace" "prometheus" {
count = var.kube_prometheus_stack.enabled ? 1 : 0
count = var.prometheus.kube_stack.enabled ? 1 : 0

metadata {
labels = local.labels
Expand All @@ -107,7 +107,7 @@ resource "kubernetes_namespace" "prometheus" {
}

resource "kubernetes_namespace" "scaling" {
count = var.enable_cluster_overprovisioning ? 1 : 0
count = var.scaling.cluster_overprovisioning.enabled ? 1 : 0

metadata {
labels = local.labels
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
# External DNS (Private): Sync ingresses hosts with your DNS server.
#------------------------------------------------------------------------------
resource "helm_release" "externaldns_private" {
count = var.enable_private_dns_sync ? 1 : 0
count = var.dns_sync.private.enabled ? 1 : 0

# depends_on = [null_resource.download]

Expand All @@ -28,7 +28,7 @@ resource "helm_release" "externaldns_private" {
# External DNS (Public): Sync ingresses hosts with your DNS server.
#------------------------------------------------------------------------------
resource "helm_release" "externaldns_public" {
count = var.enable_public_dns_sync ? 1 : 0
count = var.dns_sync.public.enabled ? 1 : 0

name = "externaldns-public"
namespace = kubernetes_namespace.externaldns[0].id
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
# AWS Load Balancer (Ingress) Controller: Route outside traffic to the cluster.
#------------------------------------------------------------------------------
resource "helm_release" "alb_ingress" {
count = var.enable_alb_ingress_controller ? 1 : 0
count = var.ingress.alb_controller.enabled ? 1 : 0
name = "alb-ingress"
namespace = kubernetes_namespace.alb_ingress[0].id
repository = "https://aws.github.io/eks-charts"
Expand All @@ -22,7 +22,7 @@ resource "helm_release" "alb_ingress" {
# Nginx Ingress (Private): Route inside traffic to services in the cluster.
#------------------------------------------------------------------------------
resource "helm_release" "ingress_nginx_private" {
count = var.enable_nginx_ingress_controller ? 1 : 0
count = var.ingress.nginx_controller.enabled ? 1 : 0
name = "ingress-nginx-private"
namespace = kubernetes_namespace.ingress_nginx[0].id
repository = "https://kubernetes.github.io/ingress-nginx"
Expand Down Expand Up @@ -54,7 +54,7 @@ resource "helm_release" "ingress_nginx_private" {
#
#------------------------------------------------------------------------------
resource "kubernetes_ingress_v1" "apps" {
count = var.apps_ingress.enabled ? 1 : 0
count = var.ingress.apps_ingress.enabled ? 1 : 0
wait_for_load_balancer = true

metadata {
Expand All @@ -64,7 +64,7 @@ resource "kubernetes_ingress_v1" "apps" {
# This is used by the ALB Ingress
"kubernetes.io/ingress.class" = "${local.public_ingress_class}"
# Load balancer type: internet-facing or internal
"alb.ingress.kubernetes.io/scheme" = var.apps_ingress.type
"alb.ingress.kubernetes.io/scheme" = var.ingress.apps_ingress.type
# Group this LB under a custom group so it's not shared with other groups
"alb.ingress.kubernetes.io/group.name" = "apps"
# Nginx provides an endpoint for health checks
Expand All @@ -83,7 +83,7 @@ resource "kubernetes_ingress_v1" "apps" {
# NOTE: this is highly recommended when using an internet-facing ALB
"alb.ingress.kubernetes.io/inbound-cidrs" = "0.0.0.0/0"
# ALB access logs
"alb.ingress.kubernetes.io/load-balancer-attributes" = "access_logs.s3.enabled=${var.enable_eks_alb_logging},access_logs.s3.bucket=${var.project}-${var.environment}-alb-logs,access_logs.s3.prefix=${local.eks_alb_logging_prefix}"
"alb.ingress.kubernetes.io/load-balancer-attributes" = "access_logs.s3.enabled=${var.ingress.apps_ingress.logging.enabled},access_logs.s3.bucket=${var.project}-${var.environment}-alb-logs,access_logs.s3.prefix=${local.eks_alb_logging_prefix}"
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
# Vertical Pod Autoscaler: automatic pod vertical autoscaling.
#------------------------------------------------------------------------------
resource "helm_release" "vpa" {
count = var.enable_vpa_scaling ? 1 : 0
count = var.scaling.vpa.enabled ? 1 : 0
name = "vpa"
namespace = kubernetes_namespace.monitoring_metrics[0].id
repository = "https://charts.fairwinds.com/stable"
Expand All @@ -16,7 +16,7 @@ resource "helm_release" "vpa" {
# Cluster Autoscaler: automatic cluster nodes autoscaling.
#------------------------------------------------------------------------------
resource "helm_release" "cluster_autoscaling" {
count = var.enable_cluster_autoscaling ? 1 : 0
count = var.scaling.cluster_autoscaling.enabled ? 1 : 0
name = "autoscaler"
namespace = kubernetes_namespace.monitoring_metrics[0].id
repository = "https://kubernetes.github.io/autoscaler"
Expand Down Expand Up @@ -50,7 +50,7 @@ resource "helm_release" "cluster_autoscaling" {
# Another option is to start with one replica and then use the proportional
# autoscaler to control the minimum number of replicas there.
resource "helm_release" "cluster_overprovisioner" {
count = var.enable_cluster_overprovisioning ? 1 : 0
count = var.scaling.cluster_overprovisioning.enabled ? 1 : 0
name = "cluster-overprovisioner"
namespace = kubernetes_namespace.scaling[0].id
repository = "https://charts.deliveryhero.io/"
Expand Down Expand Up @@ -83,7 +83,7 @@ EOF
# targets must, as much as possible, be assigned to a new node.
# - Also, don't forget about using proper values for the min and max settings.
resource "helm_release" "cluster_proportional_autoscaler" {
count = var.enable_cluster_overprovisioning ? 1 : 0
count = var.scaling.cluster_overprovisioning.enabled ? 1 : 0
name = "cluster-proportional-autoscaler"
namespace = kubernetes_namespace.scaling[0].id
repository = "https://kubernetes-sigs.github.io/cluster-proportional-autoscaler"
Expand Down
Loading
Loading