From ee1c988794b52fde544d04b63916a3590c4d161c Mon Sep 17 00:00:00 2001 From: Pavel Nikolov Date: Fri, 26 Jul 2024 00:08:25 +0300 Subject: [PATCH 1/3] update terraform and add k8s files Signed-off-by: Pavel Nikolov --- README.md | 17 ++++++++- k8s/client.yaml | 41 ++++++++++++++++++++++ k8s/kustomization.yaml | 15 ++++++++ k8s/server.yaml | 34 ++++++++++++++++++ terraform/main.tf | 80 ++++++++++++++++++++++++++++++++++++++---- terraform/output.tf | 6 ++-- terraform/variables.tf | 22 ++++++++++++ 7 files changed, 205 insertions(+), 10 deletions(-) create mode 100644 k8s/client.yaml create mode 100644 k8s/kustomization.yaml create mode 100644 k8s/server.yaml diff --git a/README.md b/README.md index f032f7c..ceb3cb5 100644 --- a/README.md +++ b/README.md @@ -44,7 +44,22 @@ Some functional tests have been added which test the process of registration, at ### Client and Server setup -Assuming that Docker is present on your machine, the client and the server can be started by running `docker compose up`. Alternatively, if Docker is not available, one can always run the binaries using `cargo` like this: +Assuming that Docker is present on your machine, the client and the server can be started by running using the `docker-compose.yaml` file: + +```bash +$ docker compose up +[+] Running 2/0 + ✔ Container zkp-auth-server-1 Created 0.0s + ✔ Container zkp-auth-client-1 Created 0.0s +Attaching to client-1, server-1 +server-1 | Listening for connections on 0.0.0.0:50051 +client-1 | Registration successful. +client-1 | Received challenge from server. +client-1 | Successfully logged in! 
Session ID: OooJ8n7FOOU1ZyhxOqfBhsvK5x4mwdP7 +client-1 exited with code 0 +``` + +Alternatively, if Docker is not available, one can always run the binaries using `cargo` like this: * Run `cargo run --bin zkpauth-server` in one terminal; and then * Run `cargo run --bin zkpauth-client` in another terminal diff --git a/k8s/client.yaml b/k8s/client.yaml new file mode 100644 index 0000000..d46fb2b --- /dev/null +++ b/k8s/client.yaml @@ -0,0 +1,41 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: app + namespace: zkpauth-client + labels: + app: app +spec: + template: + metadata: + labels: + app: app + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - server + namespaces: + - zkpauth + topologyKey: "kubernetes.io/hostname" + containers: + - name: app + image: ghcr.io/pavelnikolov/zkpauth-client:overridden-later + env: + - name: SERVER_ADDR + value: "http://server.zkpauth:50051" + - name: CLIENT_ID + value: "client" + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 100m + memory: 100Mi + restartPolicy: Never diff --git a/k8s/kustomization.yaml b/k8s/kustomization.yaml new file mode 100644 index 0000000..0b45a09 --- /dev/null +++ b/k8s/kustomization.yaml @@ -0,0 +1,15 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: zkp-auth + +resources: + - server.yaml + - client.yaml + +images: + - name: ghcr.io/pavelnikolov/zkpauth-server + newName: ghcr.io/pavelnikolov/zkpauth-server + newTag: latest + - name: ghcr.io/pavelnikolov/zkpauth-client + newName: ghcr.io/pavelnikolov/zkpauth-client + newTag: latest diff --git a/k8s/server.yaml b/k8s/server.yaml new file mode 100644 index 0000000..7d9e28b --- /dev/null +++ b/k8s/server.yaml @@ -0,0 +1,34 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: server + namespace: zkpauth + labels: + app: server +spec: + replicas: 1 + selector: + matchLabels: + app: 
server + template: + metadata: + labels: + app: server + spec: + restartPolicy: Always + containers: + - name: server + image: ghcr.io/pavelnikolov/zkpauth-server:overridden-later + ports: + - name: grpc + containerPort: 50051 + env: + - name: LISTEN_ADDR + value: "0.0.0.0:50051" + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 200m + memory: 200Mi diff --git a/terraform/main.tf b/terraform/main.tf index 116f0dc..41ed926 100644 --- a/terraform/main.tf +++ b/terraform/main.tf @@ -16,14 +16,24 @@ provider "aws" { region = var.aws_region } - resource "aws_vpc" "main" { cidr_block = "10.0.0.0/16" } -resource "aws_subnet" "main" { - vpc_id = aws_vpc.main.id - cidr_block = "10.0.1.0/24" +resource "aws_subnet" "az_a" { + vpc_id = aws_vpc.main.id + cidr_block = "10.0.1.0/24" + availability_zone = format("%s%s", var.aws_region, "a") + + tags = { + Name = "main" + } +} + +resource "aws_subnet" "az_b" { + vpc_id = aws_vpc.main.id + cidr_block = "10.0.2.0/24" + availability_zone = format("%s%s", var.aws_region, "b") tags = { Name = "main" @@ -53,8 +63,32 @@ resource "aws_iam_role_policy_attachment" "cluster_policy" { role = aws_iam_role.cluster_role.name } -# Optionally, enable Security Groups for Pods -# Reference: https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html +resource "aws_iam_role" "NodeGroupRole" { + name = "EKSNodeGroupRole" + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "ec2.amazonaws.com" + } + }, + ] + }) +} + +resource "aws_iam_role_policy_attachment" "AmazonEKS_CNI_Policy" { + policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy" + role = aws_iam_role.NodeGroupRole.name +} + +resource "aws_iam_role_policy_attachment" "AmazonEKSWorkerNodePolicy" { + policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy" + role = aws_iam_role.NodeGroupRole.name +} + resource "aws_iam_role_policy_attachment" 
"vpc_resource_controller_policy" { policy_arn = "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController" role = aws_iam_role.cluster_role.name @@ -65,7 +99,7 @@ resource "aws_eks_cluster" "default" { role_arn = aws_iam_role.cluster_role.arn vpc_config { - subnet_ids = [aws_subnet.main.id] + subnet_ids = [aws_subnet.az_a.id, aws_subnet.az_b.id] } # Ensure that IAM Role permissions are created before and deleted after EKS Cluster handling. @@ -75,3 +109,35 @@ resource "aws_eks_cluster" "default" { aws_iam_role_policy_attachment.vpc_resource_controller_policy, ] } + +resource "aws_eks_node_group" "cluster_node_group" { + cluster_name = aws_eks_cluster.default.name + node_group_name = "${terraform.workspace}-cluster-node_group" + node_role_arn = aws_iam_role.NodeGroupRole.arn + subnet_ids = [aws_subnet.az_a.id, aws_subnet.az_b.id] + + scaling_config { + desired_size = 2 + max_size = 2 + min_size = 2 + } + + ami_type = "AL2_x86_64" + instance_types = ["t3.micro"] + capacity_type = "ON_DEMAND" + disk_size = 20 + + depends_on = [ + aws_iam_role_policy_attachment.AmazonEKSWorkerNodePolicy, + aws_iam_role_policy_attachment.AmazonEKS_CNI_Policy + ] +} + +# use managed addons in order to make it easier to upgrade the Kubernetes version in future +resource "aws_eks_addon" "addons" { + for_each = { for addon in var.cluster_addons : addon.name => addon } + cluster_name = aws_eks_cluster.default.name + addon_name = each.value.name + addon_version = each.value.version + service_account_role_arn = aws_iam_role.cluster_role.arn +} diff --git a/terraform/output.tf b/terraform/output.tf index c182771..6c4e1ac 100644 --- a/terraform/output.tf +++ b/terraform/output.tf @@ -1,7 +1,9 @@ output "endpoint" { - value = aws_eks_cluster.default.endpoint + value = aws_eks_cluster.default.endpoint + sensitive = true } output "kubeconfig-certificate-authority-data" { - value = aws_eks_cluster.default.certificate_authority[0].data + value = aws_eks_cluster.default.certificate_authority[0].data + 
sensitive = true } diff --git a/terraform/variables.tf b/terraform/variables.tf index 58bc288..3628f0e 100644 --- a/terraform/variables.tf +++ b/terraform/variables.tf @@ -3,3 +3,25 @@ variable "aws_region" { default = "eu-central-1" type = string } + +variable "cluster_addons" { + type = list(object({ + name = string + version = string + })) + + default = [ + { + name = "kube-proxy" + version = "v1.30.0-eksbuild.3" + }, + { + name = "vpc-cni" + version = "v1.18.2-eksbuild.1" + }, + { + name = "coredns" + version = "v1.11.1-eksbuild.9" + } + ] +} \ No newline at end of file From e34be80baf5e8d95cfa63b4a462d9b129c7fdea6 Mon Sep 17 00:00:00 2001 From: Pavel Nikolov Date: Mon, 29 Jul 2024 07:31:09 +0300 Subject: [PATCH 2/3] update k8s Signed-off-by: Pavel Nikolov --- .github/workflows/deploy.yml | 68 +++++ .github/workflows/terraform-apply.yml | 6 +- .github/workflows/terraform-plan.yml | 1 + k8s/client-ns.yaml | 4 + k8s/kustomization.yaml | 4 +- k8s/server-ns.yaml | 4 + k8s/server-svc.yaml | 11 + terraform/.terraform.lock.hcl | 53 ++-- terraform/main.tf | 395 +++++++++++++++++++++----- terraform/output.tf | 8 +- terraform/variables.tf | 24 +- 11 files changed, 456 insertions(+), 122 deletions(-) create mode 100644 .github/workflows/deploy.yml create mode 100644 k8s/client-ns.yaml create mode 100644 k8s/server-ns.yaml create mode 100644 k8s/server-svc.yaml diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml new file mode 100644 index 0000000..87c452e --- /dev/null +++ b/.github/workflows/deploy.yml @@ -0,0 +1,68 @@ +name: "Network Terraform Apply" + +on: + ## Trigger the workflow manually + workflow_dispatch: + +env: + TF_WORKSPACE: "default" + CONFIG_DIRECTORY: "./terraform" + AWS_REGION: ${{ secrets.AWS_REGION }} + +jobs: + terraform: + name: "Network Terraform Plan & Apply" + runs-on: ubuntu-latest + + defaults: + run: + working-directory: ${{ env.CONFIG_DIRECTORY }} + permissions: + contents: read + id-token: write + steps: + - uses: 
actions/checkout@v4 + + - name: Assume AWS Credentials + id: assume + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-region: ${{ secrets.AWS_REGION }} + role-session-name: ${{ github.actor }} + role-to-assume: ${{ secrets.AWS_ROLE_ARN }} + + - uses: hashicorp/setup-terraform@v3 + + - name: Terraform fmt + id: fmt + run: terraform fmt -check + continue-on-error: true + + - name: Terraform Init + id: init + run: terraform init + + - name: Set terraform output vars + id: vars + run: | + printf "cluster_name=%s\n" $(terraform output -raw cluster_name) >> "$GITHUB_OUTPUT" + + + - id: install-aws-cli + uses: unfor19/install-aws-cli-action@v1 + with: + version: 2 # default + verbose: false # default + arch: amd64 # allowed values: amd64, arm64 + + - uses: tale/kubectl-action@v1 + with: + base64-kube-config: ${{ secrets.KUBE_CONFIG }} + kubectl-version: v1.30.0 + + - name: configure kubeconfig + run: | + aws eks --region ${{ secrets.AWS_REGION }} update-kubeconfig --name ${{ steps.vars.outputs.cluster_name }} + + - name: Kubernetes Apply + run: kubectl apply -k ../k8s/ diff --git a/.github/workflows/terraform-apply.yml b/.github/workflows/terraform-apply.yml index 8fc20f8..c1e71ae 100644 --- a/.github/workflows/terraform-apply.yml +++ b/.github/workflows/terraform-apply.yml @@ -1,11 +1,7 @@ name: "Network Terraform Apply" on: - push: - branches: - - main - paths: - - 'terraform/**' + ## Trigger the workflow manually workflow_dispatch: env: diff --git a/.github/workflows/terraform-plan.yml b/.github/workflows/terraform-plan.yml index d449977..0137202 100644 --- a/.github/workflows/terraform-plan.yml +++ b/.github/workflows/terraform-plan.yml @@ -4,6 +4,7 @@ on: pull_request: paths: - 'terraform/**' + workflow_dispatch: env: TF_WORKSPACE: "default" diff --git a/k8s/client-ns.yaml b/k8s/client-ns.yaml new file mode 100644 index 0000000..4761e04 --- /dev/null +++ b/k8s/client-ns.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: zkpauth-client
\ No newline at end of file diff --git a/k8s/kustomization.yaml b/k8s/kustomization.yaml index 0b45a09..936c9a7 100644 --- a/k8s/kustomization.yaml +++ b/k8s/kustomization.yaml @@ -1,10 +1,12 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -namespace: zkp-auth resources: + - client-ns.yaml + - server-ns.yaml - server.yaml - client.yaml + - server-svc.yaml images: - name: ghcr.io/pavelnikolov/zkpauth-server diff --git a/k8s/server-ns.yaml b/k8s/server-ns.yaml new file mode 100644 index 0000000..3bc2a0b --- /dev/null +++ b/k8s/server-ns.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: zkpauth diff --git a/k8s/server-svc.yaml b/k8s/server-svc.yaml new file mode 100644 index 0000000..3bd8f40 --- /dev/null +++ b/k8s/server-svc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: server + namespace: zkpauth +spec: + ports: + - port: 50051 + targetPort: grpc + selector: + app: server
provider "registry.terraform.io/hashicorp/aws" { - version = "5.59.0" + version = "5.60.0" + constraints = "~> 5.0" hashes = [ - "h1:RTI7BbGzkYrknXFSa/Fmh6oNLP0FqgjNb+xyFNJeyKo=", - "zh:077f41a15057d01d833d7438322adf9b507d17ac0c8e1287430a305b6e609775", - "zh:130b112c85b67413bc65e95e5927188d8e41b45abd75350690b93d95771a587c", - "zh:16e97f1af67a5d4c6bf4f2df824a6a332b446be4516dd85a2e097317c959a174", - "zh:1cd7b0946eaf0fb11090710e9c774d22d90de0ca4516485253be96e332ebaf73", - "zh:2591d8a269014fb59111793cb8a175aafa12e370cd856fe2522577efbb72e5be", - "zh:3db5387ecc7da4e6a55a34877ea426ae87d10238bdbdf284a52e16b4be83302c", - "zh:78169400a85912d7f05fe99d4f3ba9a56871411442bdc133083dd657b18fae4e", + "h1:p9+40kdklLTJLQ/y7wxNjuKxUK8AVB4L9424NGNK4rY=", + "zh:08f49c9eb865e136a55dda3eb2b790f6d55cdac49f6638391dbea4b865cf307b", + "zh:090dd8b40ebf0f8e9ea05b9a142add9caeb7988d3d96c5c112e8c67c0edf566f", + "zh:30f336af1b4f0824fce2cc6e81af0986b325b135436c9d892d081e435aeed67e", + "zh:338195ca3b41249874110253412d8913f770c22294af05799ea1e343050906f5", + "zh:3a8a45b17750b01192a0fbeeed0d05c2c04840344d78d5e3233b3ecbeec17a1c", + "zh:486efe72d39f0736d9b7e00e5b889288264458a57aa0cff2d75688d6db372ee5", + "zh:5fdccc448a085fea8ecfae43ae326840abfcdf1a0aa8b8c79dd466392aa5cc3a", + "zh:9521639755cd07ec7efde86a534770e436e16a93692d070a00f6419c1038d59c", "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", - "zh:ad93fedbf1d2694faab6d793c6697ff5732449cdebacaa49acf6452c0c8e2ea0", - "zh:b8a2884858dde9d204dc6855903e3078a1c402485ae85b41c28e667f99a2a777", - "zh:bd3d4bd51172d08c0df277673a25fb3f0818ef47ef9f491b0c41e880b1dedce3", - "zh:d8e132bcafee2e69e21173fac409e4b99d8c81d60a7d25c58c379c67067dbf36", - "zh:eee5113ff29a42c5a75c83e9853e99a9b5c0ed066e36d6fe251083b19d38c7eb", - "zh:f0d8bcdb01d0fa0c9ed2ca8c198d4f11aabfd9d42fa239286b65ddcc6f606dfd", - "zh:f8ae46d14ec54c275e20f71d052f1b6af0cf948819b0667016045a6244edf292", + "zh:c2fb9240a069da9f51e7379e76c3dfaad15a97430c2e32708a7d18345434e310", + 
"zh:daba836b89537dfa72bb8c77e88850c20fda2a3d0f5b3803cd3d6da0ce283e3e", + "zh:db7e0755ed120ed8311f6663f49aa7157da5072b906727db3a6c47d64e0b82c6", + "zh:ea5e3fca5197639c4ad1415ca96de2924a351ecd1a885dd9184843d5eec18dbb", + "zh:f3f322951d311e45a47361f24790a90a0b8ba6d3829a00c4066a361960d2ecef", + "zh:f48b44f4887d4b51a1406057f15f1e2161cb02b271b2659349958904c678e91c", + ] +} + +provider "registry.terraform.io/hashicorp/tls" { + version = "4.0.5" + constraints = ">= 3.0.0" + hashes = [ + "h1:zeG5RmggBZW/8JWIVrdaeSJa0OG62uFX5HY1eE8SjzY=", + "zh:01cfb11cb74654c003f6d4e32bbef8f5969ee2856394a96d127da4949c65153e", + "zh:0472ea1574026aa1e8ca82bb6df2c40cd0478e9336b7a8a64e652119a2fa4f32", + "zh:1a8ddba2b1550c5d02003ea5d6cdda2eef6870ece86c5619f33edd699c9dc14b", + "zh:1e3bb505c000adb12cdf60af5b08f0ed68bc3955b0d4d4a126db5ca4d429eb4a", + "zh:6636401b2463c25e03e68a6b786acf91a311c78444b1dc4f97c539f9f78de22a", + "zh:76858f9d8b460e7b2a338c477671d07286b0d287fd2d2e3214030ae8f61dd56e", + "zh:a13b69fb43cb8746793b3069c4d897bb18f454290b496f19d03c3387d1c9a2dc", + "zh:a90ca81bb9bb509063b736842250ecff0f886a91baae8de65c8430168001dad9", + "zh:c4de401395936e41234f1956ebadbd2ed9f414e6908f27d578614aaa529870d4", + "zh:c657e121af8fde19964482997f0de2d5173217274f6997e16389e7707ed8ece8", + "zh:d68b07a67fbd604c38ec9733069fbf23441436fecf554de6c75c032f82e1ef19", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", ] } diff --git a/terraform/main.tf b/terraform/main.tf index 41ed926..3c9a22b 100644 --- a/terraform/main.tf +++ b/terraform/main.tf @@ -13,108 +13,259 @@ terraform { } provider "aws" { - region = var.aws_region + region = var.region } resource "aws_vpc" "main" { cidr_block = "10.0.0.0/16" + + tags = { + Name = "main" + } +} + + +locals { + cluster_name = "demo" +} + +resource "aws_internet_gateway" "igw" { + vpc_id = aws_vpc.main.id + + tags = { + Name = "igw" + } } -resource "aws_subnet" "az_a" { +resource "aws_subnet" "private-a" { vpc_id = aws_vpc.main.id - cidr_block = 
"10.0.1.0/24" - availability_zone = format("%s%s", var.aws_region, "a") + cidr_block = "10.0.0.0/19" + availability_zone = format("%s%s", var.region, "a") tags = { - Name = "main" + "Name" = "private-a" + "kubernetes.io/role/internal-elb" = "1" + "kubernetes.io/cluster/demo" = "owned" } } -resource "aws_subnet" "az_b" { +resource "aws_subnet" "private-b" { vpc_id = aws_vpc.main.id - cidr_block = "10.0.2.0/24" - availability_zone = format("%s%s", var.aws_region, "b") + cidr_block = "10.0.32.0/19" + availability_zone = format("%s%s", var.region, "b") tags = { - Name = "main" + "Name" = "private-b" + "kubernetes.io/role/internal-elb" = "1" + "kubernetes.io/cluster/demo" = "owned" } } -data "aws_iam_policy_document" "assume_role" { - statement { - effect = "Allow" +resource "aws_subnet" "public-a" { + vpc_id = aws_vpc.main.id + cidr_block = "10.0.64.0/19" + availability_zone = format("%s%s", var.region, "a") + map_public_ip_on_launch = true - principals { - type = "Service" - identifiers = ["eks.amazonaws.com"] - } + tags = { + "Name" = "public-a" + "kubernetes.io/role/elb" = "1" + "kubernetes.io/cluster/demo" = "owned" + } +} - actions = ["sts:AssumeRole"] +resource "aws_subnet" "public-b" { + vpc_id = aws_vpc.main.id + cidr_block = "10.0.96.0/19" + availability_zone = format("%s%s", var.region, "b") + map_public_ip_on_launch = true + + tags = { + "Name" = "public-b" + "kubernetes.io/role/elb" = "1" + "kubernetes.io/cluster/demo" = "owned" } } -resource "aws_iam_role" "cluster_role" { - name = "${terraform.workspace}-eks-cluster" - assume_role_policy = data.aws_iam_policy_document.assume_role.json +resource "aws_eip" "nat" { + tags = { + Name = "nat" + } } -resource "aws_iam_role_policy_attachment" "cluster_policy" { - policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy" - role = aws_iam_role.cluster_role.name +resource "aws_nat_gateway" "nat" { + allocation_id = aws_eip.nat.id + subnet_id = aws_subnet.public-a.id + + tags = { + Name = "nat" + } + + 
depends_on = [aws_internet_gateway.igw] } -resource "aws_iam_role" "NodeGroupRole" { - name = "EKSNodeGroupRole" - assume_role_policy = jsonencode({ - Version = "2012-10-17" - Statement = [ - { - Action = "sts:AssumeRole" - Effect = "Allow" - Principal = { - Service = "ec2.amazonaws.com" - } +resource "aws_route_table" "private" { + vpc_id = aws_vpc.main.id + + route = [ + { + cidr_block = "0.0.0.0/0" + nat_gateway_id = aws_nat_gateway.nat.id + carrier_gateway_id = "" + core_network_arn = "" + destination_prefix_list_id = "" + egress_only_gateway_id = "" + gateway_id = "" + instance_id = "" + ipv6_cidr_block = "" + local_gateway_id = "" + network_interface_id = "" + transit_gateway_id = "" + vpc_endpoint_id = "" + vpc_peering_connection_id = "" + }, + ] + + tags = { + Name = "private" + } +} + +resource "aws_route_table" "public" { + vpc_id = aws_vpc.main.id + + route = [ + { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.igw.id + nat_gateway_id = "" + carrier_gateway_id = "" + core_network_arn = "" + destination_prefix_list_id = "" + egress_only_gateway_id = "" + instance_id = "" + ipv6_cidr_block = "" + local_gateway_id = "" + network_interface_id = "" + transit_gateway_id = "" + vpc_endpoint_id = "" + vpc_peering_connection_id = "" + }, + ] + + tags = { + Name = "public" + } +} + +resource "aws_route_table_association" "private-a" { + subnet_id = aws_subnet.private-a.id + route_table_id = aws_route_table.private.id +} + +resource "aws_route_table_association" "private-b" { + subnet_id = aws_subnet.private-b.id + route_table_id = aws_route_table.private.id +} + +resource "aws_route_table_association" "public-a" { + subnet_id = aws_subnet.public-a.id + route_table_id = aws_route_table.public.id +} + +resource "aws_route_table_association" "public-b" { + subnet_id = aws_subnet.public-b.id + route_table_id = aws_route_table.public.id +} + +resource "aws_iam_role" "demo" { + name = "eks-cluster-demo" + + assume_role_policy = < addon } - cluster_name = 
aws_eks_cluster.default.name - addon_name = each.value.name - addon_version = each.value.version - service_account_role_arn = aws_iam_role.cluster_role.arn +data "tls_certificate" "eks" { + url = aws_eks_cluster.demo.identity[0].oidc[0].issuer +} + +resource "aws_iam_openid_connect_provider" "eks" { + client_id_list = ["sts.amazonaws.com"] + thumbprint_list = [data.tls_certificate.eks.certificates[0].sha1_fingerprint] + url = aws_eks_cluster.demo.identity[0].oidc[0].issuer +} + +data "aws_iam_policy_document" "test_oidc_assume_role_policy" { + statement { + actions = ["sts:AssumeRoleWithWebIdentity"] + effect = "Allow" + + condition { + test = "StringEquals" + variable = "${replace(aws_iam_openid_connect_provider.eks.url, "https://", "")}:sub" + values = ["system:serviceaccount:default:aws-test"] + } + + principals { + identifiers = [aws_iam_openid_connect_provider.eks.arn] + type = "Federated" + } + } +} + +resource "aws_iam_role" "test_oidc" { + assume_role_policy = data.aws_iam_policy_document.test_oidc_assume_role_policy.json + name = "test-oidc" +} + +resource "aws_iam_policy" "test-policy" { + name = "test-policy" + + policy = jsonencode({ + Statement = [{ + Action = [ + "s3:ListAllMyBuckets", + "s3:GetBucketLocation" + ] + Effect = "Allow" + Resource = "arn:aws:s3:::*" + }] + Version = "2012-10-17" + }) +} + +resource "aws_iam_role_policy_attachment" "test_attach" { + role = aws_iam_role.test_oidc.name + policy_arn = aws_iam_policy.test-policy.arn +} + +data "aws_iam_policy_document" "eks_cluster_autoscaler_assume_role_policy" { + statement { + actions = ["sts:AssumeRoleWithWebIdentity"] + effect = "Allow" + + condition { + test = "StringEquals" + variable = "${replace(aws_iam_openid_connect_provider.eks.url, "https://", "")}:sub" + values = ["system:serviceaccount:kube-system:cluster-autoscaler"] + } + + principals { + identifiers = [aws_iam_openid_connect_provider.eks.arn] + type = "Federated" + } + } +} + +resource "aws_iam_role" "eks_cluster_autoscaler" 
{ + assume_role_policy = data.aws_iam_policy_document.eks_cluster_autoscaler_assume_role_policy.json + name = "eks-cluster-autoscaler" +} + +resource "aws_iam_policy" "eks_cluster_autoscaler" { + name = "eks-cluster-autoscaler" + + policy = jsonencode({ + Statement = [{ + Action = [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "ec2:DescribeLaunchTemplateVersions" + ] + Effect = "Allow" + Resource = "*" + }] + Version = "2012-10-17" + }) +} + +resource "aws_iam_role_policy_attachment" "eks_cluster_autoscaler_attach" { + role = aws_iam_role.eks_cluster_autoscaler.name + policy_arn = aws_iam_policy.eks_cluster_autoscaler.arn } diff --git a/terraform/output.tf b/terraform/output.tf index 6c4e1ac..5437ab2 100644 --- a/terraform/output.tf +++ b/terraform/output.tf @@ -1,9 +1,9 @@ -output "endpoint" { - value = aws_eks_cluster.default.endpoint +output "cluster_endpoint" { + value = aws_eks_cluster.demo.endpoint sensitive = true } -output "kubeconfig-certificate-authority-data" { - value = aws_eks_cluster.default.certificate_authority[0].data +output "cluster_name" { + value = aws_eks_cluster.demo.name sensitive = true } diff --git a/terraform/variables.tf b/terraform/variables.tf index 3628f0e..868825f 100644 --- a/terraform/variables.tf +++ b/terraform/variables.tf @@ -1,27 +1,5 @@ -variable "aws_region" { +variable "region" { description = "AWS region" default = "eu-central-1" type = string } - -variable "cluster_addons" { - type = list(object({ - name = string - version = string - })) - - default = [ - { - name = "kube-proxy" - version = "v1.30.0-eksbuild.3" - }, - { - name = "vpc-cni" - version = "v1.18.2-eksbuild.1" - }, - { - name = "coredns" - version = "v1.11.1-eksbuild.9" - } - ] -} \ No newline at end of file From 3cad1205c8ff859eccfb97ac30e76893b8383b83 
Mon Sep 17 00:00:00 2001 From: Pavel Nikolov Date: Mon, 29 Jul 2024 07:50:36 +0300 Subject: [PATCH 3/3] fix cidr --- terraform/main.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/terraform/main.tf b/terraform/main.tf index 3c9a22b..9348388 100644 --- a/terraform/main.tf +++ b/terraform/main.tf @@ -117,7 +117,7 @@ resource "aws_route_table" "private" { egress_only_gateway_id = "" gateway_id = "" instance_id = "" - ipv6_cidr_block = "" + ipv6_cidr_block = "::/0" local_gateway_id = "" network_interface_id = "" transit_gateway_id = "" @@ -144,7 +144,7 @@ resource "aws_route_table" "public" { destination_prefix_list_id = "" egress_only_gateway_id = "" instance_id = "" - ipv6_cidr_block = "" + ipv6_cidr_block = "::/0" local_gateway_id = "" network_interface_id = "" transit_gateway_id = ""