From 8339c3f1524c95ba339da6fc86a174da199d38bc Mon Sep 17 00:00:00 2001
From: "Vimal Paliwal (vim)"
Date: Mon, 12 Aug 2024 21:12:16 +0100
Subject: [PATCH] Multiple important fixes (#5)

* update readme and precommit gitleaks tag

* update description for few vars and add missing and new vars
---
 .pre-commit-config.yaml |  2 +-
 README.md               | 74 ++++++++++++++++++++++++++++++++++++++++-
 main.tf                 | 23 ++++++++-----
 variables.tf            | 36 +++++++++++++-------
 4 files changed, 112 insertions(+), 23 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index ab4bf66..aad3226 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -10,7 +10,7 @@ repos:
       - id: detect-private-key
       - id: no-commit-to-branch
   - repo: https://github.com/gitleaks/gitleaks
-    rev: v8.18.42
+    rev: v8.18.4
     hooks:
       - id: gitleaks
   - repo: https://github.com/antonbabenko/pre-commit-terraform
diff --git a/README.md b/README.md
index 4597005..f823545 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
 # Launch an EKS Managed Node Group
 
-![License](https://img.shields.io/github/license/terrablocks/aws-eks-managed-node-group?style=for-the-badge) ![Tests](https://img.shields.io/github/actions/workflow/status/terrablocks/aws-eks-managed-node-group/tests.yml?branch=main&label=Test&style=for-the-badge) ![Checkov](https://img.shields.io/github/actions/workflow/status/terrablocks/aws-eks-managed-node-group/checkov.yml?branch=main&label=Checkov&style=for-the-badge) ![Commit](https://img.shields.io/github/last-commit/terrablocks/aws-eks-managed-node-group?style=for-the-badge) ![Release](https://img.shields.io/github/v/release/terrablocks/aws-eks-managed-node-group?style=for-the-badge)
+![License](https://img.shields.io/github/license/terrablocks/aws-eks-managed-node-group?style=for-the-badge) ![Plan](https://img.shields.io/github/actions/workflow/status/terrablocks/aws-eks-managed-node-group/tf-plan.yml?branch=main&label=Plan&style=for-the-badge) ![Checkov](https://img.shields.io/github/actions/workflow/status/terrablocks/aws-eks-managed-node-group/checkov.yml?branch=main&label=Checkov&style=for-the-badge) ![Commit](https://img.shields.io/github/last-commit/terrablocks/aws-eks-managed-node-group?style=for-the-badge) ![Release](https://img.shields.io/github/v/release/terrablocks/aws-eks-managed-node-group?style=for-the-badge)
 
 This terraform module will deploy the following services:
 - EKS Node Group
@@ -111,3 +111,75 @@ kubectl -n kube-system set image deployment.apps/cluster-autoscaler cluster-auto
 ```bash
 kubectl -n kube-system logs -f deployment.apps/cluster-autoscaler
 ```
+
+
+# Launch an EKS Managed Node Group
+
+![License](https://img.shields.io/github/license/terrablocks/aws-eks-managed-node-group?style=for-the-badge) ![Plan](https://img.shields.io/github/actions/workflow/status/terrablocks/aws-eks-managed-node-group/tf-plan.yml?branch=main&label=Plan&style=for-the-badge) ![Checkov](https://img.shields.io/github/actions/workflow/status/terrablocks/aws-eks-managed-node-group/checkov.yml?branch=main&label=Checkov&style=for-the-badge) ![Commit](https://img.shields.io/github/last-commit/terrablocks/aws-eks-managed-node-group?style=for-the-badge) ![Release](https://img.shields.io/github/v/release/terrablocks/aws-eks-managed-node-group?style=for-the-badge)
+
+This terraform module will deploy the following services:
+- EKS Node Group
+- Auto Scaling Group
+- IAM Role
+- IAM Role Policy
+
+# Usage Instructions
+## Example
+```hcl
+module "eks_managed_node_group" {
+  source = "github.com/terrablocks/aws-eks-managed-node-group.git?ref=" # Always use `ref` to point module to a specific version or hash
"github.com/terrablocks/aws-eks-managed-node-group.git?ref=" # Always use `ref` to point module to a specific version or hash + + cluster_name = "eks-cluster" + subnet_ids = ["subnet-xxxx", "subnet-yyyy"] +} +``` + +## Requirements + +| Name | Version | +|------|---------| +| terraform | >= 1.8.0 | +| aws | >= 5.0.0 | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| ami_release_version | AMI version to use for EKS worker nodes. Leaving it to null will use latest available version | `string` | `null` | no | +| ami_type | Type of AMI to be used for EKS worker node. Supported values: AL2_x86_64, AL2_ARM_64, AL2_x86_64_GPU(AMI with GPU support) | `string` | `"AL2_x86_64"` | no | +| capacity_type | Type of purchase option to be used for EKS worker node. **Possible Values**: ON_DEMAND or SPOT | `string` | `"ON_DEMAND"` | no | +| cluster_name | Name of EKS cluster | `string` | n/a | yes | +| create_ng_role | Whether to create new IAM role for EKS worker nodes | `bool` | `true` | no | +| desired_size | Initial number of worker nodes to launch | `number` | `2` | no | +| disk_size | Size of each EBS volume attached to EKS worker node | `number` | `20` | no | +| force_update_version | Forcefully perform version update for worker nodes if pod disruption prevents node draining | `bool` | `false` | no | +| instance_types | List of type of instances to be used as EKS worker nodes | `list(string)` | ```[ "t3.medium" ]``` | no | +| labels | Key Value pair of Kubernetes labels to apply on worker nodes | `map(string)` | `{}` | no | +| launch_template | A config block with launch template details ```{ id = ID of the EC2 Launch Template to use. **Note:** Either `id` or `name` is required name = Name of the EC2 Launch Template to use. **Note:** Either `id` or `name` is required version = EC2 Launch Template version to use for launching instances }``` | `map(any)` | `{}` | no | +| max_size | Maximum number of worker nodes | `number` | `4` | no | +| min_size | Minimum number of worker nodes to maintain at any given point of time | `number` | `2` | no | +| ng_name | Name of EKS Node Group. Default: {cluster_name}-ng | `string` | `""` | no | +| ng_role_arn | ARN of IAM role to associate with EKS worker nodes. Leave it blank to create IAM role with required permissions | `string` | `""` | no | +| remote_access | A config block with EC2 remote access details ```{ ssh_key_name = Name of SSH key pair to associate to instances launched via node group sg_ids = Security group ids to attach to instances launched via node group }``` | `map(any)` | `{}` | no | +| subnet_ids | List of subnet ids to be used for launching EKS worker nodes | `list(string)` | n/a | yes | +| tags | Key Value pair to associate with node group | `map(string)` | `{}` | no | +| taints | List of taint block to associate with node group. Maximum of 50 taints per node group are supported ```{ key = Key of taint value = (Optional) Value of taint effect = Effect of taint. 
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| arn | ARN of EKS node group created |
+| cluster_name | Name of the EKS cluster attached to the node group |
+| id | EKS Cluster name and EKS Node Group name separated by a colon |
+| name | Name of the managed node group associated with the EKS cluster |
+| role_arn | ARN of the IAM role associated with EKS node group |
+| role_name | Name of the IAM role associated with EKS node group |
+| status | Status of the EKS node group |
+
+## Autoscaling nodes
+
+For autoscaling nodes, you can set up either of the following:
+- [Cluster Autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md) - [Helm Chart](https://github.com/kubernetes/autoscaler/tree/master/charts/cluster-autoscaler)
+- [Karpenter](https://karpenter.sh/) - [Helm Chart](https://github.com/aws/karpenter-provider-aws/tree/main/charts/karpenter)
+
diff --git a/main.tf b/main.tf
index 9157222..aa19da8 100644
--- a/main.tf
+++ b/main.tf
@@ -18,19 +18,19 @@ resource "aws_iam_role" "eks_ng_role" {
 resource "aws_iam_role_policy_attachment" "ng_worker_policy" {
   count      = var.create_ng_role ? 1 : 0
   policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
-  role       = join(", ", aws_iam_role.eks_ng_role.*.name)
+  role       = join(", ", aws_iam_role.eks_ng_role[*].name)
 }
 
 resource "aws_iam_role_policy_attachment" "ng_cni_policy" {
   count      = var.create_ng_role ? 1 : 0
   policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
-  role       = join(", ", aws_iam_role.eks_ng_role.*.name)
+  role       = join(", ", aws_iam_role.eks_ng_role[*].name)
 }
 
 resource "aws_iam_role_policy_attachment" "ng_registry_policy" {
   count      = var.create_ng_role ? 1 : 0
   policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
-  role       = join(", ", aws_iam_role.eks_ng_role.*.name)
+  role       = join(", ", aws_iam_role.eks_ng_role[*].name)
 }
 
 # Policy required for cluster autoscaling
@@ -39,7 +39,7 @@ resource "aws_iam_role_policy" "eks_scaling_policy" {
   # checkov:skip=CKV_AWS_355: "*" for resource is required
   count       = var.create_ng_role ? 1 : 0
   name_prefix = "${var.cluster_name}-ng-role-policy-"
-  role        = join(", ", aws_iam_role.eks_ng_role.*.id)
+  role        = join(", ", aws_iam_role.eks_ng_role[*].id)
   policy      = <<-EOF
     {
@@ -64,14 +64,19 @@ resource "aws_iam_role_policy" "eks_scaling_policy" {
 }
 
 locals {
-  node_role_arn = var.create_ng_role ? join(", ", aws_iam_role.eks_ng_role.*.arn) : var.ng_role_arn
+  node_role_arn = var.create_ng_role ? join(", ", aws_iam_role.eks_ng_role[*].arn) : var.ng_role_arn
 }
 
 resource "aws_eks_node_group" "eks_ng" {
-  cluster_name    = var.cluster_name
-  node_group_name = var.ng_name == "" ? "${var.cluster_name}-ng" : var.ng_name
-  node_role_arn   = local.node_role_arn
-  subnet_ids      = var.subnet_ids
+  cluster_name         = var.cluster_name
+  node_group_name      = var.ng_name == "" ? "${var.cluster_name}-ng" : var.ng_name
"${var.cluster_name}-ng" : var.ng_name + node_role_arn = local.node_role_arn + subnet_ids = var.subnet_ids + ami_type = var.ami_type + release_version = var.ami_release_version + disk_size = var.disk_size + force_update_version = var.force_update_version + instance_types = var.instance_types scaling_config { desired_size = var.desired_size diff --git a/variables.tf b/variables.tf index 43b8af8..b769b55 100644 --- a/variables.tf +++ b/variables.tf @@ -12,18 +12,18 @@ variable "ng_name" { variable "create_ng_role" { type = bool default = true - description = "Whether to create new IAM role for EKS nodes" + description = "Whether to create new IAM role for EKS worker nodes" } variable "ng_role_arn" { type = string default = "" - description = "ARN of IAM role to associate with EKS nodes. Leave it blank to create IAM role with required permissions" + description = "ARN of IAM role to associate with EKS worker nodes. Leave it blank to create IAM role with required permissions" } variable "subnet_ids" { type = list(string) - description = "List of subnet ids to be used for launching EKS nodes" + description = "List of subnet ids to be used for launching EKS worker nodes" } variable "launch_template" { @@ -42,49 +42,55 @@ variable "launch_template" { variable "desired_size" { type = number default = 2 - description = "Initial number of nodes to launch" + description = "Initial number of worker nodes to launch" } variable "max_size" { type = number default = 4 - description = "Maximum number of nodes" + description = "Maximum number of worker nodes" } variable "min_size" { type = number default = 2 - description = "Minimum number of nodes to maintain at any given point of time" + description = "Minimum number of worker nodes to maintain at any given point of time" } variable "capacity_type" { type = string default = "ON_DEMAND" - description = "Type of purchase option to be used for EKS node. **Possible Values**: ON_DEMAND or SPOT" + description = "Type of purchase option to be used for EKS worker node. **Possible Values**: ON_DEMAND or SPOT" } -variable "instance_type" { +variable "instance_types" { type = list(string) default = ["t3.medium"] - description = "List of type of instances to be used as EKS nodes" + description = "List of type of instances to be used as EKS worker nodes" } variable "disk_size" { type = number default = 20 - description = "Size of each EBS volume attached to EKS node" + description = "Size of each EBS volume attached to EKS worker node" } variable "labels" { type = map(string) default = {} - description = "Key Value pair of Kubernetes labels to apply on nodes" + description = "Key Value pair of Kubernetes labels to apply on worker nodes" } variable "ami_type" { type = string default = "AL2_x86_64" - description = "Type of AMI to be used for EKS node. Supported values: AL2_x86_64, AL2_ARM_64, AL2_x86_64_GPU(AMI with GPU support)" + description = "Type of AMI to be used for EKS worker node. Supported values: AL2_x86_64, AL2_ARM_64, AL2_x86_64_GPU(AMI with GPU support)" +} + +variable "ami_release_version" { + type = string + default = null + description = "AMI version to use for EKS worker nodes. Leaving it to null will use latest available version" } variable "remote_access" { @@ -112,6 +118,12 @@ variable "taints" { EOT } +variable "force_update_version" { + type = bool + default = false + description = "Forcefully perform version update for worker nodes if pod disruption prevents node draining" +} + variable "tags" { type = map(string) default = {}