-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmain.tf
137 lines (118 loc) · 4.1 KB
/
main.tf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
# First Kubernetes node is going to be master, others are workers. HA control plane not yet implemented.
#
# After launching master:
# make kube-master-ssh
# sudo tail /var/log/cloud-init-output.log -n 30
#
# If initialization succeeded, it should contain something like this:
# Your Kubernetes control-plane has initialized successfully!
#
# Follow those instructions in output:
# To start using your cluster, you need to run the following as a regular user:
# mkdir -p $HOME/.kube
# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
# sudo chown $(id -u):$(id -g) $HOME/.kube/config
#
# Verify by running on the node:
# kubectl get nodes
#
# Transfer kubeconfig to your workstation:
# make kube-config
# And verify again, for your workstation:
# kubectl get nodes
#
# Set Terraform variables kube_join_address, kube_join_token, kube_join_ca_cert_hash
# to the values found in /var/log/cloud-init-output.log
#
# You can now apply your favourite CNI (say, Flannel) and launch the rest of the nodes by increasing the kube_nodes_count variable.
resource "hcloud_server" "kube_nodes" {
  # Launch only the master until ALL three join parameters are known;
  # otherwise the extra workers would run `kubeadm join` with literal "N/A"
  # placeholders and fail. (The original coalesce() test scaled out as soon
  # as ANY one of the variables was set.)
  count = alltrue([
    for v in [var.kube_join_address, var.kube_join_token, var.kube_join_ca_cert_hash] :
    v != null && v != ""
  ]) ? var.kube_nodes_count : 1

  # Node 0 is the control plane ("master"); every other index is a worker.
  name        = "kube-${count.index == 0 ? "master" : "worker"}-${count.index}"
  location    = "nbg1" # Nuremberg, eu-central
  image       = "ubuntu-22.04"
  server_type = "cx21"

  public_net {
    ipv4_enabled = true
    ipv6_enabled = false
  }

  user_data = <<YAML
#cloud-config
users:
  - name: alex
    groups: sudo, docker
    shell: /bin/bash
    lock_passwd: false
    # mkpasswd --method=SHA-512 --rounds=4096
    passwd: "${var.password_hash}"
    ssh_authorized_keys:
      - ${var.ssh_public_key}
runcmd:
  # Kernel modules and sysctls required for Kubernetes pod networking.
  - |
    tee /etc/modules-load.d/kubernetes.conf <<EOF
    overlay
    br_netfilter
    EOF
  - modprobe overlay
  - modprobe br_netfilter
  - |
    tee /etc/sysctl.d/kubernetes.conf <<EOF
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    net.ipv4.ip_forward = 1
    EOF
  - sysctl --system
  # containerd from Docker's apt repository, switched to the systemd cgroup
  # driver as kubeadm expects. Create the keyring directory first — Docker's
  # documented install step; it is not guaranteed to exist on minimal images.
  - install -m 0755 -d /etc/apt/keyrings
  - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
  - |
    tee /etc/apt/sources.list.d/docker.list <<EOF
    deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable
    EOF
  - apt-get update
  - apt-get install -y containerd.io
  - containerd config default | tee /etc/containerd/config.toml > /dev/null
  - sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
  - systemctl restart containerd
  # NOTE(review): apt.kubernetes.io is the legacy community repo, deprecated
  # in favour of pkgs.k8s.io — migrate before the old repo stops serving
  # packages.
  - curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor -o /etc/apt/keyrings/kubernetes-archive-keyring.gpg
  - |
    tee /etc/apt/sources.list.d/kubernetes.list <<EOF
    deb [arch=amd64 signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main
    EOF
  - apt-get update
  - apt-get install -y kubelet kubeadm kubectl
  - apt-mark hold kubelet kubeadm kubectl
  - kubeadm config images pull
  # Master initializes the control plane; workers join it. Workers only exist
  # once all join variables are set (see count above), so the "N/A" fallbacks
  # are never rendered into a command that actually runs.
  - >-
    %{~if count.index == 0~}
    kubeadm init
    --service-cidr 10.112.48.0/20
    --pod-network-cidr 10.112.64.0/20
    --upload-certs
    --token-ttl 0
    %{~else~}
    kubeadm join "${coalesce(var.kube_join_address, "N/A")}"
    --token "${coalesce(var.kube_join_token, "N/A")}"
    --discovery-token-ca-cert-hash "${coalesce(var.kube_join_ca_cert_hash, "N/A")}"
    %{~endif~}
YAML

  # cloud-init user_data only applies on first boot; ignoring drift here
  # prevents template edits from forcing replacement of existing nodes.
  lifecycle {
    ignore_changes = [user_data]
  }
}
# TODO: optimize if possible easily:
# On master:
# Cloud-init v. 23.1.2-0ubuntu0~22.04.1 finished at Sun, 09 Jul 2023 14:32:59 +0000. Datasource DataSourceHetzner.
# Up 86.07 seconds
locals {
  # Every node except index 0 (the master) is a worker. Yields an empty list
  # while only the master exists.
  kube_worker_nodes = [for i, node in hcloud_server.kube_nodes : node if i > 0]
}
# Reverse-DNS records for worker nodes, created only when the workers are
# publicly reachable. The toggle is expressed as a filter inside the for
# expression: when kube_workers_public is false the map is empty and no
# records are managed.
resource "hcloud_rdns" "kube_workers" {
  for_each = {
    for node in local.kube_worker_nodes :
    node.name => node
    if var.kube_workers_public
  }

  server_id  = each.value.id
  ip_address = each.value.ipv4_address
  dns_ptr    = "${each.key}.lab.ulexxander.github.com"
}
# TODO: firewall for kube nodes.