Add Scaleway #8

Open · wants to merge 6 commits into base: main
2 changes: 1 addition & 1 deletion README.md
@@ -1,3 +1,3 @@
# Set up a Nomad cluster on the major cloud platforms

This repo is a companion to the [Cluster Setup](https://developer.hashicorp.com/nomad/tutorials/cluster-setup) collection of tutorials, containing configuration files to create a Nomad cluster with ACLs enabled on AWS, GCP, and Azure.
This repo is a companion to the [Cluster Setup](https://developer.hashicorp.com/nomad/tutorials/cluster-setup) collection of tutorials, containing configuration files to create a Nomad cluster with ACLs enabled on AWS, GCP, Azure, and Scaleway.
41 changes: 41 additions & 0 deletions scaleway/image.pkr.hcl
@@ -0,0 +1,41 @@
locals {
timestamp = regex_replace(timestamp(), "[- TZ:]", "")
}

variable "zone" {
type = string
default = "fr-par-1"
}

variable "project_id" {
type = string
default = null
}

source "scaleway" "hashistack" {
commercial_type = "PLAY2-NANO"
image = "ubuntu_focal"
image_name = "hashistack-${local.timestamp}"
ssh_username = "root"
zone = var.zone
cleanup_machine_related_data = true
}

build {
sources = ["source.scaleway.hashistack"]

provisioner "shell" {
inline = ["sudo mkdir -p /ops/shared", "sudo chmod 777 -R /ops"]
}

provisioner "file" {
destination = "/ops"
source = "../shared"
}

provisioner "shell" {
environment_vars = ["INSTALL_NVIDIA_DOCKER=false", "CLOUD_ENV=scaleway"]
script = "../shared/scripts/setup.sh"
}

}
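A minimal sketch of building the image from this template (assuming Scaleway credentials are supplied through the SCW_ACCESS_KEY, SCW_SECRET_KEY, and SCW_DEFAULT_PROJECT_ID environment variables, the names conventionally read by Scaleway tooling):

# Credentials for the Scaleway Packer plugin (read from the environment).
export SCW_ACCESS_KEY="<access-key>"
export SCW_SECRET_KEY="<secret-key>"
export SCW_DEFAULT_PROJECT_ID="<project-id>"

# Build the image and note the generated name (hashistack-<timestamp>);
# it becomes the instance_image Terraform variable later on.
packer build image.pkr.hcl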
268 changes: 268 additions & 0 deletions scaleway/main.tf
@@ -0,0 +1,268 @@
terraform {
required_providers {
scaleway = {
source = "scaleway/scaleway"
version = ">= 2.13.0"
}
}
required_version = ">= 0.13"
}

provider "scaleway" {
zone = var.zone
project_id = var.project_id
}

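# Consul's Scaleway auto-join works by querying the Instances API, so it
# needs an API key with read-only access to instances. The application,
# policy, and API key below are scoped to exactly that permission.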
resource "scaleway_iam_application" "auto_discovery" {
name = "${var.name}-application-auto-discovery"
description = "Nomad application"
}

data "scaleway_account_project" "selected" {
project_id = var.project_id
name = var.project_id != null ? null : "default"
}

resource "scaleway_iam_policy" "auto_discovery" {
name = "${var.name}-policy-auto-discovery"
description = "Auto discovery policy for Nomad"
application_id = scaleway_iam_application.auto_discovery.id

rule {
project_ids = [data.scaleway_account_project.selected.id]
permission_set_names = ["InstancesReadOnly"]
}
}

resource "scaleway_iam_api_key" "auto_discovery" {
application_id = scaleway_iam_application.auto_discovery.id
description = "Auto discovery key for Nomad"
}

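# Append the auto-discovery secret key to the retry_join string so Consul
# agents can authenticate against the Scaleway API when discovering peers.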
locals {
retry_join_full = "${var.retry_join} token=${scaleway_iam_api_key.auto_discovery.secret_key}"
}

/**
* Nomad Servers
*/
data "scaleway_instance_image" "server" {
name = var.instance_image
}


data "cloudinit_config" "server" {
gzip = false
base64_encode = false

part {
filename = "user-data-server.sh"
content_type = "text/x-shellscript"

content = templatefile("../shared/data-scripts/user-data-server.sh", {
server_count = var.server_count
zone = var.zone
cloud_env = "scaleway"
retry_join = local.retry_join_full
nomad_binary = var.nomad_binary
nomad_consul_token_id = var.nomad_consul_token_id
nomad_consul_token_secret = var.nomad_consul_token_secret
})
}
}

resource "scaleway_instance_ip" "server" {
count = var.server_count
tags = ["nomad", "consul-auto-join", "nomad-server"]
}

resource "scaleway_instance_server" "server" {
count = var.server_count

name = "${var.name}-server-${count.index}"
tags = ["nomad", "consul-auto-join", "nomad-server"]

type = var.server_instance_type
image = data.scaleway_instance_image.server.id

ip_id = scaleway_instance_ip.server[count.index].id

security_group_id = scaleway_instance_security_group.servers_ingress.id

root_volume {
volume_type = "b_ssd"
size_in_gb = var.server_root_block_device_size
delete_on_termination = true
}

user_data = {
"cloud-init" = data.cloudinit_config.server.rendered
}
}

resource "scaleway_instance_security_group" "servers_ingress" {
name = "${var.name}-servers-ingress"
tags = ["nomad"]

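  # Drop inbound traffic by default; the actual allow rules are managed by
  # the scaleway_instance_security_group_rules resources further down.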
inbound_default_policy = "drop"
external_rules = true
}

/**
 * Nomad Clients
 */
data "cloudinit_config" "client" {
gzip = false
base64_encode = false

part {
filename = "user-data-client.sh"
content_type = "text/x-shellscript"

content = templatefile("../shared/data-scripts/user-data-client.sh", {
zone = var.zone
cloud_env = "scaleway"
retry_join = local.retry_join_full
nomad_binary = var.nomad_binary
nomad_consul_token_secret = var.nomad_consul_token_secret
})
}
}

resource "scaleway_instance_ip" "client" {
count = var.client_count
tags = ["nomad", "consul-auto-join", "nomad-client"]
}

resource "scaleway_instance_server" "client" {
depends_on = [
scaleway_instance_server.server
]

count = var.client_count

name = "${var.name}-client-${count.index}"
tags = ["nomad", "consul-auto-join", "nomad-client"]

type = var.client_instance_type
image = data.scaleway_instance_image.server.id

ip_id = scaleway_instance_ip.client[count.index].id

security_group_id = scaleway_instance_security_group.clients_ingress.id

root_volume {
volume_type = "b_ssd"
size_in_gb = 50
delete_on_termination = true
}

user_data = {
"cloud-init" = data.cloudinit_config.client.rendered
}
}

resource "scaleway_instance_security_group" "clients_ingress" {
name = "${var.name}-clients-ingress"
tags = ["nomad"]

inbound_default_policy = "drop"
external_rules = true
}

resource "scaleway_instance_security_group_rules" "servers_ingress" {
security_group_id = scaleway_instance_security_group.servers_ingress.id

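  # Nomad HTTP API/UI (4646), Consul HTTP API/UI (8500), and SSH (22) are
  # open to the allowlisted CIDR, plus ICMP for basic reachability checks.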
inbound_rule {
action = "accept"
port = 4646
protocol = "TCP"
ip_range = var.allowlist_ip
}

inbound_rule {
action = "accept"
port = 8500
protocol = "TCP"
ip_range = var.allowlist_ip
}

inbound_rule {
action = "accept"
port = 22
protocol = "TCP"
ip_range = var.allowlist_ip
}

inbound_rule {
action = "accept"
protocol = "ICMP"
ip_range = var.allowlist_ip
}

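  # The dynamic blocks below open all TCP ports between cluster members,
  # covering both the public and private address of every server and client.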
dynamic "inbound_rule" {
for_each = toset(scaleway_instance_server.server)

content {
action = "accept"
protocol = "TCP"
ip_range = "${inbound_rule.value.public_ip}/32"
}
}

dynamic "inbound_rule" {
for_each = toset(scaleway_instance_server.server)

content {
action = "accept"
protocol = "TCP"
ip_range = "${inbound_rule.value.private_ip}/32"
}
}

dynamic "inbound_rule" {
for_each = toset(scaleway_instance_server.client)

content {
action = "accept"
protocol = "TCP"
ip_range = "${inbound_rule.value.public_ip}/32"
}
}

dynamic "inbound_rule" {
for_each = toset(scaleway_instance_server.client)

content {
action = "accept"
protocol = "TCP"
ip_range = "${inbound_rule.value.private_ip}/32"
}
}
}

resource "scaleway_instance_security_group_rules" "clients_ingress" {
security_group_id = scaleway_instance_security_group.clients_ingress.id

// copy the inbound rules from the servers_ingress security group
dynamic "inbound_rule" {
for_each = scaleway_instance_security_group_rules.servers_ingress.inbound_rule
content {
action = inbound_rule.value.action
port = inbound_rule.value.port == null ? 0 : inbound_rule.value.port
protocol = inbound_rule.value.protocol
ip_range = inbound_rule.value.ip_range
}
}

# Add application ingress rules here
# These rules are applied only to the client nodes

inbound_rule {
action = "accept"
port = 80
protocol = "TCP"
ip_range = var.allowlist_ip
}
}
19 changes: 19 additions & 0 deletions scaleway/outputs.tf
@@ -0,0 +1,19 @@
output "lb_address_consul_nomad" {
value = "http://${scaleway_instance_ip.server[0].address}"
}

output "consul_bootstrap_token_secret" {
value = var.nomad_consul_token_secret
}

output "IP_Addresses" {
value = <<CONFIGURATION

Client public IPs: ${join(", ", scaleway_instance_ip.client[*].address)}

Server public IPs: ${join(", ", scaleway_instance_ip.server[*].address)}

The Consul UI can be accessed at http://${scaleway_instance_ip.server[0].address}:8500/ui
with the bootstrap token: ${var.nomad_consul_token_secret}
CONFIGURATION
}
31 changes: 31 additions & 0 deletions scaleway/post-setup.sh
@@ -0,0 +1,31 @@
#!/bin/bash

NOMAD_USER_TOKEN_FILENAME="nomad.token"
LB_ADDRESS=$(terraform output -raw lb_address_consul_nomad)
CONSUL_BOOTSTRAP_TOKEN=$(terraform output -raw consul_bootstrap_token_secret)

# Get nomad user token from consul kv
NOMAD_TOKEN=$(curl -s --header "Authorization: Bearer ${CONSUL_BOOTSTRAP_TOKEN}" "${LB_ADDRESS}:8500/v1/kv/nomad_user_token?raw")

# Save token to file if file doesn't already exist
if [ ! -f "$NOMAD_USER_TOKEN_FILENAME" ]; then
  echo "$NOMAD_TOKEN" > "$NOMAD_USER_TOKEN_FILENAME"

# Check length of token to see if retrieval worked before deleting from KV
if [ ${#NOMAD_TOKEN} -eq 36 ]; then
# Delete nomad user token from consul kv
DELETE_TOKEN=$(curl -s -X DELETE --header "Authorization: Bearer ${CONSUL_BOOTSTRAP_TOKEN}" "${LB_ADDRESS}:8500/v1/kv/nomad_user_token")

echo -e "\nThe Nomad user token has been saved locally to $NOMAD_USER_TOKEN_FILENAME and deleted from the Consul KV store."

echo -e "\nSet the following environment variables to access your Nomad cluster with the user token created during setup:\n\nexport NOMAD_ADDR=\$(terraform output -raw lb_address_consul_nomad):4646\nexport NOMAD_TOKEN=\$(cat $NOMAD_USER_TOKEN_FILENAME)\n"

echo -e "\nThe Nomad UI can be accessed at ${LB_ADDRESS}:4646/ui\nwith the bootstrap token: $(cat $NOMAD_USER_TOKEN_FILENAME)"

else
echo -e "\nSomething went wrong when retrieving the token from the Consul KV store.\nCheck the nomad.token file or wait a bit and then try running the script again.\n\nNOT deleting token from KV."
fi

else
echo -e "\n***\nThe $NOMAD_USER_TOKEN_FILENAME file already exists - not overwriting. If this is a new run, delete it first.\n***"
fi
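Once terraform apply has finished, a hedged usage sketch for this script (the exports mirror what the script itself prints):

# Run from the scaleway/ directory so terraform output can read the state.
chmod +x post-setup.sh
./post-setup.sh

# Point the Nomad CLI at the cluster with the retrieved user token.
export NOMAD_ADDR=$(terraform output -raw lb_address_consul_nomad):4646
export NOMAD_TOKEN=$(cat nomad.token)
nomad node status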
29 changes: 29 additions & 0 deletions scaleway/variables.hcl.example
@@ -0,0 +1,29 @@
# Packer variables (all are required)
zone = "fr-par-1"

# Terraform variables (all are required)
nomad_consul_token_id = "123e4567-e89b-12d3-a456-426614174000"
nomad_consul_token_secret = "123e4567-e89b-12d3-a456-426614174000"
instance_image = "hashistack-20230310141424"

# The project ID will default to the value of the
# SCW_DEFAULT_PROJECT_ID environment variable or the
# default project ID configured with the Scaleway CLI
# project_id = "123e4567-e89b-12d3-a456-426614174000"

# The retry join allows Consul to automatically
# discover other nodes in the cluster. An IAM key will
# be created in Terraform and appended to the retry_join
# variable
# retry_join = "provider=scaleway tag_name=consul-auto-join"
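# At apply time Terraform appends the IAM secret key, yielding a value like
# "provider=scaleway tag_name=consul-auto-join token=<secret_key>"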

# These variables will default to the values shown
# and do not need to be updated unless you want to
# change them
# allowlist_ip = "0.0.0.0/0"
# name = "nomad"
# server_instance_type = "PLAY2-NANO"
# server_count = "3"
# server_root_block_device_size = 20
# client_instance_type = "PLAY2-NANO"
# client_count = "3"
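
With the variables file filled in, a minimal sketch of the provisioning workflow (assuming the example is copied to variables.hcl, per the .example suffix):

# Copy the example and set instance_image to the name Packer produced.
cp variables.hcl.example variables.hcl

# Provision the cluster, then retrieve the Nomad user token.
terraform init
terraform apply -var-file=variables.hcl
./post-setup.sh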