ci/e2e: install the first 3 nodes all in parallel #678

Status: Merged (2 commits, Feb 16, 2023)

Changes from all commits
15 changes: 5 additions & 10 deletions .github/workflows/master-e2e.yaml
@@ -301,18 +301,13 @@ jobs:
       - name: Configure Rancher & Libvirt
         if: inputs.test_type == 'cli'
         run: cd tests && make e2e-configure-rancher
-      - name: Bootstrap node 1 with current build (use Emulated TPM and iPXE)
-        if: inputs.test_type == 'cli'
-        env:
-          EMULATE_TPM: true
-          VM_INDEX: 1
-        run: cd tests && make e2e-bootstrap-node
-      - name: Bootstrap node 2 and 3 with current build (use Emulated TPM and ISO)
+      - name: Bootstrap node 1, 2 and 3 with current build (use Emulated TPM and ISO) in pool "master"
         if: inputs.test_type == 'cli'
         env:
           EMULATE_TPM: true
           ISO_BOOT: true
-          VM_INDEX: 2
+          POOL: master
+          VM_INDEX: 1
           VM_NUMBERS: 3
         run: |
           OPERATOR_VERSION=$(kubectl get pods \
@@ -325,9 +320,10 @@ jobs:
           [[ "${OPERATOR_VERSION}" == "1.0" ]] && unset EMULATE_TPM

           cd tests && make e2e-bootstrap-node
-      - name: Bootstrap additional nodes (total of ${{ inputs.node_number }}) with current build (use iPXE)
+      - name: Bootstrap additional nodes (total of ${{ inputs.node_number }}) with current build (use iPXE) in pool "worker"
         if: inputs.test_type == 'cli' && inputs.node_number > 3
         env:
+          POOL: worker
           VM_INDEX: 4
           VM_NUMBERS: ${{ inputs.node_number }}
         run: cd tests && make e2e-bootstrap-node
@@ -437,4 +433,3 @@ jobs:
           gcloud --quiet compute instances delete ${{ needs.create-runner.outputs.runner }} \
             --delete-disks all \
             --zone ${{ inputs.zone }}
-
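With this change the three "master" nodes are provisioned by a single step: VM_INDEX marks the first VM, VM_NUMBERS the last, and the test suite keeps a sync.WaitGroup around for the fan-out (see bootstrap_test.go below). A minimal sketch of that fan-out pattern, assuming an inclusive index range and a hypothetical provisionNode helper (not the suite's actual code):

```go
package main

import (
	"fmt"
	"sync"
)

// provisionNode is a hypothetical stand-in for the per-VM bootstrap logic.
func provisionNode(index int) {
	fmt.Printf("bootstrapping node %d\n", index)
}

func main() {
	// Mirrors the workflow step: VM_INDEX=1, VM_NUMBERS=3, POOL=master.
	vmIndex, vmNumbers := 1, 3

	var wg sync.WaitGroup
	for i := vmIndex; i <= vmNumbers; i++ {
		wg.Add(1)
		go func(index int) {
			defer wg.Done()
			provisionNode(index) // the three nodes are installed concurrently
		}(i)
	}
	wg.Wait() // continue only once all nodes are bootstrapped
}
```

The point of the single step is exactly this: one make invocation covers the whole index range instead of one workflow step per node.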
2 changes: 1 addition & 1 deletion tests/assets/cluster.yaml
@@ -13,7 +13,7 @@ spec:
         kind: MachineInventorySelectorTemplate
         name: selector-master-%CLUSTER_NAME%
       name: pool-master-%CLUSTER_NAME%
-      quantity: 1
+      quantity: 0
       workerRole: true
     - controlPlaneRole: false
       etcdRole: false
49 changes: 19 additions & 30 deletions tests/e2e/bootstrap_test.go
@@ -82,26 +82,13 @@ func getNodeInfo(hostName string, index int) (*tools.Client, string) {
 
 var _ = Describe("E2E - Bootstrapping node", Label("bootstrap"), func() {
     var (
-        machineRegName string
-        poolType       string
-        wg             sync.WaitGroup
+        wg sync.WaitGroup
     )
 
-    BeforeEach(func() {
-        // Set pool type
-        if vmIndex < 4 {
-            // First third nodes are in Master pool
-            poolType = "master"
-        } else {
-            // The others are in Worker pool
-            poolType = "worker"
-        }
-
+    It("Provision the node", func() {
         // Set MachineRegistration name based on hostname
-        machineRegName = "machine-registration-" + poolType + "-" + clusterName
-    })
+        machineRegName := "machine-registration-" + poolType + "-" + clusterName
 
-    It("Provision the node", func() {
         By("Setting emulated TPM to "+strconv.FormatBool(emulateTPM), func() {
             // Set temporary file
             emulatedTmp, err := misc.CreateTemp("emulatedTPM")
@@ -220,22 +207,24 @@ var _ = Describe("E2E - Bootstrapping node", Label("bootstrap"), func() {
             By("Ensuring that the cluster is in healthy state", func() {
                 checkClusterState()
             })
+        }
 
-            By("Increasing 'quantity' node of predefined cluster", func() {
-                comparator := ">"
-                if poolType == "worker" {
-                    // In case of worker the first value could be equal to 1
-                    comparator = ">="
-                }
+        By("Increment number of nodes in pool "+poolType, func() {
+            // Increase 'quantity' field
+            value, err := misc.IncreaseQuantity(clusterNS,
+                clusterName,
+                "pool-"+poolType+"-"+clusterName, addedNode)
+            Expect(err).To(Not(HaveOccurred()))
+            Expect(value).To(BeNumerically(">=", 1))
 
-                // Increase 'quantity' field
-                value, err := misc.IncreaseQuantity(clusterNS,
-                    clusterName,
-                    "pool-"+poolType+"-"+clusterName, addedNode)
-                Expect(err).To(Not(HaveOccurred()))
-                Expect(value).To(BeNumerically(comparator, 1))
-            })
-        }
+            // Check that the selector has been correctly created
+            Eventually(func() string {
+                out, _ := kubectl.Run("get", "MachineInventorySelector",
+                    "--namespace", clusterNS,
+                    "-o", "jsonpath={.items[*].metadata.name}")
+                return out
+            }, misc.SetTimeout(3*time.Minute), 5*time.Second).Should(ContainSubstring("selector-" + poolType + "-" + clusterName))
+        })
 
         By("Waiting for known cluster state before adding the node(s)", func() {
             msg := `(configuring .* node\(s\)|waiting for viable init node)`
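The quantity/selector logic now runs for whichever pool is under test: misc.IncreaseQuantity bumps the 'quantity' of the matching machine pool (which starts at 0 in tests/assets/cluster.yaml) and the new Eventually block waits for the corresponding MachineInventorySelector to appear. The helper's implementation is not part of this diff; purely as an illustration of what bumping a pool quantity involves, here is a hypothetical standalone sketch that drives kubectl directly (resource name, namespace, pool index and JSON path are assumptions, not the suite's code):

```go
package main

import (
	"fmt"
	"os/exec"
)

// bumpPoolQuantity is an illustration only, not the misc.IncreaseQuantity helper:
// it patches the quantity of one machine pool of a Rancher provisioning cluster.
func bumpPoolQuantity(namespace, clusterName string, poolIndex, quantity int) error {
	patch := fmt.Sprintf(
		`[{"op": "replace", "path": "/spec/rkeConfig/machinePools/%d/quantity", "value": %d}]`,
		poolIndex, quantity)

	out, err := exec.Command("kubectl", "patch",
		"clusters.provisioning.cattle.io", clusterName,
		"--namespace", namespace,
		"--type", "json",
		"--patch", patch).CombinedOutput()
	if err != nil {
		return fmt.Errorf("patch failed: %w\n%s", err, out)
	}
	return nil
}

func main() {
	// Example values only: raise the first pool of "my-cluster" to 3 nodes.
	if err := bumpPoolQuantity("fleet-default", "my-cluster", 0, 3); err != nil {
		fmt.Println(err)
	}
}
```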
11 changes: 0 additions & 11 deletions tests/e2e/configure_test.go
@@ -101,17 +101,6 @@ var _ = Describe("E2E - Configure test", Label("configure"), func() {
                     "-o", "jsonpath={.items[*].metadata.name}")
                 return out
             }, misc.SetTimeout(3*time.Minute), 5*time.Second).Should(ContainSubstring("selector-" + pool + "-" + clusterName))
-
-            // Check that the selector for master is correctly created
-            // NOTE: the worker one is not created yet because 'quantity' is set to 0 for this one
-            if pool == "master" {
-                Eventually(func() string {
-                    out, _ := kubectl.Run("get", "MachineInventorySelector",
-                        "--namespace", clusterNS,
-                        "-o", "jsonpath={.items[*].metadata.name}")
-                    return out
-                }, misc.SetTimeout(3*time.Minute), 5*time.Second).Should(ContainSubstring("selector-" + pool + "-" + clusterName))
-            }
         }
     })
 
2 changes: 2 additions & 0 deletions tests/e2e/suite_test.go
@@ -61,6 +61,7 @@ var (
     k8sVersion          string
     numberOfVMs         int
     osImage             string
+    poolType            string
     proxy               string
     rancherChannel      string
     rancherLogCollector string
@@ -98,6 +99,7 @@ var _ = BeforeSuite(func() {
     k8sVersion = os.Getenv("K8S_VERSION_TO_PROVISION")
     number := os.Getenv("VM_NUMBERS")
     osImage = os.Getenv("CONTAINER_IMAGE")
+    poolType = os.Getenv("POOL")
     proxy = os.Getenv("PROXY")
     rancherChannel = os.Getenv("RANCHER_CHANNEL")
     rancherLogCollector = os.Getenv("RANCHER_LOG_COLLECTOR")
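The suite now also reads the new POOL variable exported by the workflow. A small standalone sketch of how these variables can be consumed; the empty-value fallbacks are assumptions for illustration, not taken from the suite:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	// POOL is "master" or "worker" in the workflow; the default is assumed for local runs.
	poolType := os.Getenv("POOL")
	if poolType == "" {
		poolType = "master"
	}

	// VM_NUMBERS is the index of the last VM to install (3 in the new CI step).
	lastVM := 1
	if number := os.Getenv("VM_NUMBERS"); number != "" {
		n, err := strconv.Atoi(number)
		if err != nil {
			fmt.Fprintf(os.Stderr, "invalid VM_NUMBERS %q: %v\n", number, err)
			os.Exit(1)
		}
		lastVM = n
	}

	fmt.Printf("pool=%s lastVM=%d\n", poolType, lastVM)
}
```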
14 changes: 7 additions & 7 deletions tests/scripts/install-vm
@@ -31,6 +31,9 @@ if [[ ${EMULATE_TPM} != "true" ]]; then
   EMULATED_TPM="emulator,model=tpm-crb,version=2.0"
 fi
 
+# Create directories: dedicated one for storage pool + logs one
+mkdir -p logs ${VM_NAME}
+
 # iPXE stuff will not be used if ISO is set
 if [[ ${ISO_BOOT} == "true" ]]; then
   ISO=$(realpath ../../elemental-*.iso 2>/dev/null)
@@ -40,8 +43,11 @@ if [[ ${ISO_BOOT} == "true" ]]; then
     && echo "File ${ISO} not found! Exiting!" >&2 \
     && exit 1
 
+  # Soft-link the ISO to avoid "Could not define storage pool" error
+  ln -s ${ISO} ${VM_NAME}/
+
   # Force ISO boot
-  INSTALL_FLAG="--cdrom ${ISO}"
+  INSTALL_FLAG="--cdrom ${VM_NAME}/${ISO##*/}"
 else
   # Create symlink for binary but only if it doesn't exist
   SYM_LINK=../../ipxe.efi
@@ -62,12 +68,6 @@
   INSTALL_FLAG="--pxe"
 fi
 
-# Create logs directory if needed
-mkdir -p logs
-
-# Create a dedicated directory for storage pool
-mkdir -p ${VM_NAME}
-
 # Wait randomly until 20s to avoid running virt-install at the same time
 # Because it can lead to some issues with hugepages
 sleep $((RANDOM % 20))
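The script still sleeps a random 0-19 seconds before virt-install, so the VMs that are now installed in parallel do not all hit libvirt at the same instant (the comment points at hugepages issues). The same staggering idea, sketched in Go for illustration only; the command is a placeholder, not how the suite actually invokes the script:

```go
package main

import (
	"fmt"
	"math/rand"
	"os/exec"
	"time"
)

// staggeredRun waits a random 0-19 s before starting the command, mirroring
// the `sleep $((RANDOM % 20))` guard in tests/scripts/install-vm.
func staggeredRun(name string, args ...string) error {
	time.Sleep(time.Duration(rand.Intn(20)) * time.Second)
	out, err := exec.Command(name, args...).CombinedOutput()
	if err != nil {
		return fmt.Errorf("%s failed: %w\n%s", name, err, out)
	}
	return nil
}

func main() {
	// Placeholder invocation; the real workflow goes through `make e2e-bootstrap-node`.
	if err := staggeredRun("echo", "installing VM"); err != nil {
		fmt.Println(err)
	}
}
```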