diff --git a/ods_ci/infrastructure_configuration.yaml.example b/ods_ci/infrastructure_configuration.yaml.example
index a02baa4ef..9c932cc36 100644
--- a/ods_ci/infrastructure_configuration.yaml.example
+++ b/ods_ci/infrastructure_configuration.yaml.example
@@ -2,4 +2,5 @@ infrastructure_configurations:
  provider: GCP
  hive_cluster_name: rhodsawstest
  hive_claim_name: rhodsawstestclaim
+ use_cluster_pool: True
\ No newline at end of file
diff --git a/ods_ci/tasks/Resources/Provisioning/Hive/IBM/ibmcloud-cluster.yaml b/ods_ci/tasks/Resources/Provisioning/Hive/IBM/ibmcloud-cluster.yaml
new file mode 100644
index 000000000..9fbdd5a8f
--- /dev/null
+++ b/ods_ci/tasks/Resources/Provisioning/Hive/IBM/ibmcloud-cluster.yaml
@@ -0,0 +1,77 @@
+apiVersion: v1
+kind: List
+items:
+- apiVersion: v1
+  kind: Secret
+  metadata:
+    name: ibmcloud-sno-install-config
+    namespace: ${infrastructure_configurations}[hive_claim_ns]
+  type: Opaque
+  stringData:
+    install-config.yaml: |
+      apiVersion: v1
+      baseDomain: ${infrastructure_configurations}[base_domain]
+      credentialsMode: Manual
+      compute:
+      - name: worker
+        platform:
+          ibmcloud:
+            type: ${infrastructure_configurations}[worker_node_instance_type]
+        replicas: ${infrastructure_configurations}[worker_node_replicas]
+      controlPlane:
+        name: master
+        platform:
+          ibmcloud:
+            type: ${infrastructure_configurations}[master_node_instance_type]
+        replicas: ${infrastructure_configurations}[master_node_replicas]
+      metadata:
+        name: ${infrastructure_configurations}[hive_cluster_name]
+      networking:
+        clusterNetwork:
+        - cidr: 10.128.0.0/14
+          hostPrefix: 23
+        machineNetwork:
+        - cidr: 10.0.0.0/16
+        serviceNetwork:
+        - 172.30.0.0/16
+      fips: ${infrastructure_configurations}[fips_validation]
+      platform:
+        ibmcloud:
+          region: ${infrastructure_configurations}[ibmcloud_region]
+      pullSecret: '${infrastructure_configurations}[pull_secret]'
+      sshKey: ${infrastructure_configurations}[ssh_key]
+- apiVersion: hive.openshift.io/v1
+  kind: ClusterImageSet
+  metadata:
+    name: ${infrastructure_configurations}[image_set]
+    namespace: ${infrastructure_configurations}[hive_claim_ns]
+  spec:
+    releaseImage: ${infrastructure_configurations}[release_image]
+- apiVersion: v1
+  kind: Secret
+  metadata:
+    name: ibmcloud-creds
+    namespace: ${infrastructure_configurations}[hive_claim_ns]
+  type: Opaque
+  stringData:
+    ibmcloud_api_key: ${infrastructure_configurations}[ibmcloud_api_key]
+- apiVersion: hive.openshift.io/v1
+  kind: ClusterDeployment
+  metadata:
+    name: ${infrastructure_configurations}[hive_cluster_name]
+    namespace: ${infrastructure_configurations}[hive_claim_ns]
+  spec:
+    baseDomain: ${infrastructure_configurations}[base_domain]
+    clusterName: ${infrastructure_configurations}[hive_cluster_name]
+    platform:
+      ibmcloud:
+        region: ${infrastructure_configurations}[ibmcloud_region]
+        credentialsSecretRef:
+          name: ibmcloud-creds
+    provisioning:
+      imageSetRef:
+        name: ${infrastructure_configurations}[image_set]
+      installConfigSecretRef:
+        name: ibmcloud-sno-install-config
+      manifestsSecretRef:
+        name: ${infrastructure_configurations}[hive_cluster_name]-manifests
diff --git a/ods_ci/tasks/Resources/Provisioning/Hive/deprovision.robot b/ods_ci/tasks/Resources/Provisioning/Hive/deprovision.robot
index 040bfdda0..f50d5b7d0 100644
--- a/ods_ci/tasks/Resources/Provisioning/Hive/deprovision.robot
+++ b/ods_ci/tasks/Resources/Provisioning/Hive/deprovision.robot
@@ -1,3 +1,9 @@
+*** Settings ***
+Documentation       Set of keywords to handle self-managed cluster deprovisioning
+Library             OperatingSystem
+Library             OpenShiftLibrary
+
+
 *** Keywords ***
 Set Hive Default Variables
     ${cluster_name} =    Get Variable Value    ${cluster_name}    %{TEST_CLUSTER}
@@ -12,27 +18,55 @@ Set Hive Default Variables
     Set Suite Variable    ${hive_namespace}
 
 Delete Cluster Configuration
-    Log    Deleting cluster ${cluster_name} configuration    console=True
-    @{Delete_Cluster} =    Oc Delete    kind=ClusterPool    name=${pool_name}
-    ...    namespace=${hive_namespace}    api_version=hive.openshift.io/v1
-    Log Many    @{Delete_Cluster}
-    ${Delete_Cluster} =    Oc Delete    kind=ClusterDeploymentCustomization    name=${conf_name}
-    ...    namespace=${hive_namespace}    api_version=hive.openshift.io/v1
-    Log Many    @{Delete_Cluster}
+    IF    ${use_cluster_pool}
+        Log    Deleting cluster ${cluster_name} configuration    console=True
+        @{Delete_Cluster} =    Oc Delete    kind=ClusterPool    name=${pool_name}
+        ...    namespace=${hive_namespace}    api_version=hive.openshift.io/v1
+        Log Many    @{Delete_Cluster}
+        ${Delete_Cluster} =    Oc Delete    kind=ClusterDeploymentCustomization    name=${conf_name}
+        ...    namespace=${hive_namespace}    api_version=hive.openshift.io/v1
+        Log Many    @{Delete_Cluster}
+    ELSE
+        ${Delete_Cluster} =    Oc Delete    kind=ClusterDeployment    name=${cluster_name}
+        ...    namespace=${hive_namespace}    api_version=hive.openshift.io/v1
+        ${rc}    ${out} =    Run And Return Rc And Output    oc wait --for=delete cd/${cluster_name} --timeout 600s
+        Should Be Equal As Integers    ${rc}    ${0}    ${out}
+        IF    "${provider_type}" == "IBM"
+            Oc Delete    kind=Secret    name=${cluster_name}-manifests    namespace=${hive_namespace}
+            ${rc}    ${srv_ids} =    Run And Return Rc And Output
+            ...    ibmcloud iam service-ids --output json | jq -c '.[] | select(.name | contains("${cluster_name}-openshift-")) | .name' | tr -d '"'    # robocop: disable:line-too-long
+            Should Be Equal As Integers    ${rc}    ${0}    msg=${srv_ids}
+            ${srv_ids} =    Split To Lines    ${srv_ids}
+            FOR    ${index}    ${srv}    IN ENUMERATE    @{srv_ids}
+                Log    ${index}: ${srv}
+                ${rc}    ${out} =    Run And Return Rc And Output    ibmcloud iam service-id-delete ${srv} -f
+                Should Be Equal As Integers    ${rc}    ${0}    msg=${out}
+            END
+            IF    len($srv_ids) == 0
+                Log    message=No Service IDs found on IBM Cloud for the ${cluster_name} cluster. Please check.
+                ...    level=WARN
+            END
+        END
+    END
 
 Deprovision Cluster
-    ${cluster_claim} =    Run Keyword And Return Status
-    ...    Unclaim Cluster    ${claim_name}
+    IF    ${use_cluster_pool}
+        ${cluster_claim} =    Run Keyword And Return Status
+        ...    Unclaim Cluster    ${claim_name}
+    ELSE
+        ${cluster_claim} =    Set Variable    ${FALSE}
+    END
     ${cluster_deprovision} =    Run Keyword And Return Status
     ...    Delete Cluster Configuration
-    IF    ${cluster_claim} == False
+    IF    ${use_cluster_pool} == True and ${cluster_claim} == False
     ...    Log    Cluster Claim ${claim_name} does not exists. Deleting Configuration    console=True
     IF    ${cluster_deprovision} == False
     ...    Log    Cluster ${cluster_name} has not been deleted. Please do it manually    console=True
+    ...    level=ERROR
     Log    Cluster ${cluster_name} has been deprovisioned    console=True
 
 Unclaim Cluster
     [Arguments]    ${unclaimname}
     Oc Delete    kind=ClusterClaim    name=${unclaimname}    namespace=${hive_namespace}
     ${status} =    Oc Get    kind=ClusterClaim    name=${unclaimname}    namespace=${hive_namespace}
-    Log    ${status}    console=True
\ No newline at end of file
+    Log    ${status}    console=True
diff --git a/ods_ci/tasks/Resources/Provisioning/Hive/provision.robot b/ods_ci/tasks/Resources/Provisioning/Hive/provision.robot
index e953fe251..3512220d4 100644
--- a/ods_ci/tasks/Resources/Provisioning/Hive/provision.robot
+++ b/ods_ci/tasks/Resources/Provisioning/Hive/provision.robot
@@ -1,5 +1,7 @@
 *** Settings ***
 Resource            deprovision.robot
+Resource            ../../../../tests/Resources/Common.robot
+Library             Process
 
 *** Keywords ***
 Claim Cluster
@@ -8,14 +10,34 @@ Claim Cluster
     ...    template_data=${infrastructure_configurations}
 
 Does ClusterName Exists
-    @{clusterpoolname} =    Oc Get    kind=ClusterPool    namespace=${hive_namespace}    api_version=hive.openshift.io/v1
-    Log Many    @{clusterpoolname}
-    Log Many    ${cluster_name}
-    FOR    ${name}    IN    @{clusterpoolname}
-        IF    "${name}[metadata][name]" == "${pool_name}"
-            Log    ${name}[metadata][name]    console=True
-            ${clustername_exists} =    Set Variable    "${name}[metadata][name]"
-            RETURN    ${clustername_exists}
+    [Arguments]    ${use_pool}=${TRUE}
+    IF    ${use_pool}
+        @{clusterpoolname} =    Oc Get    kind=ClusterPool    namespace=${hive_namespace}
+        ...    api_version=hive.openshift.io/v1
+        Log Many    @{clusterpoolname}
+        Log Many    ${cluster_name}
+        FOR    ${name}    IN    @{clusterpoolname}
+            IF    "${name}[metadata][name]" == "${pool_name}"
+                Log    ${name}[metadata][name]    console=True
+                ${clustername_exists} =    Set Variable    "${name}[metadata][name]"
+                RETURN    ${clustername_exists}
+            END
+        END
+    ELSE
+        ${anycluster} =    Run Keyword And Return Status
+        ...    Oc Get    kind=ClusterDeployment    namespace=${hive_namespace}
+        ...    api_version=hive.openshift.io/v1
+        IF    ${anycluster}
+            ${clusterdeploymentname} =    Oc Get    kind=ClusterDeployment    namespace=${hive_namespace}
+            ...    api_version=hive.openshift.io/v1    fields=['spec.clusterName']
+            ${clusterdeploymentname} =    Set Variable    ${clusterdeploymentname}[0][spec.clusterName]
+            Log    ${clusterdeploymentname}
+            Log    ${cluster_name}
+            IF    "${clusterdeploymentname}" == "${cluster_name}"
+                RETURN    True
+            END
+        ELSE
+            Log    message=No ClusterDeployment found in ${hive_namespace}.
         END
     END
     RETURN    False
@@ -27,24 +49,36 @@ Get Clusters
     END
 
 Provision Cluster
+    [Documentation]    If the cluster does not already exist, it selects the
+    ...    resource provisioning template based on the cloud provider
+    ...    and starts the creation of the cloud resources
     Log    Setting cluster ${cluster_name} configuration    console=True
     Should Be True    "${hive_kubeconf}" != "${EMPTY}"
-    ${clustername_exists} =    Does ClusterName Exists
+    ${clustername_exists} =    Does ClusterName Exists    use_pool=${use_cluster_pool}
     ${template} =    Select Provisioner Template    ${provider_type}
-    IF    ${clustername_exists}
-        Log    Cluster name '${cluster_name}' already exists in Hive pool '${pool_name}' - Checking if it has a valid web-console    console=True
-        ${pool_namespace} =    Get Cluster Pool Namespace    ${pool_name}
-        ${result} =    Run Process    oc -n ${pool_namespace} get cd ${pool_namespace} -o json | jq -r '.status.webConsoleURL' --exit-status    shell=yes
-        IF    ${result.rc} != 0
-            Log    Cluster '${cluster_name}' has previously failed to be provisioned - Cleaning Hive resources    console=True
-            Delete Cluster Configuration
-        ELSE
-            FAIL    Cluster '${cluster_name}' is already in use, please choose a different name.
-        END
-    END
+    IF    ${clustername_exists}    Handle Already Existing Cluster
     Log    Configuring cluster ${cluster_name}    console=True
     Create Provider Resources
 
+Handle Already Existing Cluster
+    [Documentation]    Fails if the cluster already exists and is still in use, otherwise it cleans up
+    ...    the stale configuration. It works with both ClusterPool and ClusterDeployment provisioning types.
+    IF    ${use_cluster_pool}
+        Log    Cluster name '${cluster_name}' already exists in Hive pool '${pool_name}' - Checking if it has a valid web-console    console=True    # robocop: disable:line-too-long
+        ${pool_namespace} =    Get Cluster Pool Namespace    ${pool_name}
+        ${result} =    Run Process    oc -n ${pool_namespace} get cd ${pool_namespace} -o json | jq -r '.status.webConsoleURL' --exit-status    shell=yes    # robocop: disable:line-too-long
+    ELSE
+        Log    Cluster name '${cluster_name}' already exists as Hive ClusterDeployment - Checking if it has a valid web-console    console=True    # robocop: disable:line-too-long
+        ${result} =    Run Process    oc -n ${hive_namespace} get cd ${cluster_name} -o json | jq -r '.status.webConsoleURL' --exit-status    shell=yes    # robocop: disable:line-too-long
+    END
+    IF    ${result.rc} != 0
+        Log    Cluster '${cluster_name}' has previously failed to be provisioned - Cleaning Hive resources
+        ...    console=True
+        Delete Cluster Configuration
+    ELSE
+        FAIL    Cluster '${cluster_name}' is already in use, please choose a different name.
+    END
+
 Create Provider Resources
     Log    Creating Hive resources for cluster ${cluster_name} on ${provider_type} according to: ${template}    console=True
     IF    "${provider_type}" == "AWS"
@@ -55,6 +89,12 @@ Create Provider Resources
     ...    template_data=${infrastructure_configurations}
     ELSE IF    "${provider_type}" == "OSP"
         Create Openstack Resources
+    ELSE IF    "${provider_type}" == "IBM"
+        Create IBM CredentialsRequests And Service IDs
+        Create IBM Manifests Secret
+        ${hive_yaml} =    Set Variable    ${artifacts_dir}/${cluster_name}_hive.yaml
+        Create File From Template    ${template}    ${hive_yaml}
+        Oc Apply    kind=List    src=${hive_yaml}    api_version=v1
     ELSE
         FAIL    Invalid provider name
     END
@@ -70,6 +110,9 @@ Select Provisioner Template
     ELSE IF    "${provider_type}" == "OSP"
         Set Task Variable    ${template}    tasks/Resources/Provisioning/Hive/OSP/hive_osp_cluster_template.yaml
         Log    Setting OSP Hive Template ${template}    console=True
+    ELSE IF    "${provider_type}" == "IBM"
+        Set Task Variable    ${template}    tasks/Resources/Provisioning/Hive/IBM/ibmcloud-cluster.yaml
+        Log    Setting IBM Hive Template ${template}    console=True
     ELSE
         FAIL    Invalid provider name
     END
@@ -115,7 +158,12 @@ Watch Hive Install Log
     WHILE    True    limit=${hive_timeout}    on_limit_message=Hive Install ${hive_timeout} Timeout Exceeded    # robotcode: ignore
         ${old_log_data} =    Get File    ${install_log_file}
         ${last_line_index} =    Get Line Count    ${old_log_data}
-        ${pod} =    Oc Get    kind=Pod    namespace=${namespace}
+        IF    ${use_cluster_pool}
+            ${pod} =    Oc Get    kind=Pod    namespace=${namespace}
+        ELSE
+            ${pod} =    Oc Get    kind=Pod    namespace=${namespace}
+            ...    label_selector=hive.openshift.io/cluster-deployment-name=${cluster_name}
+        END
         TRY
             ${new_log_data} =    Oc Get Pod Logs    name=${pod[0]['metadata']['name']}    container=hive    namespace=${namespace}
         EXCEPT
@@ -145,31 +193,43 @@ Watch Hive Install Log
     Should Contain    ${new_log_data}    install completed successfully
 
 Wait For Cluster To Be Ready
-    ${pool_namespace} =    Get Cluster Pool Namespace    ${pool_name}
-    Set Task Variable    ${pool_namespace}
-    Log    Watching Hive Pool namespace: ${pool_namespace}    console=True
+    IF    ${use_cluster_pool}
+        ${pool_namespace} =    Get Cluster Pool Namespace    ${pool_name}
+        Set Task Variable    ${pool_namespace}
+        Set Task Variable    ${clusterdeployment_name}    ${pool_namespace}
+        Log    Watching Hive Pool namespace: ${pool_namespace}    console=True
+    ELSE
+        Set Task Variable    ${pool_namespace}    ${hive_namespace}
+        Set Task Variable    ${clusterdeployment_name}    ${cluster_name}
+        Log    Watching Hive ClusterDeployment namespace: ${pool_namespace}    console=True
+    END
     ${install_log_file} =    Set Variable    ${artifacts_dir}/${cluster_name}_install.log
     Create File    ${install_log_file}
     Run Keyword And Ignore Error    Watch Hive Install Log    ${pool_namespace}    ${install_log_file}
     Log    Verifying that Cluster '${cluster_name}' has been provisioned and is running according to Hive Pool namespace '${pool_namespace}'    console=True    # robocop: disable:line-too-long
     ${provision_status} =    Run Process
-    ...    oc -n ${pool_namespace} wait --for\=condition\=ProvisionFailed\=False cd ${pool_namespace} --timeout\=15m
+    ...    oc -n ${pool_namespace} wait --for\=condition\=ProvisionFailed\=False cd ${clusterdeployment_name} --timeout\=15m    # robocop: disable:line-too-long
     ...    shell=yes
     ${web_access} =    Run Process
-    ...    oc -n ${pool_namespace} get cd ${pool_namespace} -o json | jq -r '.status.webConsoleURL' --exit-status
+    ...    oc -n ${pool_namespace} get cd ${clusterdeployment_name} -o json | jq -r '.status.webConsoleURL' --exit-status    # robocop: disable:line-too-long
     ...    shell=yes
-    ${claim_status} =    Run Process
-    ...    oc -n ${hive_namespace} wait --for\=condition\=ClusterRunning\=True clusterclaim ${claim_name} --timeout\=15m    shell=yes    # robocop: disable:line-too-long
+    IF    ${use_cluster_pool}
+        ${cluster_status} =    Run Process
+        ...    oc -n ${hive_namespace} wait --for\=condition\=ClusterRunning\=True clusterclaim ${claim_name} --timeout\=15m    shell=yes    # robocop: disable:line-too-long
+    ELSE
+        ${cluster_status} =    Run Process
+        ...    oc -n ${hive_namespace} wait --for\=condition\=Ready\=True clusterdeployment ${clusterdeployment_name} --timeout\=15m    shell=yes    # robocop: disable:line-too-long
+    END
     # Workaround for old Hive with Openstack - Cluster is displayed as Resuming even when it is Running
     # add also support to the new Hive where the Cluster is displayed as Running
     IF    "${provider_type}" == "OSP"
-        ${claim_status} =    Run Process
-        ...    oc -n ${hive_namespace} get clusterclaim ${claim_name} -o json | jq '.status.conditions[] | select(.type\=\="ClusterRunning" and (.reason\=\="Resuming" or .reason\=\="Running"))' --exit-status    shell=yes
+        ${cluster_status} =    Run Process
+        ...    oc -n ${hive_namespace} get clusterclaim ${claim_name} -o json | jq '.status.conditions[] | select(.type\=\="ClusterRunning" and (.reason\=\="Resuming" or .reason\=\="Running"))' --exit-status    shell=yes    # robocop: disable:line-too-long
     END
-    IF    ${provision_status.rc} != 0 or ${web_access.rc} != 0 or ${claim_status.rc} != 0
-        ${provision_status} =    Run Process    oc -n ${pool_namespace} get cd ${pool_namespace} -o json    shell=yes
-        ${claim_status} =    Run Process    oc -n ${hive_namespace} get clusterclaim ${claim_name} -o json    shell=yes
-        Log    Cluster '${cluster_name}' deployment had errors, see: ${\n}${provision_status.stdout}${\n}${claim_status.stdout}    level=ERROR    # robocop: disable:line-too-long
+    IF    ${provision_status.rc} != 0 or ${web_access.rc} != 0 or ${cluster_status.rc} != 0
+        ${provision_status} =    Run Process    oc -n ${pool_namespace} get cd ${clusterdeployment_name} -o json    shell=yes    # robocop: disable:line-too-long
+        ${cluster_status} =    Run Process    oc -n ${hive_namespace} get clusterclaim ${claim_name} -o json    shell=yes
+        Log    Cluster '${cluster_name}' deployment had errors, see: ${\n}${provision_status.stdout}${\n}${cluster_status.stdout}    level=ERROR    # robocop: disable:line-too-long
         Log    Cluster '${cluster_name}' install completed, but it is not accessible - Cleaning Hive resources now
         ...    console=True
         Deprovision Cluster
@@ -180,23 +240,23 @@ Wait For Cluster To Be Ready
 Save Cluster Credentials
     Set Task Variable    ${cluster_details}    ${artifacts_dir}/${cluster_name}_details.txt
     ${result} =    Run Process
-    ...    oc -n ${pool_namespace} get cd ${pool_namespace} -o json | jq -r '.status.webConsoleURL' --exit-status
+    ...    oc -n ${pool_namespace} get cd ${clusterdeployment_name} -o json | jq -r '.status.webConsoleURL' --exit-status    # robocop: disable:line-too-long
     ...    shell=yes
     Should Be True    ${result.rc} == 0
-    ...    Hive Cluster deployment '${pool_namespace}' does not have a valid webConsoleURL access
+    ...    Hive Cluster deployment '${clusterdeployment_name}' does not have a valid webConsoleURL access
     Create File    ${cluster_details}    console=${result.stdout}\n
     ${result} =    Run Process
-    ...    oc -n ${pool_namespace} get cd ${pool_namespace} -o json | jq -r '.status.apiURL' --exit-status
+    ...    oc -n ${pool_namespace} get cd ${clusterdeployment_name} -o json | jq -r '.status.apiURL' --exit-status
     ...    shell=yes
     Append To File    ${cluster_details}    api=${result.stdout}\n
-    ${result} =    Run Process    oc extract -n ${pool_namespace} --confirm secret/$(oc -n ${pool_namespace} get cd ${pool_namespace} -o jsonpath\='{.spec.clusterMetadata.adminPasswordSecretRef.name}') --to\=${artifacts_dir}
+    ${result} =    Run Process    oc extract -n ${pool_namespace} --confirm secret/$(oc -n ${pool_namespace} get cd ${clusterdeployment_name} -o jsonpath\='{.spec.clusterMetadata.adminPasswordSecretRef.name}') --to\=${artifacts_dir}    # robocop: disable:line-too-long
     ...    shell=yes
     Should Be True    ${result.rc} == 0
     ${username} =    Get File    ${artifacts_dir}/username
     ${password} =    Get File    ${artifacts_dir}/password
     Append To File    ${cluster_details}    username=${username}\n
     Append To File    ${cluster_details}    password=${password}\n
-    ${result} =    Run Process    oc extract -n ${pool_namespace} --confirm secret/$(oc -n ${pool_namespace} get cd ${pool_namespace} -o jsonpath\='{.spec.clusterMetadata.adminKubeconfigSecretRef.name}') --to\=${artifacts_dir}
+    ${result} =    Run Process    oc extract -n ${pool_namespace} --confirm secret/$(oc -n ${pool_namespace} get cd ${clusterdeployment_name} -o jsonpath\='{.spec.clusterMetadata.adminKubeconfigSecretRef.name}') --to\=${artifacts_dir}    # robocop: disable:line-too-long
     ...    shell=yes
     Should Be True    ${result.rc} == 0

@@ -219,3 +279,23 @@ Get Cluster Pool Namespace
     ...    Oc Get    kind=Namespace    label_selector=hive.openshift.io/cluster-pool-name=${hive_pool_name}
     ${pool_namespace} =    Set Variable    ${namespace[0]['metadata']['name']}
     RETURN    ${pool_namespace}
+
+Create IBM CredentialsRequests And Service IDs
+    [Documentation]    Creates the CredentialsRequests manifests and the Service ID resources
+    ...    required for provisioning resources on IBM Cloud
+    ...    ref: https://github.com/openshift/cloud-credential-operator/blob/master/docs/ccoctl.md#ibmcloud
+    ${result} =    Run Process    command=mkdir credreqs ; oc adm release extract --cloud=ibmcloud --credentials-requests ${release_image} --to=./credreqs    # robocop: disable:line-too-long
+    ...    shell=yes
+    Should Be True    ${result.rc} == 0    msg=${result.stderr}
+    Set Log Level    NONE
+    ${result} =    Run Process    command=export IC_API_KEY=${infrastructure_configurations}[ibmcloud_api_key] && ccoctl ibmcloud create-service-id --credentials-requests-dir ./credreqs --name ${cluster_name}    # robocop: disable:line-too-long
+    ...    shell=yes
+    Set Log Level    INFO
+    Should Be True    ${result.rc} == 0    msg=${result.stderr}
+
+Create IBM Manifests Secret
+    [Documentation]    Creates a Secret in Hive containing the CredentialsRequests manifests
+    ${result} =    Run Process    command=oc create secret generic ${cluster_name}-manifests -n ${hive_namespace} --from-file=./manifests    # robocop: disable:line-too-long
+    ...    shell=yes
+    Log    ${result.stderr}
+    Should Be True    ${result.rc} == 0    msg=${result.stderr}
diff --git a/ods_ci/tasks/Tasks/provision_self_managed_cluster.robot b/ods_ci/tasks/Tasks/provision_self_managed_cluster.robot
index ccc17e236..657d23482 100644
--- a/ods_ci/tasks/Tasks/provision_self_managed_cluster.robot
+++ b/ods_ci/tasks/Tasks/provision_self_managed_cluster.robot
@@ -1,5 +1,8 @@
 *** Settings ***
-Documentation       Provision Self-Managed clusters for testing Beta
+Documentation       Provision Self-Managed clusters for testing. The cluster provisioning
+...                 currently relies on a Hive ClusterPool for AWS, GCP and OpenStack (PSI).
+...                 For IBM Cloud it uses a Hive ClusterDeployment, since pools are not currently
+...                 supported for this provider in Hive.
 Metadata            provision
 Resource            ../../tests/Resources/Common.robot
 Resource            ../Resources/Provisioning/Hive/provision.robot
@@ -16,17 +19,20 @@ ${hive_kubeconf}    %{KUBECONFIG}
 ${cluster_name}    ${infrastructure_configurations}[hive_cluster_name]
 ${hive_namespace}    ${infrastructure_configurations}[hive_claim_ns]
 ${provider_type}    ${infrastructure_configurations}[provider]
+${release_image}    ${infrastructure_configurations}[release_image]
 ${claim_name}    ${cluster_name}-claim
 ${pool_name}    ${cluster_name}-pool
 ${conf_name}    ${cluster_name}-conf
 ${artifacts_dir}    ${OUTPUT DIR}
 
+
 *** Tasks ***
 Provision Self-Managed Cluster
     [Documentation]    Provision a self-managed cluster
     [Tags]    self_managed_provision
+    [Setup]    Set ClusterPool Variables
     Provision Cluster
-    Claim Cluster
+    IF    ${use_cluster_pool}    Claim Cluster
     Wait For Cluster To Be Ready
     Save Cluster Credentials
     Login To Cluster
@@ -35,7 +41,9 @@ Provision Self-Managed Cluster
 Deprovision Self-Managed Cluster
     [Documentation]    Deprovision a self-managed cluster
     [Tags]    self_managed_deprovision
-    [Setup]    Set Hive Default Variables
+    [Setup]    Run Keywords    Set Hive Default Variables
+    ...    AND
+    ...    Set ClusterPool Variables
     Deprovision Cluster
 
 Add GPU Node To Self-Managed AWS Cluster
@@ -55,3 +63,19 @@ Disconnect Self-Managed Cluster
     [Documentation]    Disconnect a self-managed cluster
     [Tags]    self_managed_disconnect
     Disconnect Cluster
+
+
+*** Keywords ***
+Set ClusterPool Variables
+    [Documentation]    Sets the variable that instructs the task whether to use the ClusterPool
+    ...    or the ClusterDeployment provisioning type.
+    ...    You can set the value of the 'use_cluster_pool' key in the infrastructure_configurations yaml file.
+    ...    By default (if the key is not set in the yaml) it uses ClusterPool.
+    ${key_present} =    Run Keyword And Return Status    Dictionary Should Contain Key
+    ...    ${infrastructure_configurations}    use_cluster_pool
+    IF    ${key_present}
+        Set Task Variable    ${use_cluster_pool}    ${infrastructure_configurations}[use_cluster_pool]
+    ELSE
+        # use ClusterPool as default option
+        Set Task Variable    ${use_cluster_pool}    ${TRUE}
+    END
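For reference, below is a minimal sketch of the infrastructure_configurations entries that the new IBM Cloud ClusterDeployment path reads. The key names come from ibmcloud-cluster.yaml and the keywords in this change; every value is an illustrative placeholder, not a tested default:

infrastructure_configurations:
 provider: IBM
 use_cluster_pool: False
 hive_cluster_name: myibmcluster       # also used to name the <cluster>-manifests Secret and the Service IDs
 hive_claim_ns: myhivenamespace        # namespace where the Hive resources are created
 base_domain: example.com
 ibmcloud_region: eu-de
 ibmcloud_api_key: <ibm-cloud-api-key>
 worker_node_instance_type: bx2-4x16
 worker_node_replicas: 2
 master_node_instance_type: bx2-4x16
 master_node_replicas: 3
 fips_validation: false
 image_set: myclusterimageset
 release_image: <ocp-release-image-pullspec>
 pull_secret: '<pull-secret-json>'
 ssh_key: <ssh-public-key>

With use_cluster_pool set to False, the provisioning task skips Claim Cluster and waits directly on the ClusterDeployment named after hive_cluster_name in hive_claim_ns.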