Skip to content

Commit

Permalink
Add cluster configuration for AZ regression suites
Browse files Browse the repository at this point in the history
Signed-off-by: deepssin <deepssin@redhat.com>
  • Loading branch information
deepssin committed Feb 18, 2025
1 parent ae0e218 commit cb375eb
Show file tree
Hide file tree
Showing 2 changed files with 271 additions and 0 deletions.
106 changes: 106 additions & 0 deletions conf/squid/common/9node-1client-availability-zone.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,106 @@
# Test Suite that deploys 3 AZs in different subnets
# Deployment for all the ceph daemons, with 9 mons, 6 mgrs, 9 OSD daemons
# NOTE(review): indentation reconstructed per the cephci cluster-conf schema
# (globals -> ceph-cluster -> nodeN -> networks/role/no-of-volumes/disk-size).

globals:
  - ceph-cluster:
      name: ceph
      # --- AZ 1 (shared_net_15): nodes 1-3 ---
      node1:
        networks:
          - shared_net_15
        role:
          - _admin
          - mon
          - mgr
          - installer
          - alertmanager
          - osd
        no-of-volumes: 4
        disk-size: 15
      node2:
        networks:
          - shared_net_15
        role:
          - mon
          - mgr
          - rgw
          - osd
          - grafana
        no-of-volumes: 4
        disk-size: 15
      node3:
        networks:
          - shared_net_15
        role:
          - osd
          - mon
          - mds
          - prometheus
        no-of-volumes: 4
        disk-size: 15
      # --- AZ 2 (shared_net_2): nodes 4-6 ---
      node4:
        networks:
          - shared_net_2
        role:
          - _admin
          - mon
          - mgr
          - osd
          - alertmanager
        no-of-volumes: 4
        disk-size: 15
      node5:
        networks:
          - shared_net_2
        role:
          - mon
          - mgr
          - rgw
          - osd
          - grafana
        no-of-volumes: 4
        disk-size: 15
      node6:
        networks:
          - shared_net_2
        role:
          - osd
          - mon
          - mds
          - prometheus
        no-of-volumes: 4
        disk-size: 15
      # --- AZ 3 (shared_net_5): nodes 7-9 ---
      node7:
        networks:
          - shared_net_5
        role:
          - _admin
          - mon
          - mgr
          - osd
          - alertmanager
        no-of-volumes: 4
        disk-size: 15
      node8:
        networks:
          - shared_net_5
        role:
          - mon
          - mgr
          - rgw
          - osd
          - grafana
        no-of-volumes: 4
        disk-size: 15
      node9:
        networks:
          - shared_net_5
        role:
          - osd
          - mon
          - mds
          - prometheus
        no-of-volumes: 4
        disk-size: 15
      # Client node — no networks entry in the original; presumably placed on
      # the default network. TODO confirm against the provisioning backend.
      node10:
        role:
          - client
165 changes: 165 additions & 0 deletions suites/squid/common/regression/AZ_cluster_deploy_and_configure.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,165 @@
# Use cluster-conf file: conf/squid/common/9node-1client-availability-zone.yaml
# 3 AZ cluster deployment tests
# NOTE(review): indentation reconstructed per the cephci suite schema
# (tests -> "- test:" stanzas with name/desc/module/polarion-id/config).
tests:
  - test:
      name: Install ceph pre-requisites
      desc: installation of ceph pre-requisites
      module: install_prereq.py
      abort-on-fail: true

  # Bootstrap the cluster and register all 9 server hosts, grouped into
  # three datacenters (DC1/DC2/DC3) matching the three subnets in the conf.
  - test:
      name: Cephadm Bootstrap with apply-spec
      desc: Apply spec in Bootstrap with host location attributes
      module: test_bootstrap.py
      polarion-id: CEPH-83575289
      config:
        command: bootstrap
        base_cmd_args:
          verbose: true
        args:
          mon-ip: node1
          ssh-user: cephuser
          apply-spec:
            - service_type: host
              address: true
              labels: apply-all-labels
              nodes:
                - node1
                - node2
                - node3
              location:
                root: default
                datacenter: DC1
            - service_type: host
              address: true
              labels: apply-all-labels
              nodes:
                - node4
                - node5
                - node6
              location:
                root: default
                datacenter: DC2
            - service_type: host
              address: true
              labels: apply-all-labels
              nodes:
                - node7
                - node8
                - node9
              location:
                root: default
                datacenter: DC3
            # Pin each mon's CRUSH location to its datacenter so monitor
            # placement tracks the availability zones.
            - service_type: mon
              spec:
                crush_locations:
                  node1:
                    - datacenter=DC1
                  node2:
                    - datacenter=DC1
                  node3:
                    - datacenter=DC1
                  node4:
                    - datacenter=DC2
                  node5:
                    - datacenter=DC2
                  node6:
                    - datacenter=DC2
                  node7:
                    - datacenter=DC3
                  node8:
                    - datacenter=DC3
                  node9:
                    - datacenter=DC3
              placement:
                label: mon
            - service_type: mgr
              placement:
                label: mgr
      destroy-cluster: false
      abort-on-fail: true

  - test:
      name: Service deployment with spec
      desc: Add OSD services using spec file.
      module: test_cephadm.py
      polarion-id: CEPH-83573746
      config:
        steps:
          - config:
              command: apply_spec
              service: orch
              validate-spec-services: true
              specs:
                - service_type: osd
                  service_id: all-available-devices
                  placement:
                    label: osd
                  spec:
                    data_devices:
                      all: "true"  # boolean as string
          - config:
              command: shell
              args:  # display OSD tree
                - "ceph osd tree"

  - test:
      name: MDS Service deployment with spec
      desc: Add MDS services using spec file
      module: test_cephadm.py
      polarion-id: CEPH-83574728
      config:
        steps:
          # Create the cephfs volume first, then deploy MDS daemons on the
          # mds-labelled hosts.
          - config:
              command: shell
              args:  # arguments to ceph orch
                - ceph
                - fs
                - volume
                - create
                - cephfs
          - config:
              command: apply_spec
              service: orch
              validate-spec-services: true
              specs:
                - service_type: mds
                  service_id: cephfs
                  placement:
                    label: mds

  - test:
      name: RGW Service deployment
      desc: RGW Service deployment
      module: test_cephadm.py
      polarion-id: CEPH-83574728
      config:
        steps:
          - config:
              command: apply
              service: rgw
              pos_args:
                - rgw.1
              args:
                placement:
                  label: rgw

  - test:
      name: Configure client admin
      desc: Configures client admin node on cluster
      module: test_client.py
      polarion-id:  # NOTE(review): empty in original — TODO add Polarion ID
      config:
        command: add
        id: client.1  # client Id (<type>.<Id>)
        node: node10
        install_packages:
          - ceph-common
          - ceph-base
        copy_admin_keyring: true  # Copy admin keyring to node
        caps:  # authorize client capabilities
          mon: "allow *"
          osd: "allow *"
          mds: "allow *"
          mgr: "allow *"
      abort-on-fail: true

0 comments on commit cb375eb

Please sign in to comment.