Commit 5e836fc

Merge pull request #4299 from hmaheswa/cephadm_rgw_ssl_generate_cert
Adds support for testing RGW SSL deployment with cephadm generate_cert.
mergify[bot] authored Dec 31, 2024
2 parents 1dce7b1 + 4ec4f46 commit 5e836fc
Showing 1 changed file with 349 additions and 0 deletions.
suites/squid/rgw/tier-2_rgw_ssl_cephadm_gen_cert.yaml
@@ -0,0 +1,349 @@
# Test suite for evaluating an RGW multisite deployment whose SSL endpoint certificates are generated by cephadm.
#
# This suite deploys a single realm (India) spanning two RHCS clusters. It has a
# zonegroup (shared) which also spans the clusters. There are master (pri) and
# secondary (sec) zones within this zonegroup. The master zone is part of the pri
# cluster, whereas the sec zone is part of the sec datacenter (cluster).
#
# The deployment is evaluated by running IOs across the environments.
# conf: conf/squid/rgw/rgw_multisite.yaml
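#
# A typical way to schedule this suite (a sketch, assuming the standard cephci
# run.py entry point; flag names may differ between cephci versions):
#
#   python run.py \
#       --global-conf conf/squid/rgw/rgw_multisite.yaml \
#       --suite suites/squid/rgw/tier-2_rgw_ssl_cephadm_gen_cert.yaml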
---
tests:

# Cluster deployment stage

- test:
abort-on-fail: true
desc: Install software pre-requisites for cluster deployment.
module: install_prereq.py
name: setup pre-requisites

- test:
abort-on-fail: true
clusters:
ceph-pri:
config:
verify_cluster_health: true
steps:
- config:
command: bootstrap
service: cephadm
args:
registry-url: registry.redhat.io
mon-ip: node1
orphan-initial-daemons: true
initial-dashboard-password: admin@123
dashboard-password-noupdate: true
- config:
command: add_hosts
service: host
args:
attach_ip_address: true
labels: apply-all-labels
- config:
command: apply
service: mgr
args:
placement:
label: mgr
- config:
command: apply
service: mon
args:
placement:
label: mon
- config:
command: apply
service: osd
args:
all-available-devices: true
- config:
command: apply_spec
service: orch
specs:
- service_type: rgw
service_id: shared.pri
spec:
generate_cert: true
ssl: true
zonegroup_hostnames:
- s3.example.com
placement:
nodes:
- node5
ceph-sec:
config:
verify_cluster_health: true
steps:
- config:
command: bootstrap
service: cephadm
args:
registry-url: registry.redhat.io
mon-ip: node1
orphan-initial-daemons: true
initial-dashboard-password: admin@123
dashboard-password-noupdate: true
- config:
command: add_hosts
service: host
args:
attach_ip_address: true
labels: apply-all-labels
- config:
command: apply
service: mgr
args:
placement:
label: mgr
- config:
command: apply
service: mon
args:
placement:
label: mon
- config:
command: apply
service: osd
args:
all-available-devices: true
- config:
command: apply_spec
service: orch
specs:
- service_type: rgw
service_id: shared.sec
spec:
generate_cert: true
ssl: true
zonegroup_hostnames:
- s3.example.com
placement:
nodes:
- node5
desc: RHCS cluster deployment and rgw deployment with ssl cert generation
polarion-id: CEPH-83575222
destroy-cluster: false
module: test_cephadm.py
name: deploy cluster and rgw with ssl gen-cert
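
# With `ssl: true` and `generate_cert: true` in the apply_spec step above, cephadm
# issues the RGW endpoint certificate itself (signed by its internal root CA) for
# the zonegroup hostname, so no certificate or key has to be embedded in the spec.
# Outside this framework, a rough equivalent (a sketch only) would be feeding
# cephadm a spec file directly, e.g. `ceph orch apply -i rgw-shared-pri.yaml` with:
#
#   service_type: rgw
#   service_id: shared.pri
#   placement:
#     hosts:
#       - <node5-hostname>        # placeholder for the actual host
#   spec:
#     ssl: true
#     generate_cert: true
#     zonegroup_hostnames:
#       - s3.example.com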

- test:
abort-on-fail: true
clusters:
ceph-pri:
config:
command: add
id: client.1
node: node6
install_packages:
- ceph-common
copy_admin_keyring: true
ceph-sec:
config:
command: add
id: client.1
node: node6
install_packages:
- ceph-common
copy_admin_keyring: true
desc: Configure the RGW client system
polarion-id: CEPH-83573758
destroy-cluster: false
module: test_client.py
name: configure client

- test:
abort-on-fail: true
clusters:
ceph-pri:
config:
commands:
- "radosgw-admin realm create --rgw-realm india --default"
- "radosgw-admin zonegroup create --rgw-realm india --rgw-zonegroup shared --endpoints https://{node_ip:node5} --master --default"
- "radosgw-admin zone create --rgw-realm india --rgw-zonegroup shared --rgw-zone primary --endpoints https://{node_ip:node5} --master --default"
- "radosgw-admin period update --rgw-realm india --commit"
- "radosgw-admin user create --uid=repuser --display_name='Replication user' --access-key 21e86bce636c3aa0 --secret cf764951f1fdde5d --rgw-realm india --system"
- "radosgw-admin zone modify --rgw-realm india --rgw-zonegroup shared --rgw-zone primary --access-key 21e86bce636c3aa0 --secret cf764951f1fdde5d"
- "radosgw-admin period update --rgw-realm india --commit"
- "ceph config set client.rgw.{daemon_id:shared.pri} rgw_realm india"
- "ceph config set client.rgw.{daemon_id:shared.pri} rgw_zonegroup shared"
- "ceph config set client.rgw.{daemon_id:shared.pri} rgw_zone primary"
- "ceph config set client.rgw rgw_verify_ssl False"
- "ceph config set client.rgw.{daemon_id:shared.pri} rgw_verify_ssl False"
- "ceph orch restart {service_name:shared.pri}"
- "ceph orch cert-store get cert cephadm_root_ca_cert > /home/cephuser/cephadm-root-ca.crt"
- "sudo yum install -y sshpass"
- "sleep 20"
- "sshpass -p 'passwd' scp /home/cephuser/cephadm-root-ca.crt root@{node_ip:ceph-sec#node5}:/etc/pki/ca-trust/source/anchors/"
- "sshpass -p 'passwd' scp /home/cephuser/cephadm-root-ca.crt root@{node_ip:ceph-sec#node6}:/etc/pki/ca-trust/source/anchors/"
ceph-sec:
config:
commands:
- "sudo update-ca-trust"
- "ceph config set client.rgw rgw_verify_ssl False"
- "radosgw-admin realm pull --rgw-realm india --url https://{node_ip:ceph-pri#node5} --access-key 21e86bce636c3aa0 --secret cf764951f1fdde5d --default"
- "radosgw-admin period pull --url https://{node_ip:ceph-pri#node5} --access-key 21e86bce636c3aa0 --secret cf764951f1fdde5d"
- "radosgw-admin zone create --rgw-realm india --rgw-zonegroup shared --rgw-zone secondary --endpoints https://{node_ip:node5} --access-key 21e86bce636c3aa0 --secret cf764951f1fdde5d"
- "radosgw-admin period update --rgw-realm india --commit"
- "ceph config set client.rgw rgw_verify_ssl False"
- "ceph config set client.rgw.{daemon_id:shared.sec} rgw_verify_ssl False"
- "ceph config set client.rgw.{daemon_id:shared.sec} rgw_realm india"
- "ceph config set client.rgw.{daemon_id:shared.sec} rgw_zonegroup shared"
- "ceph config set client.rgw.{daemon_id:shared.sec} rgw_zone secondary"
- "ceph orch restart {service_name:shared.sec}"
- "sudo yum install -y sshpass"
- "sleep 120"
- "ceph orch cert-store get cert cephadm_root_ca_cert > /home/cephuser/cephadm-root-ca.crt"
- "sshpass -p 'passwd' scp /home/cephuser/cephadm-root-ca.crt root@{node_ip:ceph-pri#node5}:/etc/pki/ca-trust/source/anchors/"
- "sshpass -p 'passwd' scp /home/cephuser/cephadm-root-ca.crt root@{node_ip:ceph-pri#node6}:/etc/pki/ca-trust/source/anchors/"
desc: Setting up RGW multisite replication environment
module: exec.py
name: setup multisite
polarion-id: CEPH-10362
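
# Because each cluster's RGW certificate is issued by that cluster's own cephadm
# root CA, the steps above export the CA with `ceph orch cert-store get cert
# cephadm_root_ca_cert` and copy it into the peer cluster's
# /etc/pki/ca-trust/source/anchors/; the tests that follow run update-ca-trust so
# that cross-site HTTPS endpoints validate. A manual spot check of the chain
# (a sketch; node and port are placeholders) could look like:
#
#   openssl s_client -connect <rgw-node>:443 \
#       -CAfile /home/cephuser/cephadm-root-ca.crt </dev/null | grep 'Verify return code'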

- test:
abort-on-fail: true
clusters:
ceph-pri:
config:
role: rgw
sudo: True
commands:
- "update-ca-trust"
ceph-sec:
config:
role: rgw
sudo: True
commands:
- "update-ca-trust"
desc: update-ca-trust on rgw nodes
polarion-id: CEPH-83575227
module: exec.py
name: update-ca-trust on rgw nodes

- test:
abort-on-fail: true
clusters:
ceph-pri:
config:
role: client
sudo: True
commands:
- "update-ca-trust"
ceph-sec:
config:
role: client
sudo: True
commands:
- "update-ca-trust"
desc: update-ca-trust on client nodes
polarion-id: CEPH-83575227
module: exec.py
name: update-ca-trust on client nodes
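
# Once update-ca-trust has run on the RGW and client nodes, HTTPS requests to the
# cephadm-generated endpoints should verify against the system trust store without
# disabling certificate checks. A hedged spot check from a client node
# (s3.example.com is the zonegroup hostname from the spec; --resolve maps it to the
# RGW node if it is not in DNS):
#
#   curl -v --resolve s3.example.com:443:<node5-ip> https://s3.example.com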

- test:
abort-on-fail: true
clusters:
ceph-pri:
config:
commands:
- "radosgw-admin sync status"
- "ceph -s"
- "radosgw-admin realm list"
- "radosgw-admin zonegroup list"
- "radosgw-admin zone list"
desc: Retrieve the configured environment details
polarion-id: CEPH-83575227
module: exec.py
name: get shared realm info on primary

- test:
abort-on-fail: true
clusters:
ceph-sec:
config:
commands:
- "radosgw-admin sync status"
- "ceph -s"
- "radosgw-admin realm list"
- "radosgw-admin zonegroup list"
- "radosgw-admin zone list"
desc: Retrieve the configured environment details
polarion-id: CEPH-83575227
module: exec.py
name: get shared realm info on secondary
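
# On a healthy setup, `radosgw-admin sync status` lists the india realm, the shared
# zonegroup and the local zone; on the secondary it should also report that metadata
# and data sync are caught up with the source, roughly
# (illustrative excerpt; exact wording varies by release):
#
#   realm     <uuid> (india)
#   zonegroup <uuid> (shared)
#   zone      <uuid> (secondary)
#   metadata sync ... metadata is caught up with master
#   data sync source: <uuid> (primary) ... data is caught up with source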



# Test work flow

- test:
clusters:
ceph-pri:
config:
set-env: true
script-name: user_create.py
config-file-name: non_tenanted_user.yaml
copy-user-info-to-site: ceph-sec
desc: create non-tenanted user
module: sanity_rgw_multisite.py
name: create non-tenanted user
polarion-id: CEPH-83575199

- test:
name: enable compression on secondary
desc: test_Mbuckets_with_Nobjects_compression on secondary
polarion-id: CEPH-11350
module: sanity_rgw_multisite.py
clusters:
ceph-sec:
config:
script-name: test_Mbuckets_with_Nobjects.py
verify-io-on-site: ["ceph-pri"]
config-file-name: test_Mbuckets_with_Nobjects_compression.yaml

- test:
name: download objects on secondary
desc: test_Mbuckets_with_Nobjects_download on secondary
polarion-id: CEPH-14237
module: sanity_rgw_multisite.py
clusters:
ceph-sec:
config:
script-name: test_Mbuckets_with_Nobjects.py
verify-io-on-site: ["ceph-pri"]
config-file-name: test_Mbuckets_with_Nobjects_download.yaml

- test:
name: test encryption on secondary
desc: test_Mbuckets_with_Nobjects_enc on secondary
polarion-id: CEPH-11358 # also applies to CEPH-11361
module: sanity_rgw_multisite.py
clusters:
ceph-sec:
config:
script-name: test_Mbuckets_with_Nobjects.py
verify-io-on-site: ["ceph-pri"]
config-file-name: test_Mbuckets_with_Nobjects_enc.yaml

- test:
name: Test rgw through CURL
desc: Test rgw through CURL
polarion-id: CEPH-83575572
module: sanity_rgw.py
clusters:
ceph-pri:
config:
script-name: ../curl/test_rgw_using_curl.py
config-file-name: ../../curl/configs/test_rgw_using_curl.yaml

- test:
name: Test Etag not empty for complete multipart upload in aws
desc: Test Etag not empty for complete multipart upload in aws
polarion-id: CEPH-9801
module: sanity_rgw.py
clusters:
ceph-pri:
config:
script-name: ../aws/test_aws.py
config-file-name: ../../aws/configs/test_complete_multipart_upload_etag_not_empty.yaml
