3 OVH Advance-Stor servers
CPU: AMD EPYC 4344P - 8c/16t - 3.8 GHz/5.3 GHz
RAM: 32 GB DDR5 5200 MHz
Disks: 2×960 GB SATA SSD + 4×22 TB SAS HDD
Partitioning:

If debugging is needed, add: -vvv -e 'debops__no_log=false'
ansible-playbook -i inventory/000_hosts playbooks/debops_bootstrap.yml --vault-id @prompt --flush-cache --user debian --limit ceph_backup
ansible-playbook -i inventory/000_hosts playbooks/debops_site.yml --vault-id @prompt --flush-cache --limit ceph_backup
reboot
openSeaChest_Firmware -d all --modelMatch ST22000NM004E --fwdlInfo | grep Revision
openSeaChest_Firmware -d all --modelMatch ST22000NM004E --downloadFW LongsPeakBPExosX22SAS-SED-512E-ET02.LOD
openSeaChest_Firmware -d all --modelMatch ST22000NM004E --downloadFW LongsPeakBPExosX22SAS-SED-512E-ET03.LOD
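Re-run the revision check afterwards to confirm the new firmware is active:
openSeaChest_Firmware -d all --modelMatch ST22000NM004E --fwdlInfo | grep Revision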
openSeaChest_Configure -d all --modelMatch ST22000NM004E --writeCache info
openSeaChest_Configure -d all --modelMatch ST22000NM004E --writeCache disable
for i in $(seq 0 3); do echo "write through" > /sys/class/scsi_disk/0\:0\:$i\:0/cache_type; done
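Note that this sysfs setting does not persist across reboots. To confirm the cache mode took effect (each line should read "write through", assuming the four HDDs enumerate as targets 0-3 on SCSI host 0, as above):
for i in $(seq 0 3); do cat /sys/class/scsi_disk/0\:0\:$i\:0/cache_type; done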
CEPH_RELEASE=18.2.2 ; curl --silent --remote-name --location https://download.ceph.com/rpm-${CEPH_RELEASE}/el9/noarch/cephadm
chmod +x cephadm
sudo ./cephadm add-repo --release reef
# the release key is written with too-restrictive permissions and apt ignores it; fix it and re-run add-repo
sudo chmod 644 /etc/apt/trusted.gpg.d/ceph.release.gpg
sudo ./cephadm add-repo --release reef
sudo ./cephadm install cephadm ceph ceph-volume ceph-fuse
rm cephadm
cephadm bootstrap --mon-id ov004.cephbackup --mon-ip 192.168.1.1 --cluster-network 192.168.1.0/24
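Sanity check after bootstrap (a mon and a mgr should be reported up on ov004):
ceph status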
Copy the contents of /etc/ceph/ceph.pub into ~/work/infras/inventory/group_vars/ceph_backup/debops_users.yml, then run:
ansible-playbook -i inventory/000_hosts playbooks/debops_site.yml --vault-id @prompt --flush-cache --limit ceph_backup --tags role::root_account
ceph orch host add ov005 192.168.1.2 --labels _admin
ceph orch host add ov006 192.168.1.3 --labels _admin
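Verify that the three hosts are registered:
ceph orch host ls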
On ov004:
for i in $(seq 0 3); do lvcreate -L 50G -n db-$i vg; done
ceph orch daemon add osd ov004:data_devices=/dev/sda,db_devices=/dev/vg/db-0,osds_per_device=1
ceph orch daemon add osd ov004:data_devices=/dev/sdb,db_devices=/dev/vg/db-1,osds_per_device=1
ceph orch daemon add osd ov004:data_devices=/dev/sdc,db_devices=/dev/vg/db-2,osds_per_device=1
ceph orch daemon add osd ov004:data_devices=/dev/sdd,db_devices=/dev/vg/db-3,osds_per_device=1
On ov005:
for i in $(seq 0 3); do lvcreate -L 50G -n db-$i vg; done
ceph orch daemon add osd ov005:data_devices=/dev/sda,db_devices=/dev/vg/db-0,osds_per_device=1
ceph orch daemon add osd ov005:data_devices=/dev/sdb,db_devices=/dev/vg/db-1,osds_per_device=1
ceph orch daemon add osd ov005:data_devices=/dev/sdc,db_devices=/dev/vg/db-2,osds_per_device=1
ceph orch daemon add osd ov005:data_devices=/dev/sdd,db_devices=/dev/vg/db-3,osds_per_device=1
On ov006:
for i in $(seq 0 3); do lvcreate -L 50G -n db-$i vg; done
ceph orch daemon add osd ov006:data_devices=/dev/sda,db_devices=/dev/vg/db-0,osds_per_device=1
ceph orch daemon add osd ov006:data_devices=/dev/sdb,db_devices=/dev/vg/db-1,osds_per_device=1
ceph orch daemon add osd ov006:data_devices=/dev/sdc,db_devices=/dev/vg/db-2,osds_per_device=1
ceph orch daemon add osd ov006:data_devices=/dev/sdd,db_devices=/dev/vg/db-3,osds_per_device=1
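Once the twelve OSDs are created, check that they are all up and placed under the right hosts:
ceph osd tree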
ceph orch apply rgw cephbackup --realm=eu-west --zonegroup=ovh --zone=backups
ceph dashboard set-rgw-credentials
ceph config-key set rgw/cert/rgw.cephbackup -i /etc/pki/realms/pkgdata.backup/private/key_chain.pem
and verify:
ceph orch ls --service-type rgw --export
ceph config-key get rgw/cert/rgw.cephbackup
Disable certificate verification from the dashboard, otherwise it fails with:
SSLCertVerificationError: hostname 'ov005' doesn't match either of '*.pkgdata.com', 'pkgdata.com'
ceph dashboard set-rgw-api-ssl-verify False
# radosgw-admin realm list
radosgw-admin realm create --rgw-realm=eu-west --default
radosgw-admin realm default --rgw-realm=eu-west
radosgw-admin zonegroup create --rgw-zonegroup=ovh --rgw-realm=eu-west --master --default
radosgw-admin zonegroup default --rgw-zonegroup=ovh
radosgw-admin zone create --rgw-zone=backups --rgw-zonegroup=ovh --master --default
radosgw-admin zone default --rgw-zone=backups
radosgw-admin period update --commit
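Verify that the committed period reflects the new realm, zonegroup and zone:
radosgw-admin period get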
# ceph df
# ceph osd erasure-code-profile get raid5
# ceph osd lspools
ceph osd erasure-code-profile set raid5 k=2 m=1
ceph osd pool create backup-data-ec01 erasure raid5
ceph osd pool application enable backup-data-ec01 rgw
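With k=2 and m=1, each object is stored as two data chunks plus one coding chunk, one per host, so the pool survives the loss of a single OSD or host at 2/3 usable capacity (hence the RAID5-like name). Confirm the pool exists with the right profile:
ceph osd pool ls detail | grep backup-data-ec01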
radosgw-admin zonegroup get | jq '.placement_targets += [{"name": "ec_placement", "tags": [], "storage_classes": ["STANDARD"]}]' > zonegroup.json
radosgw-admin zonegroup set --infile zonegroup.json
radosgw-admin zone get | jq '.placement_pools += [{"key": "ec_placement", "val": {"index_pool": "backups.rgw.buckets.index", "storage_classes": {"STANDARD": {"data_pool": "backup-data-ec01"}}, "data_extra_pool": "backups.rgw.buckets.non-ec", "index_type": 0}}]' > zone.json
radosgw-admin zone set --infile zone.json
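Check that the placement target and the pool mapping landed:
radosgw-admin zonegroup get | jq '.placement_targets'
radosgw-admin zone get | jq '.placement_pools'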
ceph orch ps
ceph orch daemon restart rgw.cephbackup.ov004.rydmwh
ceph orch daemon restart rgw.cephbackup.ov005.hpfpvf
radosgw-admin user create --uid='user1' --display-name='First User' --access-key='S3user1' --secret-key='S3user1key'
radosgw-admin subuser create --uid='user1' --subuser='user1:swift' --secret-key='Swiftuser1key' --access=full
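A quick smoke test with the aws CLI; <rgw-endpoint> is a placeholder for the actual RGW address, and the LocationConstraint form <zonegroup>:<placement-id> selects the erasure-coded placement target defined above:
AWS_ACCESS_KEY_ID=S3user1 AWS_SECRET_ACCESS_KEY=S3user1key aws --endpoint-url https://<rgw-endpoint> s3api create-bucket --bucket test-ec --create-bucket-configuration LocationConstraint=ovh:ec_placement
AWS_ACCESS_KEY_ID=S3user1 AWS_SECRET_ACCESS_KEY=S3user1key aws --endpoint-url https://<rgw-endpoint> s3 ls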
cp secret/pki/authorities/root/subject/cert.pem /usr/local/share/ca-certificates/root-ca.pkgdata.backup.crt
sudo update-ca-certificates -f
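To check that the root CA is now trusted system-wide (a self-signed root present in the store verifies as OK):
openssl verify /usr/local/share/ca-certificates/root-ca.pkgdata.backup.crt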
ceph config set osd osd_mclock_override_recovery_settings true
ceph config set osd osd_mclock_profile high_recovery_ops
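Once recovery/backfill has finished, drop both overrides to return to the default mClock behaviour:
ceph config rm osd osd_mclock_profile
ceph config rm osd osd_mclock_override_recovery_settings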