This is my config file for a Ceph cluster (Nautilus). Nodes and their addresses (a matching ceph.conf sketch follows the list):
* n01.sds.srv.local - 10.110.27.10, 10.110.28.10
* n02.sds.srv.local - 10.110.27.11, 10.110.28.11
* n03.sds.srv.local - 10.110.27.12, 10.110.28.12
* n04.sds.srv.local - 10.110.27.13, 10.110.28.13
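The two addresses per node suggest separate public and cluster networks. A minimal ceph.conf [global] sketch for that layout, assuming /24 masks and three monitors on n01..n03; the fsid below is a placeholder (ceph-deploy new generates the real file and id):
[global]
fsid = <generated by ceph-deploy new>
mon initial members = n01, n02, n03
mon host = 10.110.27.10,10.110.27.11,10.110.27.12
public network = 10.110.27.0/24
cluster network = 10.110.28.0/24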
# ceph-deploy install --release=nautilus n0{1,2,3,4}.sds.srv.local
# ceph-deploy --overwrite-conf admin n0{1,2,3,4}.sds.srv.local
# ceph-deploy mon create-initial
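After the initial monitors are created, quorum and the overall cluster state can be checked from the admin node:
# ceph -s
# ceph mon stat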
If necessary, clear old data and metadata from the HDDs.
## delete old LVM volume groups
# for i in `vgs|grep ceph-|awk '{print $1}'`;do vgremove -y $i;done
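If vgremove leaves stale LVM/PV signatures on the disks, they can be wiped as well (device names sdb..sdd assumed, matching the zap command below):
# for d in b c d; do wipefs -a /dev/sd$d; done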
# ceph-deploy disk zap n01 /dev/sd{b,c,d}
# for d in b c d; do ceph-deploy osd create --data /dev/sd$d n01; done
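Once the OSDs are created, verify that they registered and came up:
# ceph osd tree
# ceph osd stat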
# ceph-deploy mgr create n0{1,2,3,4}
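To see which mgr went active:
# ceph mgr stat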
# ceph-deploy mon destroy n01
# ceph-deploy purge n0{1,2,3,4}
# ceph-deploy purgedata n0{1,2,3,4}
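purge/purgedata remove the Ceph packages and all data from the nodes. To also discard the keyrings generated on the admin node, ceph-deploy provides forgetkeys:
# ceph-deploy forgetkeys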
# ceph osd pool create rbdpool 128
# ceph osd pool application enable rbdpool rbd
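Verify the pool and its application tag:
# ceph osd pool ls detail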
# rbd create rbdpool/my-test-image --size 102400
# rbd rm rbdpool/my-test-image
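To list the images in the pool and inspect one before removing it:
# rbd ls rbdpool
# rbd info rbdpool/my-test-image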
Use the RBD image with the kernel client. The stock el7 3.10 kernel does not support the object-map, fast-diff and deep-flatten image features, so they have to be disabled before the image can be mapped:
# uname -r
3.10.0-1062.1.2.el7.x86_64
# rbd feature disable rbdpool/my-test-image object-map fast-diff deep-flatten
# rbd map rbdpool/my-test-image
# mkfs /dev/rbd0
# mount /dev/rbd/rbdpool/my-test-image /mnt/
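When done testing, unmount and unmap the device (rbd0 assumed, as above):
# umount /mnt
# rbd unmap /dev/rbd0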