Ceph file system installation
# On the admin node: install wget and pip, generate an SSH key
yum install -y wget
wget https://pypi.python.org/packages/source/p/pip/pip-1.5.6.tar.gz#md5=01026f87978932060cc86c1dc527903e
tar zxvf pip-1.5.6.tar.gz
cd pip-1.5.6
python setup.py build
python setup.py install
ssh-keygen

# Set the hostname on each machine, then reboot
# echo "ceph-admin" > /etc/hostname
# echo "ceph-node1" > /etc/hostname
# echo "ceph-node2" > /etc/hostname
# echo "ceph-node3" > /etc/hostname
# reboot

# Host name resolution on the admin node
cat > /etc/hosts <<EOF
192.168.55.185 ceph-admin
192.168.55.186 ceph-node1
192.168.55.187 ceph-node2
192.168.55.188 ceph-node3
EOF

# Password-less SSH to the nodes, stop the firewall and put SELinux in permissive mode
ssh-copy-id root@ceph-node1
ssh-copy-id root@ceph-node2
ssh-copy-id root@ceph-node3
ssh root@ceph-node1 "systemctl stop firewalld && setenforce 0"
ssh root@ceph-node2 "systemctl stop firewalld && setenforce 0"
ssh root@ceph-node3 "systemctl stop firewalld && setenforce 0"

cat > /root/.ssh/config <<EOF
Host ceph-node1
    Hostname ceph-node1
    User root
Host ceph-node2
    Hostname ceph-node2
    User root
Host ceph-node3
    Hostname ceph-node3
    User root
EOF

# Deploy the cluster with ceph-deploy
mkdir ~/my-cluster
cd ~/my-cluster
pip install ceph-deploy
ceph-deploy new ceph-node1 ceph-node2 ceph-node3
ceph-deploy install ceph-node1 ceph-node2 ceph-node3
ceph-deploy mon create-initial
ceph-deploy mon create ceph-node2 ceph-node3
ceph-deploy gatherkeys ceph-node1 ceph-node2 ceph-node3
# If ceph.conf on the nodes differs from the local copy, push it again:
ceph-deploy --overwrite-conf mon create ceph-node1 ceph-node2 ceph-node3

# OSD storage: either format and mount a dedicated disk, e.g.
# mkfs.xfs /dev/sdb
# mount /dev/sdb /opt/ceph/
# or simply use a directory on each node
ssh root@ceph-node1 mkdir /opt/ceph
ssh root@ceph-node2 mkdir /opt/ceph
ssh root@ceph-node3 mkdir /opt/ceph
ceph-deploy osd prepare ceph-node1:/opt/ceph ceph-node2:/opt/ceph ceph-node3:/opt/ceph
ceph-deploy osd activate ceph-node1:/opt/ceph ceph-node2:/opt/ceph ceph-node3:/opt/ceph

# Add a metadata (MDS) node
ceph-deploy mds create ceph-node1

# Distribute the key file
ceph-deploy admin ceph-admin ceph-node1 ceph-node2

# Cluster check
ceph health
ceph -s
ceph -w
ceph quorum_status --format json-pretty

# Mount on the client
yum install -y ceph-fuse
mkdir /mnt/ceph
[root@ceph-admin ~]# ceph osd pool create metadata 256 256
[root@ceph-admin ~]# ceph osd pool create data 256 256
[root@ceph-admin ~]# ceph fs new filesystemNew metadata data
[root@ceph-admin ceph]# ceph fs ls
name: filesystemNew, metadata pool: metadata, data pools: [data]
[root@ceph-admin ceph]# ceph mds stat
e5: 1/1/1 up {0=ceph-node1=up:active}
ceph-fuse -m 192.168.55.186:6789 /mnt/ceph
#### end ####

# Add an OSD node
ssh ceph-node1
sudo mkdir /var/local/osd2
exit
[root@ceph-admin my-cluster]# ceph-deploy osd prepare ceph-node1:/var/local/osd2
[root@ceph-admin my-cluster]# ceph-deploy osd activate ceph-node1:/var/local/osd2
[root@ceph-admin my-cluster]# ceph -w
[root@ceph-admin my-cluster]# ceph -s
    health HEALTH_OK
    monmap e1: 1 mons at {ceph-node1=192.168.55.186:6789/0}, election epoch 2, quorum 0 ceph-node1
    osdmap e13: 3 osds: 3 up, 3 in
     pgmap v38: 64 pgs, 1 pools, 0 bytes data, 0 objects
           18600 MB used, 35153 MB / 53754 MB avail
                 64 active+clean

# Add monitor nodes
[root@ceph-admin my-cluster]# ceph-deploy new ceph-node2 ceph-node3
[root@ceph-admin my-cluster]# ceph-deploy mon create-initial
[root@ceph-admin my-cluster]# ceph-deploy --overwrite-conf mon create ceph-node2 ceph-node3
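After the ceph-fuse mount step, it can be worth confirming that the client can actually read and write through /mnt/ceph. The quick smoke test below is not part of the original walkthrough; it is a minimal sketch assuming the mount above succeeded, and the file name smoke-test.txt is purely illustrative.

# Smoke test of the CephFS mount (smoke-test.txt is a hypothetical file name)
df -h /mnt/ceph                          # the ceph-fuse filesystem should appear here
echo "hello cephfs" > /mnt/ceph/smoke-test.txt
cat /mnt/ceph/smoke-test.txt             # expect: hello cephfs
ceph -s                                  # usage/object counters should change shortly
rm /mnt/ceph/smoke-test.txt
# To unmount the FUSE client: fusermount -u /mnt/ceph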