Installing the Ceph file system


yum install -y wget
wget https://pypi.python.org/packages/source/p/pip/pip-1.5.6.tar.gz#md5=01026f87978932060cc86c1dc527903e
tar zxvf pip-1.5.6.tar.gz
cd pip-1.5.6
python setup.py build
python setup.py install
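# Optional sanity check (not in the original script): confirm pip was installed and is on PATH
pip --version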
ssh-keygen


#################################
#echo "ceph-admin" >/etc/hostname
#echo "ceph-node1" >/etc/hostname
#echo "ceph-node2" >/etc/hostname
#echo "ceph-node3" >/etc/hostname
#reboot
#################################
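# Hedged alternative, assuming CentOS 7 (systemctl is used below): hostnamectl applies the
# new hostname immediately and avoids the reboot, e.g. on the admin node:
#hostnamectl set-hostname ceph-admin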

cat >/etc/hosts<<EOF
192.168.55.185 ceph-admin
192.168.55.186 ceph-node1
192.168.55.187 ceph-node2
192.168.55.188 ceph-node3
EOF
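# Optional check (sketch, not in the original): verify each node resolves and answers before copying keys
for h in ceph-node1 ceph-node2 ceph-node3; do ping -c 1 $h; done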

ssh-copy-id root@ceph-node1 && ssh-copy-id root@ceph-node2 && ssh-copy-id root@ceph-node3

ssh root@ceph-node1 "systemctl stop firewalld && setenforce 0"
ssh root@ceph-node2 "systemctl stop firewalld && setenforce 0"
ssh root@ceph-node3 "systemctl stop firewalld && setenforce 0"
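# Optional, assuming the settings should also survive a reboot: disable firewalld permanently
# and switch SELinux off in its config file on each node
ssh root@ceph-node1 "systemctl disable firewalld && sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config"
ssh root@ceph-node2 "systemctl disable firewalld && sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config"
ssh root@ceph-node3 "systemctl disable firewalld && sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config"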


cat >/root/.ssh/config<<EOF
Host ceph-node1
   Hostname ceph-node1
   User root
Host ceph-node2
   Hostname ceph-node2
   User root
Host ceph-node3
   Hostname ceph-node3
   User root
EOF
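# Optional check (sketch): confirm passwordless SSH works to every node before running ceph-deploy
for h in ceph-node1 ceph-node2 ceph-node3; do ssh $h hostname; done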






mkdir ~/my-cluster
cd ~/my-cluster
pip install ceph-deploy
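# Optional check: confirm ceph-deploy is installed and on PATH
ceph-deploy --version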

ceph-deploy new ceph-node1 ceph-node2 ceph-node3
ceph-deploy install ceph-node1 ceph-node2 ceph-node3
ceph-deploy mon create-initial
ceph-deploy mon create ceph-node1 ceph-node2 ceph-node3
ceph-deploy gatherkeys ceph-node1 ceph-node2 ceph-node3

############################################################################
## ceph-deploy --overwrite-conf mon create ceph-node1 ceph-node2 ceph-node3#
############################################################################


#mkfs.xfs /dev/sdb
#mount /dev/sdb /opt/ceph/

ssh root@ceph-node1 mkdir /opt/ceph
ssh root@ceph-node2 mkdir /opt/ceph
ssh root@ceph-node3 mkdir /opt/ceph
    
ceph-deploy osd prepare ceph-node1:/opt/ceph ceph-node2:/opt/ceph ceph-node3:/opt/ceph
ceph-deploy osd activate ceph-node1:/opt/ceph ceph-node2:/opt/ceph ceph-node3:/opt/ceph
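# Optional check (assumes ceph.conf and the admin keyring are present on this host, e.g. after
# the ceph-deploy admin step shown further below): confirm all three OSDs are up and in
ceph osd tree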

#add the metadata (MDS) node
ceph-deploy mds create ceph-node1

###############################################################
#distribute the admin key files to all nodes
#ceph-deploy admin ceph-admin ceph-node1 ceph-node2 ceph-node3
###############################################################
#cluster health checks
ceph health
ceph -s
ceph -w
ceph quorum_status --format json-pretty



              
#mount from a client
yum install -y ceph-fuse
mkdir /mnt/ceph


[root@ceph-admin ~]# ceph osd pool create metadata 256 256
[root@ceph-admin ~]# ceph osd pool create data 256 256
[root@ceph-admin ~]# ceph fs new filesystemNew metadata data
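# Note on the pg_num/pgp_num value of 256 above: the usual rule of thumb is
#   total PGs ~= (number of OSDs * 100) / replica count, rounded up to a power of two
# e.g. with 3 OSDs and 3 replicas: 3 * 100 / 3 = 100 -> 128. 256 is acceptable for this small
# test cluster, but production pools should be sized with that formula in mind.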

[root@ceph-admin ceph]# ceph fs ls
name: filesystemNew, metadata pool: metadata, data pools: [data ]

[root@ceph-admin ceph]# ceph mds stat
e5: 1/1/1 up {0=ceph-node1=up:active}


ceph-fuse -m 192.168.55.186:6789 /mnt/ceph
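# Optional check (sketch): verify the mount is live and writable
df -h /mnt/ceph
touch /mnt/ceph/testfile && ls -l /mnt/ceph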







####end####

#add an OSD node
ssh ceph-node1
sudo mkdir /var/local/osd2
exit
[root@ceph-admin my-cluster]# ceph-deploy osd prepare ceph-node1:/var/local/osd2
[root@ceph-admin my-cluster]# ceph-deploy osd activate ceph-node1:/var/local/osd2
[root@ceph-admin my-cluster]# ceph -w
[root@ceph-admin my-cluster]# ceph -s
    cluster 8f7a79b6-ab8d-40c7-abfa-6e6e23d9a26d
     health HEALTH_OK
     monmap e1: 1 mons at {ceph-node1=192.168.55.186:6789/0}, election epoch 2, quorum 0 ceph-node1
     osdmap e13: 3 osds: 3 up, 3 in
      pgmap v38: 64 pgs, 1 pools, 0 bytes data, 0 objects
            18600 MB used, 35153 MB / 53754 MB avail
                  64 active+clean
                  



#add monitor nodes
[root@ceph-admin my-cluster]# ceph-deploy new ceph-node2 ceph-node3
[root@ceph-admin my-cluster]# ceph-deploy mon create-initial
[root@ceph-admin my-cluster]# ceph-deploy --overwrite-conf mon create ceph-node2 ceph-node3
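# Optional check after adding the monitors: confirm all mons have joined the quorum
ceph mon stat
ceph quorum_status --format json-pretty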

