文档库 最新最全的文档下载
当前位置:文档库 › ceph常用命令

ceph常用命令

安装ceph
ceph-deploy new ceph-node0 ceph-node1 ceph-node2
ceph-deploy install ceph-node0 ceph-node1 ceph-node2
ceph-deploy mon create-initial
ceph-deploy mds create ceph-node0
ceph-deploy mon add ceph-node1 ceph-node2
ceph-deploy mgr create ceph-node0:ceph-node0 ceph-node1:ceph-node1 ceph-node2:ceph-node2
清空重装
ceph-deploy purge {ceph-node} [{ceph-node} ...]
ceph-deploy purgedata {ceph-node} [{ceph-node} ...]
ceph-deploy forgetkeys
rm ceph.*
创建osd存储节点
ceph-deploy osd create ceph-node0 --data /dev/sdc --journal /dev/sdb1
ceph-deploy osd create ceph-node0 --data /dev/sdd --journal /dev/sdb2
ceph-deploy osd create ceph-node1 --data /dev/sdc --journal /dev/sdb1
ceph-deploy osd create ceph-node1 --data /dev/sdd --journal /dev/sdb2
ceph-deploy osd create ceph-node2 --data /dev/sdc --journal /dev/sdb1
ceph-deploy osd create ceph-node2 --data /dev/sdd --journal /dev/sdb2
分发配置文件及秘钥到节点
ceph-deploy --overwrite-conf admin ceph-node0 ceph-node1 ceph-node2
ceph对象存储
ceph-deploy rgw create master

查看osd与ceph状态
ceph -s
ceph osd tree
ceph osd dump
watch ceph status
ceph health detail
ceph -w
ceph df
ceph auth list
ceph mon stat
ceph mon dump
ceph quorum_status
ceph osd crush dump
ceph osd crush rule list
ceph pg stat
ceph pg dump
ceph mds stat
ceph mds dump
创建应用池及文件
ceph osd pool ls
ceph osd lspools
ceph osd pool create data_data 32
ceph osd pool create data_metadata 32
ceph fs new data data_metadata data_data
ceph osd pool set data_data size 2
ceph osd pool delete data_data data_data --yes-i-really-really-mean-it
ceph osd pool application enable data_data rbd
rbd pool init rbd
客户端挂载
ceph-fuse -m 192.168.0.150,192.168.0.151,192.168.0.152:6789 /data
查看及清空硬盘
ceph-deploy disk list ceph-node1
ceph-deploy disk zap ceph-node1 /dev/sdb
ceph-deploy osd create ceph-node1 --data /dev/sdb
横向扩展
ceph-deploy mon create ceph-node2
ceph-deploy mon create ceph-node3
ceph-deploy disk list ceph-node2 ceph-node3
ceph-deploy disk zap ceph-node2:sdb ceph-node3:sdc
ceph-deploy osd create ceph-node2:sdb ceph-node2:sdc
创建块设备
rbd create ceph-client --size 10240
rbd ls
rbd --image ceph-client info
rbd --image ceph-client info -p rbd
创建块客户端
ceph-deploy admin ceph-client
rbd map --image rbd/ceph-client
rbd showmapped
rbd resize ceph-client --size 1024
rbd快照
rbd snap create ceph-client@snap1
rbd snap rollback ceph-client@snap1
rbd snap rm ceph-client@snap1
rbd snap purge ceph-client
缩容移除节点
ceph osd out osd.9
service ceph-osd@9 stop
ceph osd crush remove osd.9
ceph auth del osd.9
ceph osd crush remove ceph-node4
替换硬盘
ceph osd out osd.0
ceph osd crush rm osd.0
ceph auth del osd.0
ceph osd rm osd.0
ceph-deploy disk list ceph-node1
ceph-deploy disk zap ceph-node1 /dev/sdb
ceph-deploy --overwrite-conf osd create ceph-node1 /dev/sdb
纠删码
ceph osd erasure-code-profile set ec-profile ruleset-failure-domain=osd k=3 m=2
ceph osd erasure-code-profile ls
ceph osd erasure-code-profile get ec-profile
ceph osd pool create ec-pool 16 16 erasure ec-profile
ceph auth get-or-create client.wzl mon 'allow r' osd 'allow class-read object_prefix rbd_children,allow rwx pool=rbd1'
ceph auth get-or-create client.wzl | tee /etc/ceph/ceph.client.wzl.keyring
scrub定时执行
ceph tell osd.* injectargs "--osd-scrub-begin-hour 22"
ceph tell osd.* injectargs "--osd-scrub-end-hour 7"
ceph数据重建策略
ceph tell osd.* injectargs "--osd-recovery-op-priority 63"
ceph tell osd.* injectargs "--osd-client-op-priority 3"
full操作
ceph osd pause
ceph tell mon.* injectargs "--mon-osd-full-ratio 0.96"
ceph tell osd.* injectargs "--mon-osd-full-ratio 0.96"
ceph pg set_full_ratio 0.96
ceph osd unpause


相关文档