Ceph-related commands

############### CentOS 7

mkdir my-cluster

cd my-cluster

yum install ceph-deploy -y

ceph-deploy new mon01

yum install python-minimal -y

ceph-deploy install mon01 node01 node02

MDS="mon01"

MON="mon01 node01 node02"

OSDS="mon01 node01 node02"

INST="$OSDS $MON"

echo "osd pool default size = 2
osd max object name len = 256
osd max object namespace len = 64
mon_pg_warn_max_per_osd = 2000
mon clock drift allowed = 30
mon clock drift warn backoff = 30
rbd cache writethrough until flush = false" >> ceph.conf
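
After editing ceph.conf it normally has to be pushed out to the nodes and the monitors restarted; a sketch assuming the same three hosts as above:

ceph-deploy --overwrite-conf config push mon01 node01 node02   # push the edited ceph.conf
systemctl restart ceph-mon.target                              # restart the monitors so the settings take effect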

apt-get install -y ceph-base

apt-get install -y ceph-common

apt-get install -y ceph-fs-common

apt-get install -y ceph-fuse

apt-get install -y ceph-mds

apt-get install -y ceph-mon

apt-get install -y ceph-osd

[Ceph]

name=Ceph packages for $basearch

baseurl=http://mirrors.163.com/ceph/rpm-nautilus/el7/$basearch

enabled=1

priority=1

gpgcheck=1

gpgkey=https://download.ceph.com/keys/release.asc

[Ceph-noarch]

name=Ceph noarch packages

baseurl=http://mirrors.163.com/ceph/rpm-nautilus/el7/noarch

enabled=1

priority=1

gpgcheck=1

gpgkey=https://download.ceph.com/keys/release.asc

[ceph-source]

name=Ceph source packages

baseurl=http://mirrors.163.com/ceph/rpm-nautilus/el7/SRPMS

enabled=0

priority=1

gpgcheck=1

gpgkey=https://download.ceph.com/keys/release.asc
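
The three [Ceph*] sections above are a yum repo definition; presumably they are saved as /etc/yum.repos.d/ceph.repo before refreshing the cache:

vi /etc/yum.repos.d/ceph.repo   # paste the [Ceph], [Ceph-noarch] and [ceph-source] sections
yum clean all
yum makecache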

yum install python-setuptools

yum install -y deltarpm

yum install -y gdisk

ceph mgr module disable dashboard

ceph-deploy mgr create mon01 node01 node02

ceph dashboard ac-user-create admin passw0rd administrator
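
A quick, hedged way to check that the dashboard is reachable (the module has to be enabled for the login above to work):

ceph mgr module enable dashboard   # the dashboard module must be enabled on the active mgr
ceph mgr services                  # prints the dashboard URL, e.g. https://mon01:8443/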

systemctl restart ceph-mon.target

ceph-deploy --overwrite-conf mds create mon01 node01 node02   # create the MDS daemons

ceph osd pool create cephfs_data 128

ceph osd pool create cephfs_metadata 128

ceph fs new myfs cephfs_metadata cephfs_data
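
Standard status commands to confirm the filesystem came up (nothing specific to this cluster):

ceph fs ls      # list filesystems and their data/metadata pools
ceph mds stat   # an MDS should report active for myfs
ceph -s         # overall cluster health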

ceph osd pool rm cephfs_data cephfs_data --yes-i-really-really-mean-it   # delete the cephfs_data pool

ceph fs rm myfs --yes-i-really-mean-it   # delete the myfs filesystem
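
On recent releases the filesystem usually has to be taken down first, and pool deletion has to be explicitly allowed; a hedged sketch:

ceph fs fail myfs                                # stop the filesystem before removing it
ceph config set mon mon_allow_pool_delete true   # pool removal is refused unless this is set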

mount -t ceph 192.168.169.190:6789:/ /fsdata -o name=admin,secretfile=/etc/ceph/admin.secret
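
The mount assumes /etc/ceph/admin.secret already exists on the client; a sketch of creating it from a node that holds the admin keyring:

ceph auth get-key client.admin > /etc/ceph/admin.secret   # bare key only, no "[client.admin]" header
chmod 600 /etc/ceph/admin.secret

For mounting at boot, an fstab entry of the usual form would be:

192.168.169.190:6789:/ /fsdata ceph name=admin,secretfile=/etc/ceph/admin.secret,_netdev 0 0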

## Add a new node to the Ceph cluster

ceph-deploy --overwrite-conf config push admin mon01 node01 node02 node03

ceph-deploy --overwrite-conf mon create node03

ceph-deploy --overwrite-conf mon add node03

ceph-deploy osd create –data /dev/sdb node03
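
To verify the new node joined cleanly (generic status commands):

ceph osd tree   # the OSD on node03 should appear and be "up"
ceph -s         # watch the monitors reach quorum and data rebalance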

####### RBD usage (create, map, mount)

ceph osd pool create rbd_test 128 128

rbd create rbd_date --size 20480 -p rbd_test

rbd --image rbd_date -p rbd_test info

rbd feature disable rbd_test/rbd_date object-map fast-diff deep-flatten

rbd map rbd_date -p rbd_test

rbd showmapped

ceph osd pool application enable rbd_test rbd   # tag the pool with the rbd application

mkfs.xfs /dev/rbd0

mkdir /rbddate

mount /dev/rbd0 /rbddate/

dd if=/dev/zero of=/rbddate/10G bs=1M count=10240
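
To re-map the image automatically on boot, one common approach is the rbdmap service shipped with ceph-common (assumes the admin keyring is used):

echo "rbd_test/rbd_date id=admin,keyring=/etc/ceph/ceph.client.admin.keyring" >> /etc/ceph/rbdmap
systemctl enable rbdmap.service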

# Mount RBD storage for CloudStack

ceph auth get-or-create client.cloudstack mon 'allow r' osd 'allow rwx pool=vm-data'

AQD+VQVfELMbJRAA5LspVxtCykwJ3LFzwYLyFQ==
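
The string above is the key printed by get-or-create; it can be shown again later when configuring CloudStack:

ceph auth get-key client.cloudstack   # print just the key
ceph auth get client.cloudstack       # print the full keyring entry including caps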

#### Delete an RBD image

rbd list -p rbd_test

rbd unmap /dev/rbd0

rbd rm rbd_date -p rbd_test

rbd list -p rbd_test   # list images

rbd snap ls rbd_test/dfe36912-ba7f-11ea-a837-000c297bc10e   # list the image's snapshots

rbd snap unprotect rbd_test/dfe36912-ba7f-11ea-a837-000c297bc10e@cloudstack-base-snap   # remove snapshot protection

rbd snap purge rbd_test/ede76ccf-f86a-4ab7-afa7-1adc4f1b576b   # purge all snapshots of the image

rbd rm rbd_test/ede76ccf-f86a-4ab7-afa7-1adc4f1b576b   # delete the image

rbd children vm-data/2368966f-0ea3-11eb-8538-3448edf6aa08@cloudstack-base-snap   # list clones (children) of the snapshot

rbd flatten vm-data/79900df4-0b18-42cb-854b-c29778f02aff   # flatten the clone (detach it from its parent)
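
Putting the above together, the usual order for removing a protected base snapshot and its image (IMAGE and CLONE are placeholders, not names from this cluster):

rbd children vm-data/IMAGE@cloudstack-base-snap   # find any clones first
rbd flatten vm-data/CLONE                         # detach each clone from its parent
rbd snap unprotect vm-data/IMAGE@cloudstack-base-snap
rbd snap purge vm-data/IMAGE                      # purge the snapshots
rbd rm vm-data/IMAGE                              # then remove the image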

Problem: "This means the image is still open or the client using it crashed. Try again after closing/unmapping it or waiting 30s for the crashed client to timeout."

Solution:

rbd status vm-data/6926af02-27c3-47ad-a7ee-86c7d95aa353   # check the image's watchers

ceph osd blacklist add 172.31.156.11:0/4126702798

Check the leftover RBD watcher information:

[root@node-2 ~]# rbd status compute/2d05517a-8670-4cce-b39d-709e055381d6_disk
Watchers:
watcher=192.168.55.2:0/2900899764 client.14844 cookie=139644428642944

Add the leftover watcher to the OSD blacklist, then check whether the watcher is still present.

[root@node-2 ~]# ceph osd blacklist add 192.168.55.2:0/2900899764
blacklisting 192.168.55.2:0/2900899764 until 2018-06-11 14:25:31.027420 (3600 sec)

[root@node-2 ~]# rbd status compute/2d05517a-8670-4cce-b39d-709e055381d6_disk
Watchers: none

Delete the RBD image:

[root@node-2 ~]# rbd rm compute/2d05517a-8670-4cce-b39d-709e055381d6_disk
Removing image: 100% complete...done.
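
The blacklist entry expires on its own (3600 sec above), but it can also be listed and removed by hand:

ceph osd blacklist ls                             # show current entries
ceph osd blacklist rm 192.168.55.2:0/2900899764   # drop the entry once the image is gone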