[root@ceph01 ceph-cluster]#ceph osd pool create mypool 32 #数据
pool 'mypool' created
[root@ceph01 ceph-cluster]#ceph osd pool create mypool_mata 32 #元数据
pool 'mypool_mata' created
[root@ceph01 ceph-cluster]#ceph osd pool ls
mypool
mypool_mata
[root@ceph01 ceph-cluster]#rados lspools
mypool
mypool_mata
[root@ceph01 ceph-cluster]#ceph fs new fs-test mypool_mata mypool #创建cephfs:fs-test,绑定pool
new fs with metadata pool 2 and data pool 1
[root@ceph01 ceph-cluster]#ceph fs ls
name: fs-test, metadata pool: mypool_mata, data pools: [mypool ]
[root@ceph01 ceph-cluster]# ceph fs status fs-test
fs-test - 0 clients
=======
+------+--------+--------+---------------+-------+-------+
| Rank | State | MDS | Activity | dns | inos |
+------+--------+--------+---------------+-------+-------+
|0| active | ceph01 | Reqs: 0 /s |10|13|
+------+--------+--------+---------------+-------+-------+
+-------------+----------+-------+-------+
| Pool |type| used | avail |
+-------------+----------+-------+-------+
| mypool_mata | metadata | 1536k |  283G |
|   mypool    |   data   |    0  |  283G |
+-------------+----------+-------+-------+
+-------------+
| Standby MDS |
+-------------+
+-------------+
MDS version: ceph version 14.2.22 (ca74598065096e6fcbd8433c8779a2be0c889351) nautilus (stable)
[root@ceph01 ceph-cluster]#ceph mds stat
fs-test:1 {0=ceph01=up:active} #已启用
[root@ceph01 ceph-cluster]#modprobe rbd #加载rbd内核模块
[root@ceph01 ceph-cluster]#lsmod | grep rbd
rbd                   94208  0
libceph              360448  1 rbd
[root@ceph01 ceph-cluster]#cat ceph.client.admin.keyring
[client.admin]
key =AQBJawRjSaZUEBAAZvfbk2N9Our6O6yPzJEZxg==
caps mds ="allow *"
caps mgr ="allow *"
caps mon ="allow *"
caps osd ="allow *"
#客户端挂载(挂载点目录需与mount目标一致)
mkdir /cephfs_test
mount -t ceph 10.30.130.21:6789:/ /cephfs_test -o name=admin,secret=AQCYNFBbMFgrGRAATVYCVImCvnW+SeK9MDGb1g==
2、cephfs删除pool
#错误删除的报错信息:
#删除pool
[root@ceph01 ceph-cluster]#ceph osd pool rm mypool
Error EPERM: WARNING: this will *PERMANENTLY DESTROY* all data stored in pool mypool. If you are *ABSOLUTELY CERTAIN* that is what you want, pass the pool name *twice*, followed by --yes-i-really-really-mean-it.
#把pool名传递两次,并后边跟--yes-i-really-really-mean-it
[root@ceph01 ceph-cluster]#ceph osd pool rm mypool mypool --yes-i-really-really-mean-it
Error EBUSY: pool 'mypool' is in use by CephFS #被cephfs使用
[root@ceph01 ceph-cluster]#ceph osd pool rm mypool mypool --yes-i-really-really-mean-it
Error EPERM: pool deletion is disabled; you must first set the mon_allow_pool_delete config option to true before you can destroy a pool
#配置文件里mon_allow_pool_delete需要是true
#删除cephfs
[root@ceph01 ceph-cluster]#ceph fs rm fs-test
Error EINVAL: all MDS daemons must be inactive/failed before removing filesystem. See `ceph fs fail`. #MDS进程必须是inactive/failed
[root@ceph01 ceph-cluster]#ceph fs rm fs-test
Error EPERM: this is a DESTRUCTIVE operation and will make data in your filesystem permanently inaccessible. Add --yes-i-really-mean-it if you are sure you wish to continue. #后边跟--yes-i-really-mean-it
#正确删除步骤:
[root@ceph01 ceph-cluster]#systemctl stop ceph-mds@ceph01 #停用mds服务
[root@ceph01 ceph-cluster]#ceph fs rm fs-test --yes-i-really-mean-it #删除cephfs文件系统,可通过ceph fs ls查询
[root@ceph01 ceph-cluster]#vi /etc/ceph/ceph.conf #注:所有mon节点都需要配置并重启服务
[mon]
mon_allow_pool_delete = true
[root@ceph01 ceph-cluster]#systemctl restart ceph-mon.target
[root@ceph01 ceph-cluster]#ceph osd pool delete mypool mypool --yes-i-really-really-mean-it #删除pool,rm和delete都可以
pool 'mypool' removed
[root@ceph01 ceph-cluster]#ceph osd pool rm mypool_mata mypool_mata --yes-i-really-really-mean-it
pool 'mypool_mata' removed
#删除后恢复配置:
[root@ceph01 ceph-cluster]#vi /etc/ceph/ceph.conf #注:所有mon节点都需要配置并重启服务
[mon]
#mon_allow_pool_delete = true
[root@ceph01 ceph-cluster]#systemctl start ceph-mds@ceph01 #启用mds服务