Let's Learn Ceph 05: CephFS

CephFS

Environment

192.168.48.56  web

192.168.126.101 ceph01
192.168.126.102 ceph02
192.168.126.103 ceph03
192.168.126.104 ceph04
192.168.126.105 ceph-admin

192.168.48.11 ceph01
192.168.48.12 ceph02
192.168.48.13 ceph03
192.168.48.14 ceph04
192.168.48.15 ceph-admin
### All nodes must run kernel version 4.5 or newer
uname -r
5.2.2-1.el7.elrepo.x86_64
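
The stock CentOS 7 kernel (3.10) does not meet this requirement; the nodes here run an elrepo mainline kernel. A minimal sketch of one way to upgrade, assuming CentOS 7 and the public elrepo repository (URLs and package names may differ in your environment):

rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
yum -y install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
yum -y --enablerepo=elrepo-kernel install kernel-ml   # mainline kernel
grub2-set-default 0                                   # assumes the new kernel is the first GRUB entry
reboot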

Create Ceph pools

A CephFS file system needs an MDS daemon plus two pools, one for metadata and one for data; here pool3 and pool4 are created and combined into a file system named cephfs with ceph fs new.

[cephadm@ceph-admin ceph-cluster]$ ceph-deploy mds create ceph02
[cephadm@ceph-admin ceph-cluster]$ ceph osd pool create pool3  64 64 
pool 'pool3' created
[cephadm@ceph-admin ceph-cluster]$ ceph osd pool create pool4  64 64 
pool 'pool4' created
[cephadm@ceph-admin ceph-cluster]$ ceph fs new cephfs pool3 pool4
new fs with metadata pool 8 and data pool 9
[cephadm@ceph-admin ceph-cluster]$ ceph -s
  cluster:
    id:     8a83b874-efa4-4655-b070-704e63553839
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph01,ceph02,ceph03 (age 5m)
    mgr: ceph04(active, since 5m), standbys: ceph03
    mds: cephfs:1 {0=ceph02=up:active}
    osd: 8 osds: 8 up (since 5m), 8 in (since 9d)
    rgw: 1 daemon active (ceph01)
 
  data:
    pools:   9 pools, 352 pgs
    objects: 243 objects, 50 MiB
    usage:   8.3 GiB used, 64 GiB / 72 GiB avail
    pgs:     352 active+clean
 
[cephadm@ceph-admin ceph-cluster]$ ceph osd pool ls
pool1
pool2
.rgw.root
default.rgw.control
default.rgw.meta
default.rgw.log
pool3
pool4
rbdpool

[cephadm@ceph-admin ceph-cluster]$ ceph fs status cephfs
cephfs - 0 clients
======
+------+--------+--------+---------------+-------+-------+
| Rank | State  |  MDS   |    Activity   |  dns  |  inos |
+------+--------+--------+---------------+-------+-------+
|  0   | active | ceph02 | Reqs:    0 /s |   10  |   13  |
+------+--------+--------+---------------+-------+-------+
+-------+----------+-------+-------+
|  Pool |   type   |  used | avail |
+-------+----------+-------+-------+
| pool3 | metadata | 1536k | 20.0G |
| pool4 |   data   |    0  | 20.0G |
+-------+----------+-------+-------+
+-------------+
| Standby MDS |
+-------------+
+-------------+
MDS version: ceph version 14.2.1 (d555a9489eb35f84f2e1ef49b77e19da9d113972) nautilus (stable)
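
A quick sanity check of the new file system (commands only, output omitted here):

ceph fs ls       # should list cephfs with metadata pool pool3 and data pool pool4
ceph mds stat    # shows the active MDS rank and any standbys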

MDS

Add MDS daemons

[cephadm@ceph-admin ceph-cluster]$ ceph-deploy  mds create ceph01  ceph03
[cephadm@ceph-admin ceph-cluster]$ ceph fs status 
cephfs - 1 clients
======
+------+--------+--------+---------------+-------+-------+
| Rank | State  |  MDS   |    Activity   |  dns  |  inos |
+------+--------+--------+---------------+-------+-------+
|  0   | active | ceph02 | Reqs:    0 /s |   10  |   13  |
+------+--------+--------+---------------+-------+-------+
+-------+----------+-------+-------+
|  Pool |   type   |  used | avail |
+-------+----------+-------+-------+
| pool3 | metadata | 1536k | 20.0G |
| pool4 |   data   |    0  | 20.0G |
+-------+----------+-------+-------+
+-------------+
| Standby MDS |
+-------------+
|    ceph01   |
|    ceph03   |
+-------------+

Modify max_mds

max_mds controls how many MDS ranks the file system keeps active at the same time; raising it to 2 promotes one of the standby daemons to a second active rank.

[cephadm@ceph-admin ceph-cluster]$ ceph fs set cephfs max_mds 2
[cephadm@ceph-admin ceph-cluster]$ ceph fs status 
cephfs - 0 clients
======
+------+--------+--------+---------------+-------+-------+
| Rank | State  |  MDS   |    Activity   |  dns  |  inos |
+------+--------+--------+---------------+-------+-------+
|  0   | active | ceph02 | Reqs:    0 /s |   10  |   13  |
|  1   | active | ceph01 | Reqs:    0 /s |    0  |    0  |
+------+--------+--------+---------------+-------+-------+
+-------+----------+-------+-------+
|  Pool |   type   |  used | avail |
+-------+----------+-------+-------+
| pool3 | metadata | 1536k | 20.0G |
| pool4 |   data   |    0  | 20.0G |
+-------+----------+-------+-------+
+-------------+
| Standby MDS |
+-------------+
|    ceph03   |
+-------------+
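
Scaling back down is the reverse operation; on Nautilus, lowering max_mds causes the cluster to stop the extra rank automatically and return that MDS to standby (a sketch, not run here):

ceph fs set cephfs max_mds 1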

Standby MDS

The mds_standby_* options in ceph.conf tell a standby daemon which file system or which active MDS to follow, and whether it should continuously replay the active journal (standby-replay) for faster failover.

vim ceph.conf

[mds.ceph03]
# have this standby follow the cephfs file system
mds_standby_for_fscid=cephfs
# alternatively, follow one specific MDS daemon by name
#mds_standby_for_name=ceph01
# continuously replay the active MDS journal (hot standby)
#mds_standby_replay=true

[cephadm@ceph-admin ceph-cluster]$ ceph-deploy config push ceph01 ceph02 ceph03 ceph04 ceph-admin
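
The pushed ceph.conf only takes effect once the MDS daemon re-reads it. A sketch, assuming the daemon on ceph03 runs under the default systemd unit name used by ceph-deploy:

ssh ceph03 sudo systemctl restart ceph-mds@ceph03
ceph fs status    # ceph03 should come back as a standby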

Create cephfs

Create a client account

[cephadm@ceph-admin ceph-cluster]$ ceph auth get-or-create client.fsclient mon 'allow r' mds 'allow rw' osd 'allow rwx pool=pool4' -o ceph.client.fsclient.keyring
[cephadm@ceph-admin ceph-cluster]$ ceph auth get client.fsclient
exported keyring for client.fsclient
[client.fsclient]
	key = AQBYCTRdgnwECBAAh9DQ4Bmy5YyvBWxoTLEVeA==
	caps mds = "allow rw"
	caps mon = "allow r"
	caps osd = "allow rwx pool=pool4"

[cephadm@ceph-admin ceph-cluster]$ ceph-authtool -p -n client.fsclient ceph.client.fsclient.keyring
AQBYCTRdgnwECBAAh9DQ4Bmy5YyvBWxoTLEVeA==

[cephadm@ceph-admin ceph-cluster]$ ceph auth print-key client.fsclient > fsclient.key
[cephadm@ceph-admin ceph-cluster]$ cat fsclient.key 
AQBYCTRdgnwECBAAh9DQ4Bmy5YyvBWxoTLEVeA==

[cephadm@ceph-admin ceph-cluster]$ scp fsclient.key  root@web:/etc/ceph
[cephadm@ceph-admin ceph-cluster]$ scp ceph.conf   root@web:/etc/ceph
[cephadm@ceph-admin ceph-cluster]$ scp ceph.client.fsclient.keyring   root@web:/etc/ceph
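
The keyring and key file contain the client secret, so it is worth tightening their permissions on the web node (a hardening step, not part of the original transcript):

chmod 600 /etc/ceph/fsclient.key /etc/ceph/ceph.client.fsclient.keyring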

Mounting on the client

Mount directly with the kernel client

[root@web ~]# cd /etc/ceph/
[root@web ceph]# ls
ceph.client.fsclient.keyring  ceph.client.rbdpool.keyring  ceph.conf  fsclient.key  rbdmap
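
The mount point must exist before mounting; if /data is not already present (an assumption, it is not shown above):

mkdir -p /data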

[root@web ceph]# mount -t ceph  ceph01:6789,ceph02:6789,ceph03:6789:/  /data -o name=fsclient,secretfile=/etc/ceph/fsclient.key
[root@web ceph]# mount | tail -1
192.168.48.11:6789,192.168.48.12:6789,192.168.48.13:6789:/ on /data type ceph (rw,relatime,name=fsclient,secret=<hidden>,acl)

[root@web ceph]# stat -f /data
  File: "/data"
    ID: 3ac8f33a6c7ef18c Namelen: 255     Type: ceph
Block size: 4194304    Fundamental block size: 4194304
Blocks: Total: 18432      Free: 16316      Available: 16316
Inodes: Total: 243        Free: -1

vim /etc/fstab

ceph01:6789,ceph02:6789,ceph03:6789:/  /data   ceph  name=fsclient,secretfile=/etc/ceph/fsclient.key,_netdev,noatime 0 0
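
A quick way to validate the fstab entry without rebooting (sketch):

umount /data    # drop the manual mount first
mount -a        # remount everything listed in /etc/fstab
df -h /data     # confirm the CephFS mount is back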

Mount with ceph-fuse

[root@web ~]# yum -y install ceph-fuse
[root@web ~]# ceph-fuse -n client.fsclient -m ceph01:6789,ceph02:6789,ceph03:6789  /data
vim /etc/fstab

none        /data    fuse.ceph   ceph.id=fsclient,ceph.conf=/etc/ceph/ceph.conf,_netdev,defaults 0 0
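
The same validation works for the fuse entry, provided the kernel-client line above is removed or commented out first so both entries do not claim /data (sketch):

umount /data
mount -a
mount | tail -1    # should now show a ceph-fuse mount on /data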