准备
本地部署虚拟机
本文使用VirtualBox,安装CentOS 7.9 minimal系统,配置IP为192.168.56.101/24。
相关配置方法参考网上即可。
安装ceph相关rpm
本文基于Ceph N版本进行配置。
配置规划
- client.admin keyring:/etc/ceph/ceph.client.admin.keyring
- 通用config:/etc/ceph/ceph.conf
- ceph-mon
- 数据目录:/var/lib/ceph/mon/ceph-node
- 执行权限:root
- ceph-mgr
- 目录:/var/lib/ceph/mgr/ceph-node
- 执行权限:root
- ceph-osd
- 目录:/var/lib/ceph/osd/ceph-0
- 执行权限:ceph
- 存储池
- 副本数:1
部署ceph-mon
创建/etc/ceph/ceph.conf配置文件
[global]
fsid = 7ea38da8-2af9-498b-9dc8-05ec5578e761
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
osd pool default size = 1
osd pool default min size = 1
osd pool default pg num = 256
osd pool default pgp num = 256
osd crush chooseleaf type = 0
mon_allow_pool_delete = true
debug_limit = 30/0  # NOTE(review): "debug_limit"并非有效的Ceph调试子系统名,疑为笔误(如debug_ms)——请确认
debug_mon = 30/0
debug_mgr = 30/0
[mon.node]
mon_addr = 192.168.56.101:6789
public_addr = 192.168.56.101:6789
创建keyring
创建mon keyring
[root@localhost ~]# ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
creating /tmp/ceph.mon.keyring
创建admin keyring
[root@localhost ~]# ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'
creating /etc/ceph/ceph.client.admin.keyring
将admin keyring追加到mon keyring
[root@localhost ~]# ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
importing contents of /etc/ceph/ceph.client.admin.keyring into /tmp/ceph.mon.keyring
生成的mon keyring内容如下:
[mon.]
key = AQAPINViacPQIRAAT+G+vTDdpnUELMKvrU+kdA==
caps mon = "allow *"
[client.admin]
key = AQB5INViXa00HxAAxLN1BbSFbFZnqMd6i/OfxA==
caps mds = "allow *"
caps mgr = "allow *"
caps mon = "allow *"
caps osd = "allow *"
创建mon map
[root@localhost data]# monmaptool --create --add node 192.168.56.101 --fsid 7ea38da8-2af9-498b-9dc8-05ec5578e761 /tmp/monmap
monmaptool: monmap file /tmp/monmap
monmaptool: set fsid to 7ea38da8-2af9-498b-9dc8-05ec5578e761
monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
创建mon数据目录
创建mon数据目录
[root@localhost ~]# mkdir /var/lib/ceph/mon/ceph-node
[root@localhost ~]# ceph-mon --mkfs -i node --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
启动mon服务
启动mon daemon
[root@localhost ~]# ceph-mon -i node
启动msgr2服务
[root@localhost ~]# ceph mon enable-msgr2
查看ceph状态
[root@localhost data]# ceph -s
cluster:
id: 7ea38da8-2af9-498b-9dc8-05ec5578e761
health: HEALTH_OK
services:
mon: 1 daemons, quorum node (age 11m)
mgr: no daemons active
osd: 0 osds: 0 up, 0 in
data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0 B
usage: 0 B used, 0 B / 0 B avail
pgs:
部署ceph-mgr
创建keyring
创建ceph-mgr数据目录
[root@localhost ~]# mkdir /var/lib/ceph/mgr/ceph-node
创建mgr keyring
[root@localhost ~]# ceph auth get-or-create mgr.node mon 'allow profile mgr' osd 'allow *' mds 'allow *' -o /var/lib/ceph/mgr/ceph-node/keyring
启动mgr
[root@localhost ~]# ceph-mgr -i node
查看ceph状态
[root@localhost data]# ceph -s
cluster:
id: 7ea38da8-2af9-498b-9dc8-05ec5578e761
health: HEALTH_WARN
OSD count 0 < osd_pool_default_size 1
1 monitors have not enabled msgr2
services:
mon: 1 daemons, quorum node (age 12h)
mgr: node(active, since 12h)
osd: 0 osds: 0 up, 0 in
data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0 B
usage: 0 B used, 0 B / 0 B avail
pgs:
部署ceph-osd
磁盘格式化
生成uuid
[root@localhost ~]# uuidgen
610c99f7-8537-4c8c-8019-d6e4a71227fc
创建磁盘分区
[root@localhost ~]# sgdisk --largest-new=1 --change-name=1:"ceph-osd-0" --partition-guid=1:610c99f7-8537-4c8c-8019-d6e4a71227fc --mbrtogpt /dev/sdb
Setting name!
partNum is 0
REALLY setting name!
The operation has completed successfully.
[root@localhost ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 128G 0 disk
├─sda1 8:1 0 1G 0 part /boot
└─sda2 8:2 0 127G 0 part
├─centos-root 253:0 0 50G 0 lvm /
├─centos-swap 253:1 0 7.9G 0 lvm [SWAP]
└─centos-home 253:2 0 69.1G 0 lvm /home
sdb 8:16 0 64G 0 disk
└─sdb1 8:17 0 64G 0 part
sr0 11:0 1 1024M 0 rom
sr1 11:1 1 1024M 0 rom
设置磁盘分区设备权限
[root@localhost ~]# chown ceph:ceph /dev/disk/by-partuuid/610c99f7-8537-4c8c-8019-d6e4a71227fc
创建osd id
[root@localhost ~]# ceph osd create
0
创建osd keyring
创建osd数据目录并设置权限
[root@localhost ~]# mkdir /var/lib/ceph/osd/ceph-0
[root@localhost ~]# chown ceph:ceph /var/lib/ceph/osd/ceph-0/
创建keyring
[root@localhost ~]# ceph-osd --no-mon-config -i 0 --mkfs --osd-objectstore=bluestore --setgroup ceph --setuser ceph --mkkey
添加osd
[root@localhost ~]# ceph auth add osd.0 osd 'allow *' mon 'allow rwx' mgr 'allow rwx' -i /var/lib/ceph/osd/ceph-0/keyring
added key for osd.0
[root@localhost ~]# ceph auth ls
installed auth entries:
osd.0
key: AQB0S9ZiiykvABAAE3YbIIIx+bj/g813JFCj6Q==
caps: [mgr] allow rwx
caps: [mon] allow rwx
caps: [osd] allow *
......
启动osd
[root@localhost ~]# ceph-osd -i 0 --setgroup ceph --setuser ceph
查看ceph状态
[root@localhost ~]# ceph -s
cluster:
id: 7ea38da8-2af9-498b-9dc8-05ec5578e761
health: HEALTH_WARN
1 monitors have not enabled msgr2
services:
mon: 1 daemons, quorum node (age 16h)
mgr: node(active, since 16h)
osd: 1 osds: 1 up (since 6s), 1 in (since 6s)
data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0 B
usage: 1.0 GiB used, 99 GiB / 100 GiB avail
pgs:
创建存储池
创建存储池
[root@localhost ~]# ceph osd pool create blockpool0 128 128
pool 'blockpool0' created
设置副本数
[root@localhost ~]# ceph osd pool set blockpool0 size 1
set pool 1 size to 1
[root@localhost ~]# ceph osd pool set blockpool0 min_size 1
set pool 1 min_size to 1
查看ceph状态
[root@localhost ~]# ceph -s
cluster:
id: 7ea38da8-2af9-498b-9dc8-05ec5578e761
health: HEALTH_WARN
1 pool(s) have no replicas configured
services:
mon: 1 daemons, quorum node (age 9m)
mgr: node(active, since 17h)
osd: 1 osds: 1 up (since 13m), 1 in (since 40m)
data:
pools: 1 pools, 240 pgs
objects: 0 objects, 0 B
usage: 1.1 GiB used, 99 GiB / 100 GiB avail
pgs: 240 active+clean
参考文献
https://www.cnblogs.com/freeweb/p/13542285.html
https://docs.ceph.com/en/quincy/