Manually Deploying Ceph (luminous) on CentOS 7

 

 

Contents

Lab environment:

Lab steps:

1. Configure the yum repositories

2. Install packages

3. Configure the mon daemons

4. Configure the OSDs (run on all nodes)

5. Configure the mgr daemons (run on all nodes)

6. Check cluster status

7. Update ceph.conf and restart ceph-mon (run on all nodes)

8. Single-node Ceph

9. RBD import and export


Lab environment:

Hostname           IP                Spec
mon01 (mon+osd)    192.168.226.200   2C/4G, 20G system disk + 2x20G data disks
mon02 (mon+osd)    192.168.226.201   2C/4G, 20G system disk + 2x20G data disks
mon03 (mon+osd)    192.168.226.202   2C/4G, 20G system disk + 2x20G data disks

Lab steps:

1. Configure the yum repositories

cat /etc/yum.repos.d/ceph.repo

[Ceph]
name=Ceph packages for $basearch
baseurl=http://mirrors.ustc.edu.cn/ceph/rpm-luminous/el7/$basearch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://mirrors.ustc.edu.cn/ceph/keys/release.asc
priority=1

[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirrors.ustc.edu.cn/ceph/rpm-luminous/el7/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://mirrors.ustc.edu.cn/ceph/keys/release.asc
priority=1

[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.ustc.edu.cn/ceph/rpm-luminous/el7/SRPMS
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://mirrors.ustc.edu.cn/ceph/keys/release.asc
priority=1

cat /etc/yum.repos.d/epel.repo
 

[epel]
name=Extra Packages for Enterprise Linux 7 - $basearch
baseurl=http://mirrors.aliyun.com/epel/7/$basearch
failovermethod=priority
enabled=1
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
 
[epel-debuginfo]
name=Extra Packages for Enterprise Linux 7 - $basearch - Debug
baseurl=http://mirrors.aliyun.com/epel/7/$basearch/debug
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
gpgcheck=0
 
[epel-source]
name=Extra Packages for Enterprise Linux 7 - $basearch - Source
baseurl=http://mirrors.aliyun.com/epel/7/SRPMS
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
gpgcheck=0
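
With both repo files in place, rebuilding the yum cache and listing the repos is a quick sanity check (standard yum usage, not specific to this setup):

yum clean all && yum makecache
yum repolist | grep -iE 'ceph|epel'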

2. Install packages

yum install ceph ceph-radosgw rsync -y  # rsync is installed to make copying files between nodes easier
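
Before going further it is worth confirming which release the repo actually delivered:

ceph --version    # should report a 12.2.x (luminous) build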

3. Configure the mon daemons


3.1 Configuration file (run on the first node)
vim /etc/ceph/ceph.conf
 

[global]
fsid = 4be0d011-a4e5-4c60-9bcb-aa6f54c87449
mon initial members = mon01
mon host = 192.168.226.200
public_network = 192.168.226.0/24
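
The fsid above is simply this lab's cluster UUID; when reproducing the steps, generate your own and use it both here and in the monmaptool command below:

uuidgen    # paste the result into ceph.conf as fsid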


3.2 Create the keyrings and the monmap (run on the first node)

ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
sudo ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'
sudo ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd' --cap mgr 'allow r'

sudo ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
sudo ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring

sudo chown ceph:ceph /tmp/ceph.mon.keyring

monmaptool --create --add mon01 192.168.226.200 --fsid 4be0d011-a4e5-4c60-9bcb-aa6f54c87449 /tmp/monmap

sudo -u ceph mkdir /var/lib/ceph/mon/ceph-mon01

sudo -u ceph ceph-mon --mkfs -i mon01 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
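
Before starting the daemon, the generated monmap and keyring can be sanity-checked:

monmaptool --print /tmp/monmap
ceph-authtool /tmp/ceph.mon.keyring --list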

3.3 Start the mon (run on the first node)

systemctl start ceph-mon@mon01


If the start fails, view the detailed log with:
 

journalctl -b -u "ceph-mon@mon01.service"
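
Assuming the unit started cleanly, enable it so the mon survives reboots, and check that a single-mon cluster is already reporting:

systemctl enable ceph-mon@mon01
ceph -s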

3.4 Copy the configuration file and keyrings to the other nodes
 

rsync -aPv /etc/ceph/* mon02:/etc/ceph/
rsync -aPv /etc/ceph/* mon03:/etc/ceph/
rsync -aPv /var/lib/ceph/bootstrap-osd/ceph.keyring mon03:/var/lib/ceph/bootstrap-osd/ceph.keyring
rsync -aPv /var/lib/ceph/bootstrap-osd/ceph.keyring mon02:/var/lib/ceph/bootstrap-osd/ceph.keyring
rsync -aPv /tmp/ceph.mon.keyring mon02:/tmp/ceph.mon.keyring
rsync -aPv /tmp/ceph.mon.keyring mon03:/tmp/ceph.mon.keyring
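
The rsync commands above assume password-less SSH from mon01 to the other nodes; if that is not already set up, a typical way to do it is:

ssh-keygen                 # accept the defaults
ssh-copy-id root@mon02
ssh-copy-id root@mon03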

3.5 Configure the other nodes (run on each of the other nodes; mon02 shown, mon03 analogous below)
 

sudo -u ceph mkdir /var/lib/ceph/mon/ceph-mon02
ceph auth get mon. -o /tmp/ceph.mon.keyring
ceph mon getmap -o /tmp/ceph.mon.map
sudo -u ceph ceph-mon --mkfs -i mon02 --monmap /tmp/ceph.mon.map --keyring /tmp/ceph.mon.keyring
systemctl restart ceph-mon@mon02
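
On mon03 the same steps apply with the hostname swapped:

sudo -u ceph mkdir /var/lib/ceph/mon/ceph-mon03
ceph auth get mon. -o /tmp/ceph.mon.keyring
ceph mon getmap -o /tmp/ceph.mon.map
sudo -u ceph ceph-mon --mkfs -i mon03 --monmap /tmp/ceph.mon.map --keyring /tmp/ceph.mon.keyring
systemctl restart ceph-mon@mon03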

3.6 Update ceph.conf and restart ceph-mon (run on all nodes)
vim /etc/ceph/ceph.conf
 

[global]
fsid = 4be0d011-a4e5-4c60-9bcb-aa6f54c87449
mon initial members = mon01,mon02,mon03
mon host = 192.168.226.200,192.168.226.201,192.168.226.202
public_network = 192.168.226.0/24

systemctl restart ceph-mon@mon01
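
Run the matching restart on the other nodes (ceph-mon@mon02, ceph-mon@mon03); once all three are back, quorum can be verified with:

ceph mon stat
ceph quorum_status --format json-pretty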

4. Configure the OSDs (run on all nodes)
 

ceph-volume lvm create --data /dev/sdb  # the commented commands below are the step-by-step equivalent of this single command
#ceph-volume lvm prepare --data /dev/sdb
#ceph-volume lvm list
#sudo ceph-volume lvm activate {ID} {FSID}
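
Each node in this lab has two data disks, so the command is run once per disk; afterwards ceph-volume and the OSD tree should show both OSDs up:

ceph-volume lvm create --data /dev/sdc
ceph-volume lvm list
ceph osd tree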


5. Configure the mgr daemons (run on all nodes)

sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-mon01
sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-mon02
ceph auth get-or-create mgr.mon01 mon 'allow profile mgr' osd 'allow *' mds 'allow *' >> /var/lib/ceph/mgr/ceph-mon01/keyring
chown ceph.ceph /var/lib/ceph/mgr/ceph-mon01/keyring
chmod 400 /var/lib/ceph/mgr/ceph-mon01/keyring
ceph-mgr -i mon01
ceph mon enable-msgr2
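
The keyring commands above only cover mon01 and launch ceph-mgr by hand; a sketch of the same pattern for the other nodes, using the systemd unit instead so the daemon stays managed across reboots (shown for mon02, analogous for mon03):

sudo -u ceph mkdir -p /var/lib/ceph/mgr/ceph-mon02
ceph auth get-or-create mgr.mon02 mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-mon02/keyring
chown ceph:ceph /var/lib/ceph/mgr/ceph-mon02/keyring
chmod 400 /var/lib/ceph/mgr/ceph-mon02/keyring
systemctl enable --now ceph-mgr@mon02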

6. Check cluster status

#ceph -s
  cluster:
    id:     18889b8b-6c26-474e-b2f4-e5738719125a
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum mon01,mon02,mon03
    mgr: mon01(active), standbys: mon03, mon02
    osd: 6 osds: 6 up, 6 in
 
  data:
    pools:   3 pools, 384 pgs
    objects: 662 objects, 882MiB
    usage:   6.57GiB used, 113GiB / 120GiB avail
    pgs:     384 active+clean
    
    
#ceph osd tree
ID CLASS WEIGHT  TYPE NAME       STATUS REWEIGHT PRI-AFF
-1       0.11691 root default                            
-3       0.03897     host mon01                         
 0   hdd 0.01949         osd.0       up  1.00000 1.00000
 1   hdd 0.01949         osd.1       up  1.00000 1.00000
-5       0.03897     host mon02                         
 2   hdd 0.01949         osd.2       up  1.00000 1.00000
 3   hdd 0.01949         osd.3       up  1.00000 1.00000
-7       0.03897     host mon03                         
 4   hdd 0.01949         osd.4       up  1.00000 1.00000
 5   hdd 0.01949         osd.5       up  1.00000 1.00000
 
#ceph -v
ceph version 12.2.11 (26dc3775efc7bb286a1d6d66faee0ba30ea23eee) luminous (stable)
 


7. Update ceph.conf and restart ceph-mon (run on all nodes)


vim /etc/ceph/ceph.conf

[global]
fsid = 4be0d011-a4e5-4c60-9bcb-aa6f54c87449
mon initial members = mon01,mon02,mon03
mon host = 192.168.226.200,192.168.226.201,192.168.226.202
public_network = 192.168.226.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
osd pool default size = 1
osd pool default min size = 1
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1

systemctl restart ceph-mon@mon01
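
After the mons on all three nodes have been restarted, the running value can be checked against the file through the admin socket, e.g. on mon01:

ceph daemon mon.mon01 config get osd_pool_default_size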

8. Single-node Ceph

# ceph osd getcrushmap -o crush_map_compressed
# crushtool -d crush_map_compressed -o crush_map_decompressed

# vim crush_map_decompressed
...
# rules
rule replicated_ruleset {
        ruleset 0
        type replicated
        min_size 1
        max_size 10
        step take default
        step chooseleaf firstn 0 type host    # change "host" to "osd" so replicas can land on different OSDs of the same host
        step emit
}

# end crush map

# crushtool -c crush_map_decompressed -o new_crush_map_compressed
# ceph osd setcrushmap -i new_crush_map_compressed

# ceph osd pool set device_health_metrics size 2
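
Section 9 below imports an image into a pool named vms, which is not created anywhere above; a minimal way to create it (the pg count of 128 matches the 3 pools / 384 pgs shown in section 6, but is otherwise an arbitrary choice for this small lab):

# ceph osd pool create vms 128
# ceph osd pool application enable vms rbd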

9. RBD import and export

# rbd import --path cirros-0.4.0-x86_64-disk.img --dest-pool vms

# rbd export vms/cirros-0.4.0-x86_64-disk.img /tmp/cirros.img

# md5sum cirros-0.4.0-x86_64-disk.img 
443b7623e27ecf03dc9e01ee93f67afe  cirros-0.4.0-x86_64-disk.img
# md5sum /tmp/cirros.img 
443b7623e27ecf03dc9e01ee93f67afe  /tmp/cirros.img
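
The matching checksums confirm a lossless round trip; the imported image can also be inspected directly in the pool:

# rbd ls vms
# rbd info vms/cirros-0.4.0-x86_64-disk.img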

Reference: https://docs.ceph.com/docs/master/install/manual-deployment/
