Test environment:
3 monitor nodes: CentOS 7.6
3 OSD nodes: CentOS 7.6, 20 disks each
Cluster layout: only the rbd, rgw, and cephfs services are enabled (block storage, object storage gateway, and file storage); the 3 monitor nodes also act as rgw, mgr, and mds nodes
1. Download ceph-ansible-stable-3.2 from GitHub and unpack it
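A minimal fetch-and-unpack sketch; the tarball URL follows GitHub's usual branch-archive layout and is an assumption, not copied from the original text:
wget https://github.com/ceph/ceph-ansible/archive/stable-3.2.tar.gz   # assumed branch-archive URL
tar -xzf stable-3.2.tar.gz
cd ceph-ansible-stable-3.2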
2. Install Ansible 2.6.0, as pinned in requirements.txt
pip install "ansible==2.6.0"
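A quick sanity check that the pinned version actually landed (assumes pip put ansible on the PATH):
ansible --version | head -1   # should report ansible 2.6.0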
3. Create the inventory file
[root@ansible002 ceph-ansible-stable-3.2]# cat ceph-host
mon1 ansible_host=192.168.1.201
mon2 ansible_host=192.168.1.202
mon3 ansible_host=192.168.1.203
osd1 ansible_host=192.168.1.137
osd2 ansible_host=192.168.1.138
osd3 ansible_host=192.168.1.139
[mons]
mon1
mon2
mon3
[osds]
osd1
osd2
osd3
[rgws]
mon1
mon2
mon3
[mgrs]
mon1
mon2
mon3
[mdss]
mon1
mon2
mon3
[all:vars]
ansible_user=root
ansible_ssh_pass=123456
#ansible_sudo_pass=123456
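With the inventory written, it is worth confirming that Ansible can reach every node before deploying anything; the ping module only verifies SSH login plus a usable Python on each target:
[root@ansible002 ceph-ansible-stable-3.2]# ansible -i ceph-host all -m ping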
4. Create the playbook
mv site.yml.sample site.yml
Adjust the playbook to the cluster's needs; this walkthrough only deploys rbd, rgw, and cephfs, so the remaining host groups stay commented out:
---
# Defines deployment design and assigns role to server groups
- hosts:
  - mons
  - osds
  - mdss
  - rgws
  #- nfss
  #- restapis
  #- rbdmirrors
  #- clients
  - mgrs
  #- iscsigws
  #- iscsi-gws # for backward compatibility only!
  gather_facts: false
  any_errors_fatal: true
  become: true
...
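A syntax-only pass is a cheap way to catch YAML indentation mistakes before touching any node:
[root@ansible002 ceph-ansible-stable-3.2]# ansible-playbook -i ceph-host site.yml --syntax-check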
5. Configure the Ceph cluster
[root@ansible002 ceph-ansible-stable-3.2]# cat group_vars/all.yml
public_network: "192.168.1.0/24"
cluster_network: "192.168.1.0/24"
devices:
- '/dev/sdb'
- '/dev/sdc'
- '/dev/sdd'
- '/dev/sde'
- '/dev/sdf'
- '/dev/sdg'
- '/dev/sdh'
- '/dev/sdi'
- '/dev/sdj'
- '/dev/sdk'
- '/dev/sdl'
- '/dev/sdm'
- '/dev/sdn'
- '/dev/sdo'
- '/dev/sdp'
- '/dev/sdq'
- '/dev/sdr'
- '/dev/sds'
- '/dev/sdt'
- '/dev/sdu'
cluster: ceph
mon_group_name: mons
osd_group_name: osds
rgw_group_name: rgws
mds_group_name: mdss
mgr_group_name: mgrs
centos_package_dependencies: # CentOS dependency packages
- python-pycurl
- epel-release
- python-setuptools
- libselinux-python
ceph_origin: repository
ceph_repository: community
ceph_mirror: http://mirror.yum-ceph.com/ceph
ceph_stable_key: "{{ ceph_mirror }}/keys/release.asc"
ceph_stable_release: luminous # Ceph release
monitor_interface: eth0 # NIC on the mon nodes
osd_objectstore: bluestore
## Rados Gateway options
radosgw_frontend_type: civetweb # for additional frontends see: http://docs.ceph.com/docs/mimic/radosgw/frontends/
radosgw_civetweb_port: 8080 # rgw listening port
radosgw_civetweb_num_threads: 512
radosgw_civetweb_options: "num_threads={{ radosgw_civetweb_num_threads }}"
radosgw_frontend_port: "{{ radosgw_civetweb_port if radosgw_frontend_type == 'civetweb' else '8080' }}"
radosgw_frontend_options: "{{ radosgw_civetweb_options if radosgw_frontend_type == 'civetweb' else '' }}"
radosgw_thread_pool_size: 512
radosgw_interface: eth0
rgw_multisite: false
rgw_zone: default
cephx: true
osd_mkfs_type: xfs
osd_mkfs_options_xfs: -f -i size=2048
osd_mount_options_xfs: noatime,largeio,inode64,swalloc
osd_scenario: lvm
ceph_mgr_modules: [status, dashboard, prometheus]
ceph_conf_overrides:
  global:
    mon_clock_drift_allowed: 2
    mon_clock_drift_warn_backoff: 30
    osd_pool_default_pg_num: 8
    osd_pool_default_pgp_num: 8
    osd_pool_default_size: 1
  # mgr:
  #   mgr modules: dashboard
  mon:
    mon_allow_pool_delete: true
Note: ceph_mirror: http://mirror.yum-ceph.com/ceph must point at a Ceph yum repository that the nodes can actually reach.
Compared with the stock sample file, this configuration adds ceph_conf_overrides, the mdss group, and osd_scenario: lvm.
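For orientation: ceph-ansible renders each ceph_conf_overrides section into the matching INI section of /etc/ceph/ceph.conf, so the overrides above should come out roughly like this on the nodes (a sketch, not captured output):
[global]
mon_clock_drift_allowed = 2
mon_clock_drift_warn_backoff = 30
osd_pool_default_pg_num = 8
osd_pool_default_pgp_num = 8
osd_pool_default_size = 1
[mon]
mon_allow_pool_delete = true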
6. Run the installation
[root@ansible002 ceph-ansible-stable-3.2]# ansible-playbook -i ceph-host site.yml
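Once the play completes, the cluster state can be checked from any mon node; with the environment above the expectation is 3 mon, 3 mgr, 3 mds, and 3 rgw daemons plus 3 × 20 = 60 OSDs:
[root@mon1 ~]# ceph -s   # expect HEALTH_OK with all 60 OSDs up and in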
7. Tear down the deployment
ansible-playbook -i ceph-host infrastructure-playbooks/purge-cluster.yml
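The purge playbook pauses for an interactive confirmation; to the best of my recollection of stable-3.2 it can be pre-answered with the ireallymeanit variable (verify against your checkout):
ansible-playbook -i ceph-host infrastructure-playbooks/purge-cluster.yml -e ireallymeanit=yes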
Script to wipe the disks:
[root@ansible002 ceph-ansible-stable-3.2]# cat zap-disk.sh
#!/usr/bin/env bash
# Zap the partition tables of the 20 OSD data disks, /dev/sdb through /dev/sdu
# (sda is assumed to be the system disk).
for j in {b..u}
do
    DISK="/dev/sd$j"
    sgdisk --zap-all "$DISK"
    # Optionally overwrite the first 100 MB too, clearing leftover LVM/bluestore signatures:
    #dd if=/dev/zero of="$DISK" bs=1M count=100 oflag=direct,dsync
done
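One way to run the script on all three OSD nodes at once is Ansible's script module, which copies the local file to each target and executes it there:
[root@ansible002 ceph-ansible-stable-3.2]# ansible -i ceph-host osds -m script -a zap-disk.sh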