Ceph Hands-on Exam Practice
1. Deploy a new Ceph cluster: cluster initialization requirements
The cluster needs three Ceph nodes, with hostnames XXXXceph01, XXXXceph02 and XXXXceph03 (XXXX is the last two digits of your student ID).
The three entries in /etc/hosts must resolve the hostnames XXXXceph01, XXXXceph02 and XXXXceph03 (XXXX is the last two digits of your student ID).
At initialization time the cluster only needs a single mon node, which is XXXXceph01 (XXXX is the last two digits of your student ID).
Run the following commands to verify the cluster status, and take a screenshot
## Configuration on ceph01
## Write /etc/hosts (overwrite rather than append, so the default localhost entries are not duplicated)
cat > /etc/hosts << EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.100.10 2354ceph01
192.168.100.20 2354ceph02
192.168.100.30 2354ceph03
EOF
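The initialization requirements above also call for the node hostnames themselves to be set; a minimal sketch, assuming the same sample IP plan and the sample student-ID suffix 2354 used throughout (run the matching command on each node):
hostnamectl set-hostname 2354ceph01   ## on 192.168.100.10
hostnamectl set-hostname 2354ceph02   ## on 192.168.100.20
hostnamectl set-hostname 2354ceph03   ## on 192.168.100.30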
## Configure passwordless SSH between the nodes
yum install expect coreutils wget chrony -y
SERVERS="2354ceph01 2354ceph02 2354ceph03"
PASSWORD=000000
auto_ssh_keygen(){
    ## generate an RSA key pair non-interactively (the CentOS 7 sshd rejects DSA keys by default)
    expect -c "set timeout -1;
        spawn ssh-keygen -t rsa;
        expect {
            *id_rsa)* {send -- \r;exp_continue;}
            *passphrase)* {send -- \r;exp_continue;}
            *again:* {send -- \r;exp_continue;}
            eof {exit 0;}
        }";
}
auto_ssh_keygen
auto_ssh_copy_id(){
    ## copy the public key to host $1, answering the host-key and password prompts automatically
    expect -c "set timeout -1;
        spawn ssh-copy-id $1;
        expect {
            *(yes/no)* {send -- yes\r;exp_continue;}
            *password:* {send -- $2\r;exp_continue;}
            eof {exit 0;}
        }";
}
## Loop over all hosts and push the key to each one
ssh_copy_id_to_all(){
    for SERVER in $SERVERS
    do
        auto_ssh_copy_id $SERVER $PASSWORD
    done
}
ssh_copy_id_to_all
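Before continuing, it is worth confirming that the key really works on every node; a quick check (just an illustration, not part of the original procedure; BatchMode makes ssh fail instead of prompting for a password):
for SERVER in $SERVERS
do
    ssh -o BatchMode=yes $SERVER hostname
done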
## Configure the yum repositories
mv -f /etc/yum.repos.d/* /home/
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.huaweicloud.com/repository/conf/CentOS-7-anon.repo
cat >> /etc/yum.repos.d/ceph.repo << EOF
[ceph]
name=ceph_huaweicloud
baseurl=https://mirrors.huaweicloud.com/ceph/rpm-15.2.4/el7/x86_64/
gpgcheck=0
enabled=1
[ceph-noarch]
name=ceph-noarch_huaweicloud
baseurl=https://mirrors.huaweicloud.com/ceph/rpm-15.2.4/el7/noarch/
gpgcheck=0
enabled=1
EOF
cat >> /etc/yum.repos.d/epel.repo << EOF
[epel]
name=epel_huaweicloud
baseurl=https://mirrors.huaweicloud.com/epel/7/x86_64/
gpgcheck=0
enabled=1
EOF
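If yum later reports stale or conflicting metadata after the repo files are swapped, the cache can be rebuilt first (optional, not part of the original procedure):
yum clean all
yum makecache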
## Push /etc/hosts and the yum repo files to nodes 02 and 03
scp /etc/hosts 2354ceph02:/etc/hosts
scp /etc/hosts 2354ceph03:/etc/hosts
scp /etc/yum.repos.d/* 2354ceph02:/etc/yum.repos.d/
scp /etc/yum.repos.d/* 2354ceph03:/etc/yum.repos.d/
## Configure time synchronization and stop the firewall
sed -i 's/^server/#&/' /etc/chrony.conf
cat >> /etc/chrony.conf << EOF
local stratum 10
server 192.168.100.10 iburst
allow all
EOF
systemctl stop firewalld && setenforce 0
systemctl enable chronyd && systemctl restart chronyd
timedatectl set-ntp true
chronyc sources -v
ssh 2354ceph02 << eeooff
## resolve the ceph01 IP from the remote /etc/hosts; the \$ is escaped so it expands on the remote side
NODE1=\$(grep ceph01 /etc/hosts | awk '{print \$1}')
yum install -y chrony expect coreutils wget
sed -i 's/^server/#&/' /etc/chrony.conf
echo "server \$NODE1 iburst" >> /etc/chrony.conf
systemctl stop firewalld && setenforce 0
systemctl enable chronyd && systemctl restart chronyd
chronyc sources -v
exit
eeooff
ssh 2354ceph03 << eeooff
## resolve the ceph01 IP from the remote /etc/hosts; the \$ is escaped so it expands on the remote side
NODE1=\$(grep ceph01 /etc/hosts | awk '{print \$1}')
yum install -y chrony expect coreutils wget
sed -i 's/^server/#&/' /etc/chrony.conf
echo "server \$NODE1 iburst" >> /etc/chrony.conf
systemctl stop firewalld && setenforce 0
systemctl enable chronyd && systemctl restart chronyd
chronyc sources -v
exit
eeooff
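To double-check that the second and third nodes actually sync against 2354ceph01, the chrony state can be queried over the passwordless SSH configured earlier (an optional sketch):
for SERVER in 2354ceph02 2354ceph03
do
    echo "== $SERVER =="
    ssh $SERVER "chronyc tracking | grep -E 'Reference ID|Stratum'"
done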
## Install Ceph
yum install python-setuptools ceph-deploy python3 -y
## Install Python libraries that Ceph needs but the el7 packages are missing
pip3 install pecan
pip3 install werkzeug
yum -y install ceph ceph-radosgw
## Create the Ceph cluster
mkdir -p /etc/ceph
chmod 777 /etc/ceph
cd /etc/ceph
ceph-deploy new 2354ceph01
cat >> /etc/ceph/ceph.conf <<EOF
public_network=192.168.100.0/24
EOF
ceph-deploy install 2354ceph01 --no-adjust-repos
ceph-deploy --overwrite-conf mon create-initial
chmod +r ceph.client.admin.keyring
/bin/ceph --cluster=ceph osd stat --format=json
ceph status
2. Expand the number of mon nodes to 3
Run the following commands to verify the number of mon nodes, and take a screenshot
## Do not run "ceph-deploy new" again here: it would regenerate ceph.conf with a new fsid and break the existing cluster
ceph-deploy install 2354ceph02 2354ceph03 --no-adjust-repos
## Note: if a mon cannot be added, check on every node that the firewall is stopped and that public_network has been added to ceph.conf
## Expand the number of mon nodes to 3 (run "sudo pkill ceph" on the node first if a failed attempt left stale daemons behind)
ceph-deploy --overwrite-conf mon create 2354ceph02 2354ceph03
##ceph-deploy --overwrite-conf config push 2354ceph01 2354ceph02 2354ceph03
##ceph-deploy --overwrite-conf mon add 2354ceph02 2354ceph03
##ceph-deploy --overwrite-conf mon create-initial
ceph mon dump
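Besides ceph mon dump, quorum membership can be confirmed directly; all three mons should appear under quorum_names (an optional extra check):
ceph quorum_status --format json-pretty | grep -A4 quorum_names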
3. Add one OSD on each node
Run the following commands to verify the number and distribution of OSDs, and take a screenshot
## Create the mgr daemons and the OSDs (an extra /dev/sdb disk must be attached to every node)
ceph-deploy mgr create 2354ceph01 2354ceph02 2354ceph03
ceph-deploy osd create --data /dev/sdb 2354ceph01
ceph-deploy osd create --data /dev/sdb 2354ceph02
ceph-deploy osd create --data /dev/sdb 2354ceph03
ceph osd tree
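ceph osd df and ceph -s give a quick view of per-OSD usage and overall health once the three OSDs are in (optional extra checks, not required by the exam):
ceph osd df
ceph -s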
4. Delete the OSD on the XXXXceph03 node (XXXX is the last two digits of your student ID)
Run the following commands to verify the number and distribution of OSDs, and take a screenshot
## The OSD on 2354ceph03 is osd.2
ceph osd out osd.2
ssh 2354ceph03 << eeooff
systemctl stop ceph-osd@2
exit
eeooff
ceph osd crush remove osd.2
ceph auth del osd.2
ceph osd rm osd.2
ceph osd tree
## Add the OSD back, since the following exercises still need it
ssh 2354ceph03 << eeooff
## wipe the old OSD data on /dev/sdb; the first mke2fs may fail while LVM still holds the disk
mke2fs -t ext4 -b 4096 /dev/sdb
cat /proc/partitions
## remove the leftover device-mapper (LVM) mapping of the deleted OSD so /dev/sdb is free again
dmsetup status
dmsetup remove_all
## now the filesystem can be written, clearing the old Ceph signatures
mke2fs -t ext4 -b 2048 -m 3 /dev/sdb
exit
eeooff
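A cleaner alternative to the mke2fs/dmsetup sequence above, if you prefer it, is to let ceph-deploy wipe the disk (a sketch, assuming the ceph-deploy 2.x syntax of host followed by device):
ceph-deploy disk zap 2354ceph03 /dev/sdb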
ceph-deploy --overwrite-conf osd create --data /dev/sdb 2354ceph03
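After the OSD is re-created, the tree should again show three OSDs, one per host:
ceph osd tree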
5. Split the Ceph cluster network into a public network and a cluster network
Run the following commands to verify that the OSDs listen on both network planes, and take a screenshot (every node must be captured)
## Add a second host-only NIC to every node on the 192.168.200.0/24 subnet
cat >> /etc/ceph/ceph.conf <<EOF
cluster_network=192.168.200.0/24
EOF
scp /etc/ceph/ceph.conf 2354ceph02:/etc/ceph/
scp /etc/ceph/ceph.conf 2354ceph03:/etc/ceph/
systemctl restart ceph.target
ssh 2354ceph02 << eeooff
systemctl restart ceph.target
eeooff
ssh 2354ceph03 << eeooff
systemctl restart ceph.target
eeooff
ss -ntlp | grep ceph-osd   ## each OSD should listen on both 192.168.100.x and 192.168.200.x
## the same check has to be captured on 2354ceph02 and 2354ceph03 as well
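Since every node's listener list has to be captured, the check can also be collected from one place over SSH (just a convenience sketch, relying on the passwordless login set up earlier):
for SERVER in 2354ceph01 2354ceph02 2354ceph03
do
    echo "== $SERVER =="
    ssh $SERVER "ss -ntlp | grep ceph-osd"
done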
6. Create a storage pool named testpoolXXXX (XXXX is the last two digits of your student ID)
Run the following commands to list the storage pools, and take a screenshot
ceph osd pool create testpool2354
ceph osd pool ls
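Octopus warns when a pool has no application enabled; since this pool is used for RBD in the later steps, it can be tagged accordingly (optional, the later commands work either way):
ceph osd pool application enable testpool2354 rbd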
7. Upload an object named testobjectXXXX to the testpoolXXXX pool (XXXX is the last two digits of your student ID)
Run the following commands to list the objects in the testpoolXXXX pool, and take a screenshot
touch testobject2354
rados -p testpool2354 put testobject2354 testobject2354
rados ls -p testpool2354
8. Create an RBD image named testimageXXXX with a size of 100 MB (XXXX is the last two digits of your student ID)
Run the following commands to show the details of testimageXXXX, and take a screenshot
rbd create testpool2354/testimage2354 --size 100
rbd info testpool2354/testimage2354
9. Map testimageXXXX as a block device
Run the following commands to list the block device mappings, and take a screenshot
## The CentOS 7 kernel RBD client does not support these image features, so disable them before mapping
rbd feature disable testpool2354/testimage2354 object-map fast-diff deep-flatten
rbd device map testpool2354/testimage2354
rbd device list
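The mapped image also shows up as a regular block device, typically /dev/rbd0 (the exact name may vary); an optional extra check:
lsblk | grep rbd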
10. Delete the testpoolXXXX storage pool
Run the following commands to list the storage pools, and take a screenshot
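testimage2354 is still mapped from step 9, so it is safer to unmap it before deleting the pool that backs it (this step is not in the original procedure):
rbd device unmap testpool2354/testimage2354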
cat >> /etc/ceph/ceph.conf <<EOF
[mon]
mon allow pool delete = true
EOF
scp /etc/ceph/ceph.conf 2354ceph02:/etc/ceph/
scp /etc/ceph/ceph.conf 2354ceph03:/etc/ceph/
systemctl restart ceph-mon.target
ssh 2354ceph02 systemctl restart ceph-mon.target
ssh 2354ceph03 systemctl restart ceph-mon.target
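On Octopus, an alternative that avoids editing ceph.conf and restarting the mons is to set the option in the centralized config database (a sketch; the underlying option name is mon_allow_pool_delete):
ceph config set mon mon_allow_pool_delete true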
ceph osd pool delete testpool2354 testpool2354 --yes-i-really-really-mean-it
ceph osd pool ls