Quick Deployment of Ceph Storage

Let's prepare the deployment environment. A typical setup uses five servers: a master plus four additional nodes. To keep costs down, we use three hosts here.

Hostname       IP
ceph-master    192.168.0.102
ceph-node1     192.168.0.103
ceph-node2     192.168.0.104

Each host needs two disks; the disk size is up to you.

Package and image archives (Baidu Pan):
https://pan.baidu.com/s/1EjN9TTbgGH2VR4J2wyVqdw
Extraction code: iiii
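
As a quick sanity check before starting, the block devices on each host can be listed; this assumes the spare disk shows up as /dev/sdb, as it does later in this walkthrough:

# list the block devices; the second, unused disk (assumed here to be /dev/sdb) will later become the OSD device
lsblk -o NAME,SIZE,TYPE,MOUNTPOINT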

// Preparation: run on all three hosts
[root@ceph-master ~]# yum install -y net-tools lrzsz unzip
[root@ceph-node1 ~]# yum install net-tools lrzsz unzip -y
[root@ceph-node2 ~]# yum install net-tools lrzsz unzip -y

// Add the host entries to /etc/hosts on all three hosts
[root@ceph-master ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.0.102 ceph-master
192.168.0.103 ceph-node1
192.168.0.104 ceph-node2
[root@ceph-node1 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.0.102 ceph-master
192.168.0.103 ceph-node1
192.168.0.104 ceph-node2
[root@ceph-node2 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.0.102 ceph-master
192.168.0.103 ceph-node1
192.168.0.104 ceph-node2
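
An optional check that the entries resolve and the machines can reach each other (plain ping, run from any of the three hosts):

for h in ceph-master ceph-node1 ceph-node2; do
    ping -c 1 -W 1 "$h" > /dev/null && echo "$h reachable" || echo "$h NOT reachable"
done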


// Disable the firewall, SELinux, and the swap partition (all three hosts)
[root@ceph-master ~]# systemctl stop firewalld && systemctl disable firewalld
[root@ceph-master ~]# setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce: SELinux is disabled
[root@ceph-master ~]# swapoff -a &&  sed -ri 's/.*swap.*/#&/' /etc/fstab
[root@ceph-node1 ~]# systemctl stop firewalld && systemctl disable firewalld
[root@ceph-node1 ~]# setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce: SELinux is disabled
[root@ceph-node1 ~]# swapoff -a &&  sed -ri 's/.*swap.*/#&/' /etc/fstab
[root@ceph-node2 ~]# systemctl stop firewalld && systemctl disable firewalld
[root@ceph-node2 ~]# setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce: SELinux is disabled
[root@ceph-node2 ~]# swapoff -a &&  sed -ri 's/.*swap.*/#&/' /etc/fstab
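
A small verification sketch for the three settings just changed; note that getenforce may still report Permissive (or Disabled, as on these hosts) until the next reboot:

systemctl is-active firewalld     # expected: inactive
getenforce                        # expected: Permissive or Disabled
free -m | grep -i swap            # expected: 0 total after swapoff -a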

// Raise the resource limits: append these lines at the end of /etc/security/limits.conf (all three hosts)
[root@ceph-master ~]# ulimit -SHn 65535
[root@ceph-master ~]# cat /etc/security/limits.conf
# End of file
* soft nofile 65535
* hard nofile 65535
* soft nproc  65535
* hard nproc  65535
[root@ceph-node1 ~]# ulimit -SHn 65535
[root@ceph-node1 ~]# cat /etc/security/limits.conf 
# End of file
* soft nofile 65535
* hard nofile 65535
* soft nproc  65535
* hard nproc  65535
[root@ceph-node2 ~]# ulimit -SHn 65535
[root@ceph-node2 ~]# cat /etc/security/limits.conf 
# End of file
* soft nofile 65535
* hard nofile 65535
* soft nproc  65535
* hard nproc  65535
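
After logging in again, the new limits can be confirmed with the shell built-in ulimit:

ulimit -n     # open files, expected 65535
ulimit -u     # max user processes, expected 65535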

[root@ceph-master ~]# vi /etc/sysctl.conf 
[root@ceph-master ~]# cat /etc/sysctl.conf 
# sysctl settings are defined through files in
# /usr/lib/sysctl.d/, /run/sysctl.d/, and /etc/sysctl.d/.
#
# Vendors settings live in /usr/lib/sysctl.d/.
# To override a whole file, create a new file with the same in
# /etc/sysctl.d/ and put new settings there. To override
# only specific settings, add a file with a lexically later
# name in /etc/sysctl.d/ and put new settings there.
#
# For more information, see sysctl.conf(5) and sysctl.d(5).
kernel.pid_max = 4194303
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_tw_reuse = 1
net.ipv4.ip_local_port_range = 1024 65000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_tw_buckets = 20480
net.ipv4.tcp_max_syn_backlog = 20480
net.core.netdev_max_backlog = 262144
net.ipv4.tcp_fin_timeout = 20
[root@ceph-node1 ~]# cat /etc/sysctl.conf 
# sysctl settings are defined through files in
# /usr/lib/sysctl.d/, /run/sysctl.d/, and /etc/sysctl.d/.
#
# Vendors settings live in /usr/lib/sysctl.d/.
# To override a whole file, create a new file with the same in
# /etc/sysctl.d/ and put new settings there. To override
# only specific settings, add a file with a lexically later
# name in /etc/sysctl.d/ and put new settings there.
#
# For more information, see sysctl.conf(5) and sysctl.d(5).
kernel.pid_max = 4194303
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_tw_reuse = 1
net.ipv4.ip_local_port_range = 1024 65000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_tw_buckets = 20480
net.ipv4.tcp_max_syn_backlog = 20480
net.core.netdev_max_backlog = 262144
net.ipv4.tcp_fin_timeout = 20
[root@ceph-node2 ~]# vi /etc/sysctl.conf 
[root@ceph-node2 ~]# cat /etc/sysctl.conf 
# sysctl settings are defined through files in
# /usr/lib/sysctl.d/, /run/sysctl.d/, and /etc/sysctl.d/.
#
# Vendors settings live in /usr/lib/sysctl.d/.
# To override a whole file, create a new file with the same in
# /etc/sysctl.d/ and put new settings there. To override
# only specific settings, add a file with a lexically later
# name in /etc/sysctl.d/ and put new settings there.
#
# For more information, see sysctl.conf(5) and sysctl.d(5).
kernel.pid_max = 4194303
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_tw_reuse = 1
net.ipv4.ip_local_port_range = 1024 65000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_tw_buckets = 20480
net.ipv4.tcp_max_syn_backlog = 20480
net.core.netdev_max_backlog = 262144
net.ipv4.tcp_fin_timeout = 20

[root@ceph-master ~]# sysctl -p
kernel.pid_max = 4194303
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_tw_reuse = 1
net.ipv4.ip_local_port_range = 1024 65000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_tw_buckets = 20480
net.ipv4.tcp_max_syn_backlog = 20480
net.core.netdev_max_backlog = 262144
net.ipv4.tcp_fin_timeout = 20
[root@ceph-node1 ~]# sysctl -p
kernel.pid_max = 4194303
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_tw_reuse = 1
net.ipv4.ip_local_port_range = 1024 65000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_tw_buckets = 20480
net.ipv4.tcp_max_syn_backlog = 20480
net.core.netdev_max_backlog = 262144
net.ipv4.tcp_fin_timeout = 20
[root@ceph-node2 ~]# sysctl -p
kernel.pid_max = 4194303
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_tw_reuse = 1
net.ipv4.ip_local_port_range = 1024 65000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_tw_buckets = 20480
net.ipv4.tcp_max_syn_backlog = 20480
net.core.netdev_max_backlog = 262144
net.ipv4.tcp_fin_timeout = 20
// Add the local yum repo on ceph-master; the node hosts will use ceph-master as their package mirror
[root@ceph-master ~]# cd /opt/
[root@ceph-master opt]# unzip ceph_images.zip 
[root@ceph-master opt]# unzip ceph-pkg.zip
[root@ceph-node1 opt]# unzip ceph_images.zip 
[root@ceph-node2 opt]# unzip ceph_images.zip 
[root@ceph-master ~]# mkdir -p /etc/yum.repos.d.bak
[root@ceph-master ~]# mv /etc/yum.repos.d/* /etc/yum.repos.d.bak/
[root@ceph-master ~]# cd /etc/yum.repos.d
[root@ceph-master yum.repos.d]# vi ceph.repo
[root@ceph-master yum.repos.d]# cat ceph.repo 
[ceph]
name=ceph
baseurl=file:///opt/ceph-pkg/
gpgcheck=0
enabled=1
[root@ceph-master yum.repos.d]# yum makecache 
// build the metadata cache
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
ceph                                                                                                                                             | 2.9 kB  00:00:00     
(1/3): ceph/filelists_db                                                                                                                         | 141 kB  00:00:00     
(2/3): ceph/other_db                                                                                                                             |  79 kB  00:00:00     
(3/3): ceph/primary_db                                                                                                                           | 158 kB  00:00:00     
Metadata Cache Created

// Install vsftpd on all three hosts and serve /opt over anonymous FTP
[root@ceph-master ~]# yum install -y vsftpd
[root@ceph-node1 opt]# yum install -y vsftpd
[root@ceph-node2 opt]# yum install -y vsftpd
[root@ceph-master opt]# echo "anon_root=/opt/" >> /etc/vsftpd/vsftpd.conf
[root@ceph-node1 opt]# echo "anon_root=/opt/" >> /etc/vsftpd/vsftpd.conf
[root@ceph-node2 opt]# echo "anon_root=/opt/" >> /etc/vsftpd/vsftpd.conf
[root@ceph-master opt]# systemctl enable --now vsftpd
Created symlink from /etc/systemd/system/multi-user.target.wants/vsftpd.service to /usr/lib/systemd/system/vsftpd.service.
[root@ceph-node1 opt]# systemctl enable --now vsftpd
Created symlink from /etc/systemd/system/multi-user.target.wants/vsftpd.service to /usr/lib/systemd/system/vsftpd.service.
[root@ceph-node2 opt]# systemctl enable --now vsftpd
Created symlink from /etc/systemd/system/multi-user.target.wants/vsftpd.service to /usr/lib/systemd/system/vsftpd.service.
// Point the two node hosts at ceph-master's repo (served over FTP)
[root@ceph-node1 yum.repos.d]# mkdir /etc/yum.repos.d.bak/ -p
[root@ceph-node1 opt]# mv /etc/yum.repos.d/* /etc/yum.repos.d.bak/
[root@ceph-node1 opt]# cd /etc/yum.repos.d
[root@ceph-node1 yum.repos.d]# vi ceph.repo
[root@ceph-node1 yum.repos.d]# cat ceph.repo 
[ceph]
name=ceph
baseurl=ftp://192.168.0.102/ceph-pkg/
gpgcheck=0
enabled=1
[root@ceph-node1 yum.repos.d]# yum clean all && yum makecache
[root@ceph-node2 yum.repos.d]# mkdir /etc/yum.repos.d.bak/ -p
[root@ceph-node2 opt]# mv /etc/yum.repos.d/* /etc/yum.repos.d.bak/
[root@ceph-node2 opt]# cd /etc/yum.repos.d
[root@ceph-node2 yum.repos.d]# vi ceph.repo
[root@ceph-node2 yum.repos.d]# cat ceph.repo 
[ceph]
name=ceph
baseurl=ftp://192.168.0.102/ceph-pkg/
gpgcheck=0
enabled=1
[root@ceph-node2 yum.repos.d]# yum clean all && yum makecache
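
Before installing anything from it, the FTP-served repo can be checked from a node; this sketch assumes curl is available (it is on a default CentOS 7 install):

curl -s ftp://192.168.0.102/ceph-pkg/ | head     # should list the mirrored packages
yum repolist enabled                             # the "ceph" repo should show a non-zero package count
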
// Configure the time source (chrony) on all three hosts; the nodes sync to ceph-master
[root@ceph-master ~]# yum install -y chrony
[root@ceph-master ~]# vi /etc/chrony.conf 
[root@ceph-master ~]# cat /etc/chrony.conf 
# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
server 0.centos.pool.ntp.org iburst
server 1.centos.pool.ntp.org iburst
server 2.centos.pool.ntp.org iburst
server 3.centos.pool.ntp.org iburst
server 192.168.0.102 iburst
allow all
local stratum 10
[root@ceph-master ~]# systemctl restart chronyd && clock -w
[root@ceph-node1 ~]# yum install -y chrony
[root@ceph-node1 ~]# vi /etc/chrony.conf 
[root@ceph-node1 ~]# cat /etc/chrony.conf 
# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
server 0.centos.pool.ntp.org iburst
server 1.centos.pool.ntp.org iburst
server 2.centos.pool.ntp.org iburst
server 3.centos.pool.ntp.org iburst
server 192.168.0.102 iburst
allow all
local stratum 10
[root@ceph-node1 ~]# systemctl restart chronyd && clock -w
[root@ceph-node2 ~]# yum install -y chrony
[root@ceph-node2 ~]# vi /etc/chrony.conf 
[root@ceph-node2 ~]# cat /etc/chrony.conf 
# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
server 0.centos.pool.ntp.org iburst
server 1.centos.pool.ntp.org iburst
server 2.centos.pool.ntp.org iburst
server 3.centos.pool.ntp.org iburst
server 192.168.0.102 iburst
allow all
local stratum 10
[root@ceph-node2 ~]# systemctl restart chronyd && clock -w
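
Once chronyd has been restarted on all three hosts, synchronization can be verified from any node with the chronyc client that ships with the chrony package:

chronyc sources -v     # the 192.168.0.102 source should eventually be marked '*' (selected) or '+'
chronyc tracking       # shows the current offset from the selected time source
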
// Install Docker on all three hosts
[root@ceph-master ~]# yum install -y yum-utils device-mapper-persistent-data lvm2 docker-ce python3
[root@ceph-master ~]# systemctl enable --now docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@ceph-master ~]# systemctl status docker
● docker.service - Docker Application Container Engine
   Loaded: loaded (/usr/lib/systemd/system/docker.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2023-06-20 23:12:20 EDT; 48s ago
     Docs: https://docs.docker.com
 Main PID: 3034 (dockerd)
    Tasks: 8
   Memory: 32.6M
   CGroup: /system.slice/docker.service
           └─3034 /usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
[root@ceph-node1 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2 docker-ce python3
[root@ceph-node1 ~]# systemctl enable --now docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@ceph-node1 ~]# systemctl status docker
● docker.service - Docker Application Container Engine
   Loaded: loaded (/usr/lib/systemd/system/docker.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2023-06-20 23:12:30 EDT; 39s ago
     Docs: https://docs.docker.com
 Main PID: 9700 (dockerd)
    Tasks: 8
   Memory: 36.2M
   CGroup: /system.slice/docker.service
           └─9700 /usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
[root@ceph-node2 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2 docker-ce python3
[root@ceph-node2 ~]# systemctl enable --now docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@ceph-node2 ~]# systemctl status docker
● docker.service - Docker Application Container Engine
   Loaded: loaded (/usr/lib/systemd/system/docker.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2023-06-20 23:12:33 EDT; 39s ago
     Docs: https://docs.docker.com
 Main PID: 2719 (dockerd)
    Tasks: 7
   Memory: 32.7M
   CGroup: /system.slice/docker.service
           └─2719 /usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock

// Install cephadm (all three hosts)
[root@ceph-master ~]# yum install -y cephadm
[root@ceph-node1 ~]# yum install -y cephadm
[root@ceph-node2 ~]# yum install -y cephadm

// Import the Docker images (all three hosts)
[root@ceph-master ~]# cd  /opt/ceph_images
[root@ceph-master ceph_images]# for i in `ls`;do docker load -i $i;done
34da11dd2489: Loading layer [==================================================>]  495.8MB/495.8MB
Loaded image: quay.io/ceph/ceph-grafana:6.7.4
5966005eac8d: Loading layer [==================================================>]  224.8MB/224.8MB
9936c6aaa811: Loading layer [==================================================>]  20.48kB/20.48kB
4f77fe8574f4: Loading layer [==================================================>]  45.82MB/45.82MB
dd44da1818c3: Loading layer [==================================================>]  148.7MB/148.7MB
1a968afdd6da: Loading layer [==================================================>]  821.8MB/821.8MB
Loaded image: quay.io/ceph/ceph:v15
1da8e4c8d307: Loading layer [==================================================>]  1.437MB/1.437MB
7a151fe67625: Loading layer [==================================================>]  2.595MB/2.595MB
5c66bc325d3d: Loading layer [==================================================>]  22.46MB/22.46MB
192df11ddc70: Loading layer [==================================================>]  26.97MB/26.97MB
1bb45a6cc3fe: Loading layer [==================================================>]  3.072kB/3.072kB
fd718f46814b: Loading layer [==================================================>]  3.584kB/3.584kB
Loaded image: quay.io/prometheus/alertmanager:v0.20.0
975e03895fb7: Loading layer [==================================================>]  4.688MB/4.688MB
f9fe8137e4e3: Loading layer [==================================================>]  2.765MB/2.765MB
78f40987f0cd: Loading layer [==================================================>]  16.88MB/16.88MB
Loaded image: quay.io/prometheus/node-exporter:v0.18.1
b260acf93b38: Loading layer [==================================================>]  87.18MB/87.18MB
2aca26a0561a: Loading layer [==================================================>]  49.98MB/49.98MB
78b7236da63d: Loading layer [==================================================>]  3.584kB/3.584kB
598b46af43c3: Loading layer [==================================================>]  13.31kB/13.31kB
c416dca9c497: Loading layer [==================================================>]  28.16kB/28.16kB
c41930d39a5d: Loading layer [==================================================>]  13.31kB/13.31kB
883efb2f2811: Loading layer [==================================================>]   5.12kB/5.12kB
a9374a821205: Loading layer [==================================================>]  113.2kB/113.2kB
c87cd4adecc5: Loading layer [==================================================>]  3.072kB/3.072kB
724129e57a6e: Loading layer [==================================================>]   5.12kB/5.12kB
Loaded image: quay.io/prometheus/prometheus:v2.18.1

[root@ceph-node1 ~]# cd  /opt/ceph_images
[root@ceph-node1 ceph_images]# for i in `ls`;do docker load -i $i;done
34da11dd2489: Loading layer [==================================================>]  495.8MB/495.8MB
Loaded image: quay.io/ceph/ceph-grafana:6.7.4
5966005eac8d: Loading layer [==================================================>]  224.8MB/224.8MB
9936c6aaa811: Loading layer [==================================================>]  20.48kB/20.48kB
4f77fe8574f4: Loading layer [==================================================>]  45.82MB/45.82MB
dd44da1818c3: Loading layer [==================================================>]  148.7MB/148.7MB
1a968afdd6da: Loading layer [==================================================>]  821.8MB/821.8MB
Loaded image: quay.io/ceph/ceph:v15
1da8e4c8d307: Loading layer [==================================================>]  1.437MB/1.437MB
7a151fe67625: Loading layer [==================================================>]  2.595MB/2.595MB
5c66bc325d3d: Loading layer [==================================================>]  22.46MB/22.46MB
192df11ddc70: Loading layer [==================================================>]  26.97MB/26.97MB
1bb45a6cc3fe: Loading layer [==================================================>]  3.072kB/3.072kB
fd718f46814b: Loading layer [==================================================>]  3.584kB/3.584kB
Loaded image: quay.io/prometheus/alertmanager:v0.20.0
975e03895fb7: Loading layer [==================================================>]  4.688MB/4.688MB
f9fe8137e4e3: Loading layer [==================================================>]  2.765MB/2.765MB
78f40987f0cd: Loading layer [==================================================>]  16.88MB/16.88MB
Loaded image: quay.io/prometheus/node-exporter:v0.18.1
b260acf93b38: Loading layer [==================================================>]  87.18MB/87.18MB
2aca26a0561a: Loading layer [==================================================>]  49.98MB/49.98MB
78b7236da63d: Loading layer [==================================================>]  3.584kB/3.584kB
598b46af43c3: Loading layer [==================================================>]  13.31kB/13.31kB
c416dca9c497: Loading layer [==================================================>]  28.16kB/28.16kB
c41930d39a5d: Loading layer [==================================================>]  13.31kB/13.31kB
883efb2f2811: Loading layer [==================================================>]   5.12kB/5.12kB
a9374a821205: Loading layer [==================================================>]  113.2kB/113.2kB
c87cd4adecc5: Loading layer [==================================================>]  3.072kB/3.072kB
724129e57a6e: Loading layer [==================================================>]   5.12kB/5.12kB
Loaded image: quay.io/prometheus/prometheus:v2.18.1
[root@ceph-node2 ~]# cd  /opt/ceph_images
[root@ceph-node2 ceph_images]# for i in `ls`;do docker load -i $i;done
34da11dd2489: Loading layer [==================================================>]  495.8MB/495.8MB
Loaded image: quay.io/ceph/ceph-grafana:6.7.4
5966005eac8d: Loading layer [==================================================>]  224.8MB/224.8MB
9936c6aaa811: Loading layer [==================================================>]  20.48kB/20.48kB
4f77fe8574f4: Loading layer [==================================================>]  45.82MB/45.82MB
dd44da1818c3: Loading layer [==================================================>]  148.7MB/148.7MB
1a968afdd6da: Loading layer [==================================================>]  821.8MB/821.8MB
Loaded image: quay.io/ceph/ceph:v15
1da8e4c8d307: Loading layer [==================================================>]  1.437MB/1.437MB
7a151fe67625: Loading layer [==================================================>]  2.595MB/2.595MB
5c66bc325d3d: Loading layer [==================================================>]  22.46MB/22.46MB
192df11ddc70: Loading layer [==================================================>]  26.97MB/26.97MB
1bb45a6cc3fe: Loading layer [==================================================>]  3.072kB/3.072kB
fd718f46814b: Loading layer [==================================================>]  3.584kB/3.584kB
Loaded image: quay.io/prometheus/alertmanager:v0.20.0
975e03895fb7: Loading layer [==================================================>]  4.688MB/4.688MB
f9fe8137e4e3: Loading layer [==================================================>]  2.765MB/2.765MB
78f40987f0cd: Loading layer [==================================================>]  16.88MB/16.88MB
Loaded image: quay.io/prometheus/node-exporter:v0.18.1
b260acf93b38: Loading layer [==================================================>]  87.18MB/87.18MB
2aca26a0561a: Loading layer [==================================================>]  49.98MB/49.98MB
78b7236da63d: Loading layer [==================================================>]  3.584kB/3.584kB
598b46af43c3: Loading layer [==================================================>]  13.31kB/13.31kB
c416dca9c497: Loading layer [==================================================>]  28.16kB/28.16kB
c41930d39a5d: Loading layer [==================================================>]  13.31kB/13.31kB
883efb2f2811: Loading layer [==================================================>]   5.12kB/5.12kB
a9374a821205: Loading layer [==================================================>]  113.2kB/113.2kB
c87cd4adecc5: Loading layer [==================================================>]  3.072kB/3.072kB
724129e57a6e: Loading layer [==================================================>]   5.12kB/5.12kB
Loaded image: quay.io/prometheus/prometheus:v2.18.1
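
After the load loop finishes, the five images shown in the output above should be present locally; a quick check on each host:

docker images --format '{{.Repository}}:{{.Tag}}' | sort
# expected, based on the load output above:
#   quay.io/ceph/ceph:v15
#   quay.io/ceph/ceph-grafana:6.7.4
#   quay.io/prometheus/alertmanager:v0.20.0
#   quay.io/prometheus/node-exporter:v0.18.1
#   quay.io/prometheus/prometheus:v2.18.1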

// Bootstrap the mon node; the bootstrap must be run from the /etc/ceph directory created below
[root@ceph-master etc]# mkdir -p /etc/ceph
[root@ceph-master etc]# cd /etc/ceph
[root@ceph-master ceph]# cephadm bootstrap --mon-ip 192.168.0.102 --skip-pull
Verifying podman|docker is present...
Verifying lvm2 is present...
Verifying time synchronization is in place...
Unit chronyd.service is enabled and running
Repeating the final host check...
podman|docker (/usr/bin/docker) is present
systemctl is present
lvcreate is present
Unit chronyd.service is enabled and running
Host looks OK
Cluster fsid: 7d2cbcc2-0ffa-11ee-ab70-000c29e38167
Verifying IP 192.168.0.102 port 3300 ...
Verifying IP 192.168.0.102 port 6789 ...
Mon IP 192.168.0.102 is in CIDR network 192.168.0.0/24
Extracting ceph user uid/gid from container image...
Creating initial keys...
Creating initial monmap...
Creating mon...
Waiting for mon to start...
Waiting for mon...
mon is available
Assimilating anything we can from ceph.conf...
Generating new minimal ceph.conf...
Restarting the monitor...
Setting mon public_network...
Creating mgr...
Verifying port 9283 ...
Wrote keyring to /etc/ceph/ceph.client.admin.keyring
Wrote config to /etc/ceph/ceph.conf
Waiting for mgr to start...
Waiting for mgr...
mgr not available, waiting (1/10)...
mgr not available, waiting (2/10)...
mgr not available, waiting (3/10)...
mgr not available, waiting (4/10)...
mgr not available, waiting (5/10)...
mgr not available, waiting (6/10)...
mgr not available, waiting (7/10)...
mgr is available
Enabling cephadm module...
Waiting for the mgr to restart...
Waiting for Mgr epoch 5...
Mgr epoch 5 is available
Setting orchestrator backend to cephadm...
Generating ssh key...
Wrote public SSH key to to /etc/ceph/ceph.pub
Adding key to root@localhost's authorized_keys...
Adding host ceph-master...
Deploying mon service with default placement...
Deploying mgr service with default placement...
Deploying crash service with default placement...
Enabling mgr prometheus module...
Deploying prometheus service with default placement...
Deploying grafana service with default placement...
Deploying node-exporter service with default placement...
Deploying alertmanager service with default placement...
Enabling the dashboard module...
Waiting for the mgr to restart...
Waiting for Mgr epoch 13...
Mgr epoch 13 is available
Generating a dashboard self-signed certificate...
Creating initial admin user...
Fetching dashboard port number...
Ceph Dashboard is now available at:

	     URL: https://ceph-master:8443/
	    User: admin
	Password: feimg9qb21

You can access the Ceph CLI with:

	sudo /usr/sbin/cephadm shell --fsid 7d2cbcc2-0ffa-11ee-ab70-000c29e38167 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring

Please consider enabling telemetry to help improve Ceph:

	ceph telemetry on

For more information see:

	https://docs.ceph.com/docs/master/mgr/telemetry/

Bootstrap complete.

[root@ceph-node1 ceph_images]# mkdir -p /etc/ceph
[root@ceph-node1 ceph_images]# cd /etc/ceph/
[root@ceph-node1 ceph]# cephadm bootstrap --mon-ip 192.168.0.103 --skip-pull
Verifying podman|docker is present...
Verifying lvm2 is present...
Verifying time synchronization is in place...
Unit chronyd.service is enabled and running
Repeating the final host check...
podman|docker (/usr/bin/docker) is present
systemctl is present
lvcreate is present
Unit chronyd.service is enabled and running
Host looks OK
Cluster fsid: 6a458360-0ff9-11ee-8dc8-000c29d7c559
Verifying IP 192.168.0.103 port 3300 ...
Verifying IP 192.168.0.103 port 6789 ...
Mon IP 192.168.0.103 is in CIDR network 192.168.0.0/24
Extracting ceph user uid/gid from container image...
Creating initial keys...
Creating initial monmap...
Creating mon...
Waiting for mon to start...
Waiting for mon...
mon is available
Assimilating anything we can from ceph.conf...
Generating new minimal ceph.conf...
Restarting the monitor...
Setting mon public_network...
Creating mgr...
Verifying port 9283 ...
Wrote keyring to /etc/ceph/ceph.client.admin.keyring
Wrote config to /etc/ceph/ceph.conf
Waiting for mgr to start...
Waiting for mgr...
mgr not available, waiting (1/10)...
mgr not available, waiting (2/10)...
mgr not available, waiting (3/10)...
mgr not available, waiting (4/10)...
mgr not available, waiting (5/10)...
mgr not available, waiting (6/10)...
mgr not available, waiting (7/10)...
mgr not available, waiting (8/10)...
mgr is available
Enabling cephadm module...
Waiting for the mgr to restart...
Waiting for Mgr epoch 5...
Mgr epoch 5 is available
Setting orchestrator backend to cephadm...
Generating ssh key...
Wrote public SSH key to to /etc/ceph/ceph.pub
Adding key to root@localhost's authorized_keys...
Adding host ceph-node1...
Deploying mon service with default placement...
Deploying mgr service with default placement...
Deploying crash service with default placement...
Enabling mgr prometheus module...
Deploying prometheus service with default placement...
Deploying grafana service with default placement...
Deploying node-exporter service with default placement...
Deploying alertmanager service with default placement...
Enabling the dashboard module...
Waiting for the mgr to restart...
Waiting for Mgr epoch 13...
Mgr epoch 13 is available
Generating a dashboard self-signed certificate...
Creating initial admin user...
Fetching dashboard port number...
Ceph Dashboard is now available at:

	     URL: https://ceph-node1:8443/
	    User: admin
	Password: bhb1ploe4m

You can access the Ceph CLI with:

	sudo /usr/sbin/cephadm shell --fsid 6a458360-0ff9-11ee-8dc8-000c29d7c559 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring

Please consider enabling telemetry to help improve Ceph:

	ceph telemetry on

For more information see:

	https://docs.ceph.com/docs/master/mgr/telemetry/

Bootstrap complete.

[root@ceph-node2 ceph_images]# mkdir -p /etc/ceph
[root@ceph-node2 ceph_images]# cd /etc/ceph/
[root@ceph-node2 ceph]# cephadm bootstrap --mon-ip 192.168.0.104 --skip-pull
Verifying podman|docker is present...
Verifying lvm2 is present...
Verifying time synchronization is in place...
Unit chronyd.service is enabled and running
Repeating the final host check...
podman|docker (/usr/bin/docker) is present
systemctl is present
lvcreate is present
Unit chronyd.service is enabled and running
Host looks OK
Cluster fsid: 4c3664ac-0fe5-11ee-842e-000c29420986
Verifying IP 192.168.0.104 port 3300 ...
Verifying IP 192.168.0.104 port 6789 ...
Mon IP 192.168.0.104 is in CIDR network 192.168.0.0/24
Extracting ceph user uid/gid from container image...
Creating initial keys...
Creating initial monmap...
Creating mon...
Waiting for mon to start...
Waiting for mon...
mon is available
Assimilating anything we can from ceph.conf...
Generating new minimal ceph.conf...
Restarting the monitor...
Setting mon public_network...
Creating mgr...
Verifying port 9283 ...
Wrote keyring to /etc/ceph/ceph.client.admin.keyring
Wrote config to /etc/ceph/ceph.conf
Waiting for mgr to start...
Waiting for mgr...
mgr not available, waiting (1/10)...
mgr not available, waiting (2/10)...
mgr not available, waiting (3/10)...
mgr not available, waiting (4/10)...
mgr not available, waiting (5/10)...
mgr not available, waiting (6/10)...
mgr not available, waiting (7/10)...
mgr not available, waiting (8/10)...
mgr not available, waiting (9/10)...
mgr not available, waiting (10/10)...
mgr is available
Enabling cephadm module...
Waiting for the mgr to restart...
Waiting for Mgr epoch 5...
Mgr epoch 5 is available
Setting orchestrator backend to cephadm...
Generating ssh key...
Wrote public SSH key to to /etc/ceph/ceph.pub
Adding key to root@localhost's authorized_keys...
Adding host ceph-node2...
Deploying mon service with default placement...
Deploying mgr service with default placement...
Deploying crash service with default placement...
Enabling mgr prometheus module...
Deploying prometheus service with default placement...
Deploying grafana service with default placement...
Deploying node-exporter service with default placement...
Deploying alertmanager service with default placement...
Enabling the dashboard module...
Waiting for the mgr to restart...
Waiting for Mgr epoch 14...
Mgr epoch 14 is available
Generating a dashboard self-signed certificate...
Creating initial admin user...
Fetching dashboard port number...
Ceph Dashboard is now available at:

	     URL: https://ceph-node2:8443/
	    User: admin
	Password: qv9wx3lzuy

You can access the Ceph CLI with:

	sudo /usr/sbin/cephadm shell --fsid 4c3664ac-0fe5-11ee-842e-000c29420986 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring

Please consider enabling telemetry to help improve Ceph:

	ceph telemetry on

For more information see:

	https://docs.ceph.com/docs/master/mgr/telemetry/    // seeing this output means the bootstrap succeeded

Bootstrap complete.
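
Before ceph-common is installed, the state of a freshly bootstrapped cluster can still be inspected through cephadm's containerized shell (the config and keyring paths come from the bootstrap output above):

cephadm shell -- ceph -s     # runs ceph -s inside the ceph container using /etc/ceph/ceph.conf
cephadm ls                   # lists the daemons cephadm has deployed on this host
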
// Install the ceph-common tools on the ceph-master node
[root@ceph-master ~]# yum install -y ceph-common

// Set up passwordless SSH from ceph-master to the node hosts
[root@ceph-master ~]# ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): 
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:kPQ1ybWosy0fepmuPEUhtdhpNQCekzxC+prg8YWU9d8 root@ceph-master
The key's randomart image is:
+---[RSA 2048]----+
|      + .+=++    |
|     * *.*+* o   |
|    + + @.*..    |
|   . o o *..     |
|  o . o S.. E    |
| . + +   +.      |
|  . +   o.oo     |
|       ..++.     |
|        +=o      |
+----[SHA256]-----+
[root@ceph-master ~]# ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph-node1
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/etc/ceph/ceph.pub"
The authenticity of host 'ceph-node1 (192.168.0.103)' can't be established.
ECDSA key fingerprint is SHA256:PSHSfM2W+MAZsBms32kp8G4feRRwBWYsjmVP4tT0oWM.
ECDSA key fingerprint is MD5:a2:9a:39:17:02:70:3d:eb:67:17:ab:2f:01:e7:61:ab.
Are you sure you want to continue connecting (yes/no)? yes
root@ceph-node1's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'root@ceph-node1'"
and check to make sure that only the key(s) you wanted were added.

[root@ceph-master ~]# ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph-node2
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/etc/ceph/ceph.pub"
The authenticity of host 'ceph-node2 (192.168.0.104)' can't be established.
ECDSA key fingerprint is SHA256:hDhAwoOA8TiUV1S4slkUeNDXsGH4JzgxVPaI7iKx0rQ.
ECDSA key fingerprint is MD5:b3:44:fa:3d:2a:dd:53:71:55:8d:db:c1:fd:26:a9:ab.
Are you sure you want to continue connecting (yes/no)? yes
root@ceph-node2's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'root@ceph-node2'"
and check to make sure that only the key(s) you wanted were added.

// Add the hosts to the cluster: the master and the two nodes form one cluster
[root@ceph-master ~]# ceph orch host add ceph-master
Added host 'ceph-master'
[root@ceph-master ~]# ceph orch host add ceph-node1
Added host 'ceph-node1'
[root@ceph-master ~]# ceph orch host add ceph-node2
Added host 'ceph-node2'


// Place mons on the three hosts and add labels
[root@ceph-master ~]# ceph orch apply mon ceph-master,ceph-node1,ceph-node2
Scheduled mon update...
[root@ceph-master ~]# ceph orch host label add ceph-master mon
Added label mon to host ceph-master
[root@ceph-master ~]# ceph orch host label add ceph-node1 mon
Added label mon to host ceph-node1
[root@ceph-master ~]# ceph orch host label add ceph-node2 mon
Added label mon to host ceph-node2
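
The host list and label assignments can be confirmed with the orchestrator:

ceph orch host ls     # should list ceph-master, ceph-node1 and ceph-node2, each with the mon label
ceph orch ps          # shows which daemons are currently placed on each host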

// Check the running containers on the nodes with ps -ef | grep docker
[root@ceph-node1 ceph]# ps -ef | grep docker
[root@ceph-node2 ~]# ps -ef | grep docker

// Verify the available devices
[root@ceph-master ~]# ceph orch device ls
Hostname     Path      Type  Serial  Size   Health   Ident  Fault  Available  
ceph-master  /dev/sdb  hdd           21.4G  Unknown  N/A    N/A    Yes        
ceph-node1   /dev/sdb  hdd           21.4G  Unknown  N/A    N/A    Yes        
ceph-node2   /dev/sdb  hdd           21.4G  Unknown  N/A    N/A    Yes        

// Restart the Ceph services
[root@ceph-master ~]# systemctl restart ceph.target

Deploy OSDs
The OSDs store the data.

// List the available disk devices
[root@ceph-master ~]# ceph orch device ls
Hostname     Path      Type  Serial  Size   Health   Ident  Fault  Available  
ceph-master  /dev/sdb  hdd           21.4G  Unknown  N/A    N/A    Yes        
ceph-node1   /dev/sdb  hdd           21.4G  Unknown  N/A    N/A    Yes        
ceph-node2   /dev/sdb  hdd           21.4G  Unknown  N/A    N/A    Yes        

// Add them to the Ceph cluster: automatically create OSDs on all unused devices
[root@ceph-master ~]# ceph orch apply osd --all-available-devices
Scheduled osd.all-available-devices update...

// Check the OSDs
[root@ceph-master ~]# ceph -s
  cluster:
    id:     7d2cbcc2-0ffa-11ee-ab70-000c29e38167
    health: HEALTH_WARN
            Degraded data redundancy: 1 pg undersized
 
  services:
    mon: 2 daemons, quorum ceph-master,ceph-node1 (age 29s)
    mgr: ceph-node1.fvwnlz(active, since 12m), standbys: ceph-master.xzmino
    osd: 3 osds: 3 up (since 98s), 3 in (since 98s)
 
  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   2.0 GiB used, 38 GiB / 40 GiB avail
    pgs:     1 active+undersized
[root@ceph-master ~]# ceph df
--- RAW STORAGE ---
CLASS  SIZE    AVAIL   USED    RAW USED  %RAW USED
hdd    60 GiB  57 GiB  11 MiB   3.0 GiB       5.02
TOTAL  60 GiB  57 GiB  11 MiB   3.0 GiB       5.02
 
--- POOLS ---
POOL                   ID  PGS  STORED  OBJECTS  USED  %USED  MAX AVAIL
device_health_metrics   1    1     0 B        0   0 B      0     18 GiB
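
The OSD-to-host mapping can also be checked in the CRUSH tree view:

ceph osd tree     # expect one osd in 'up'/'in' state under each of the three hosts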

Deploy MDS
The MDS stores the CephFS metadata.
CephFS needs two pools, cephfs-metadata and cephfs-data, which store the file metadata and the file data respectively.

[root@ceph-master ~]# ceph osd pool create cephfs-metadata 32 32
pool 'cephfs-metadata' created
[root@ceph-master ~]# ceph osd pool create cephfs-data 64 64
pool 'cephfs-data' created
[root@ceph-master ~]# ceph fs new cephfs cephfs-metadata cephfs-data
new fs with metadata pool 2 and data pool 3

// List the CephFS filesystems
[root@ceph-master ~]# ceph fs ls
name: cephfs, metadata pool: cephfs-metadata, data pools: [cephfs-data ]
[root@ceph-master ~]# ceph orch apply mds cephfs --placement="3 ceph-master ceph-node1 ceph-node2"
Scheduled mds.cephfs update...

// Check the MDS daemons: there are three, one active and two in standby
[root@ceph-master ~]# ceph -s
  cluster:
    id:     7d2cbcc2-0ffa-11ee-ab70-000c29e38167
    health: HEALTH_WARN
            1 failed cephadm daemon(s)
            Slow OSD heartbeats on back (longest 1295.833ms)
            Slow OSD heartbeats on front (longest 1277.546ms)
 
  services:
    mon: 2 daemons, quorum ceph-master,ceph-node1 (age 23m)
    mgr: ceph-master.xzmino(active, since 38m), standbys: ceph-node1.fvwnlz
    mds: cephfs:1 {0=cephfs.ceph-node1.hmaono=up:active} 2 up:standby
    osd: 3 osds: 3 up (since 39m), 3 in (since 39m)
 
  data:
    pools:   3 pools, 97 pgs
    objects: 22 objects, 2.2 KiB
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     97 active+clean
[root@ceph-master ~]# ceph orch apply rgw rgw01 zone01 --placement="3 ceph-master ceph-node1 ceph-node2"
Scheduled rgw.rgw01.zone01 update...
[root@ceph-master ~]# ceph orch ls
NAME                       RUNNING  REFRESHED  AGE  PLACEMENT                                  IMAGE NAME                                IMAGE ID      
alertmanager                   1/1  78s ago    2d   count:1                                    quay.io/prometheus/alertmanager:v0.20.0   0881eb8f169f  
crash                          3/3  78s ago    3d   *                                          quay.io/ceph/ceph:v15                     93146564743f  
grafana                        1/1  78s ago    2d   count:1                                    quay.io/ceph/ceph-grafana:6.7.4           557c83e11646  
mds.cephfs                     3/3  78s ago    8m   ceph-master;ceph-node1;ceph-node2;count:3  quay.io/ceph/ceph:v15                     93146564743f  
mgr                            2/2  78s ago    3d   count:2                                    quay.io/ceph/ceph:v15                     93146564743f  
mon                            2/3  78s ago    2d   ceph-master;ceph-node1;ceph-node2          quay.io/ceph/ceph:v15                     mix           
node-exporter                  1/3  78s ago    2d   *                                          quay.io/prometheus/node-exporter:v0.18.1  e5a616e4b9cf  
osd.all-available-devices      3/3  78s ago    13m  *                                          quay.io/ceph/ceph:v15                     93146564743f  
prometheus                     1/1  78s ago    2d   count:1                                    quay.io/prometheus/prometheus:v2.18.1     de242295e225  
rgw.rgw01.zone01               3/3  78s ago    4m   ceph-master;ceph-node1;ceph-node2;count:3  quay.io/ceph/ceph:v15                     93146564743f  
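
The individual MDS and RGW daemons and where they landed can be listed per daemon type:

ceph orch ps --daemon-type mds     # the three MDS daemons deployed above
ceph orch ps --daemon-type rgw     # one rgw.rgw01.zone01 daemon per host, status 'running'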

Create an authorized account for the client

// -o saves the client keyring to a file. This is one approach: create the authorization keyring first,
// then export just the key from it and hand that to the client
[root@ceph-master ~]# ceph auth get-or-create client.fsclient mon 'allow r' mds 'allow rw' osd 'allow rwx pool=cephfs-data' -o ceph.client.fsclient.keyring
// Export the key
[root@ceph-master ~]# ceph auth print-key client.fsclient > fsclient.key
[root@ceph-master ~]# scp fsclient.key root@ceph-node1:/etc/ceph/
fsclient.key   
// copy the key to the client nodes
[root@ceph-master ~]# scp fsclient.key root@ceph-node2:/etc/ceph/
fsclient.key                                               
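
The granted caps and the distributed key can be double-checked before the clients mount anything (run on ceph-master, which holds the admin keyring):

ceph auth get client.fsclient                     # shows the mon/mds/osd caps granted above
ssh root@ceph-node1 cat /etc/ceph/fsclient.key    # confirm the key file arrived on the client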

Client usage

// Mount CephFS on a client (ceph-node1 or ceph-node2)
[root@ceph-node1 ~]# yum -y install ceph-common

// Check the ceph kernel module
[root@ceph-master ~]# modinfo ceph
filename:       /lib/modules/3.10.0-1160.el7.x86_64/kernel/fs/ceph/ceph.ko.xz
license:        GPL
description:    Ceph filesystem for Linux
author:         Patience Warnick <patience@newdream.net>
author:         Yehuda Sadeh <yehuda@hq.newdream.net>
author:         Sage Weil <sage@newdream.net>
alias:          fs-ceph
retpoline:      Y
rhelversion:    7.9
srcversion:     EB765DDC1F7F8219F09D34C
depends:        libceph
intree:         Y
vermagic:       3.10.0-1160.el7.x86_64 SMP mod_unload modversions 
signer:         CentOS Linux kernel signing key
sig_key:        E1:FD:B0:E2:A7:E8:61:A1:D1:CA:80:A2:3D:CF:0D:BA:3A:A4:AD:F5
sig_hashalgo:   sha256

// Confirm the key granted above is present; without the key file the client cannot authenticate
[root@ceph-master ~]# ls /etc/ceph/
ceph.client.admin.keyring  ceph.conf  ceph.pub  fsclient.key  rbdmap

// Create the mount point (here /cephfs)
[root@ceph-master ~]# mkdir /cephfs
[root@ceph-master ~]# mount -t ceph ceph-master:6789,ceph-node1:6789,ceph-node2:6789:/ /cephfs -o name=fsclient,secretfile=/etc/ceph/fsclient.key
// Check whether the mount succeeded
[root@ceph-master ~]# df -TH
Filesystem                                                 Type      Size  Used Avail Use% Mounted on
devtmpfs                                                   devtmpfs  942M     0  942M   0% /dev
tmpfs                                                      tmpfs     954M     0  954M   0% /dev/shm
tmpfs                                                      tmpfs     954M   27M  928M   3% /run
tmpfs                                                      tmpfs     954M     0  954M   0% /sys/fs/cgroup
/dev/mapper/centos-root                                    xfs        51G  8.9G   42G  18% /
/dev/sda1                                                  xfs       1.1G  158M  906M  15% /boot
overlay                                                    overlay    51G  8.9G   42G  18% /var/lib/docker/overlay2/6cbf4da67a746fe2ff1e50c79cb475a1394b942299b04a4c69993f801ccc7946/merged
overlay                                                    overlay    51G  8.9G   42G  18% /var/lib/docker/overlay2/91f9520e61e87f9c1e46c64fdd662f8275eebabb8d1f521c91a851dcf30dba88/merged
overlay                                                    overlay    51G  8.9G   42G  18% /var/lib/docker/overlay2/55c8cd7f962bccc65f82c0dc0bc89957f2bacbb9664cad0462a726cab454dbe6/merged
overlay                                                    overlay    51G  8.9G   42G  18% /var/lib/docker/overlay2/07a91fcecae7e2cc2bbe4702426770e77b631abe24a59631fed18eba0d79724e/merged
overlay                                                    overlay    51G  8.9G   42G  18% /var/lib/docker/overlay2/c0981f094574a5839cc153b6fe782fa2e93211eac5cf423098a89ef23cfed9b5/merged
tmpfs                                                      tmpfs     191M     0  191M   0% /run/user/0
overlay                                                    overlay    51G  8.9G   42G  18% /var/lib/docker/overlay2/17ba80066e44ebba856eaa60115604e00dd3cea2c55e8499e190d89fe1ec7194/merged
overlay                                                    overlay    51G  8.9G   42G  18% /var/lib/docker/overlay2/6d6839296326663edc027d506460cd9e263ccfe089630c9d61ae4be5e116205f/merged
overlay                                                    overlay    51G  8.9G   42G  18% /var/lib/docker/overlay2/c38e803961f42c64f4225d961d724872d0ffe7dcf1bc0121c7442043ccce8ce4/merged
overlay                                                    overlay    51G  8.9G   42G  18% /var/lib/docker/overlay2/23c4a0ed12df0ec73838285364f589f3564a38379644241db93140f4302f46ca/merged
overlay                                                    overlay    51G  8.9G   42G  18% /var/lib/docker/overlay2/67cdf1295c5c41782f705d40296f81c9199bfbea32d228d88b4ed5b81ade976c/merged
192.168.0.102:6789,192.168.0.103:6789,192.168.0.104:6789:/ ceph       20G     0   20G   0% /cephfs
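
A quick write/read test confirms the client really has rwx access to the cephfs-data pool (the file name here is arbitrary):

echo "cephfs write test" > /cephfs/testfile
cat /cephfs/testfile
ls -lh /cephfs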

// Configure a persistent mount
// _netdev marks this as a network mount, so it is skipped when the network is unreachable
// noatime improves file performance by not updating the access timestamp on every read
[root@ceph-master ~]# vi /etc/fstab 
[root@ceph-master ~]# cat /etc/fstab 

#
# /etc/fstab
# Created by anaconda on Tue May 30 07:26:56 2023
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/centos-root /                       xfs     defaults        0 0
UUID=11b90b24-5596-4743-9e83-9437d0a1a1a0 /boot                   xfs     defaults        0 0
#/dev/mapper/centos-swap swap                    swap    defaults        0 0
ceph-master:6789,ceph-node1:6789,ceph-node2:6789:/ /cephfs ceph name=fsclient,secretfile=/etc/ceph/fsclient.key,_netdev,noatime 0 0
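
The new fstab entry can be validated without rebooting by unmounting and letting mount -a re-read it (safe as long as nothing is using /cephfs):

umount /cephfs
mount -a              # remounts everything in /etc/fstab, including the new ceph line
df -hT /cephfs        # the cephfs mount should reappear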