Learning Linux: Ceph Deployment

Preliminary preparation

Resource download
Link: https://pan.baidu.com/s/1Yof-X-fD8kYLSmLAWFo5Ug  Extraction code: txj5

Environment preparation
Hostname	IP address
ceph1	192.168.88.11/24
ceph2	192.168.88.12/24
ceph3	192.168.88.13/24
client1	192.168.88.10/24
pubserver	192.168.88.240/24
Configure the Ansible environment on pubserver
[root@pubserver ~]# mkdir ceph
[root@pubserver ~]# cd ceph
[root@pubserver ceph]# vim ansible.cfg 
[defaults]
inventory = inventory
host_key_checking = false

[root@pubserver ceph]# vim inventory 
[ceph]    # define the ceph group
ceph1 ansible_host=192.168.88.11
ceph2 ansible_host=192.168.88.12
ceph3 ansible_host=192.168.88.13

[clients]  # define the client group
client1 ansible_host=192.168.88.10

[all:vars]
ansible_ssh_user=root
ansible_ssh_pass=a
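
# Before going further, it is worth a quick connectivity check (a sketch; it
# assumes sshpass is installed on pubserver so Ansible can log in with the
# password above): every host should answer with pong
[root@pubserver ceph]# ansible all -m ping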

[root@pubserver ceph]# mkdir files/
[root@pubserver ceph]# vim files/local88.repo 
[BaseOS]
name = BaseOS
baseurl = ftp://192.168.88.240/dvd/BaseOS
enabled = 1
gpgcheck = 0

[AppStream]
name = AppStream
baseurl = ftp://192.168.88.240/dvd/AppStream
enabled = 1
gpgcheck = 0

[rpms]
name = rpms
baseurl = ftp://192.168.88.240/rpms
enabled = 1
gpgcheck = 0

# Configure the yum repositories
[root@pubserver ceph]# vim 01-upload-repo.yml 
---
- name: config repos.d
  hosts: all
  tasks:
    - name: delete repos.d
      file:
        path: /etc/yum.repos.d
        state: absent

    - name: create repos.d
      file:
        path: /etc/yum.repos.d
        state: directory
        mode: '0755'

    - name: upload local88
      copy:
        src: files/local88.repo
        dest: /etc/yum.repos.d/
[root@pubserver ceph]# ansible-playbook 01-upload-repo.yml
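
# Optional check (a quick sketch using an ad-hoc command): the three repos
# should now be listed on every node
[root@pubserver ceph]# ansible all -m shell -a 'yum repolist'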

Configure name resolution
# Configure name resolution on the three hosts; the resolved names must match the actual hostnames
[root@pubserver ceph]# vim 02-modify-hosts.yml 
---
- name: add names
  hosts: ceph
  tasks:
    - name: add block
      blockinfile:   # like the lineinfile module, but can add multiple lines to the target file
        path: /etc/hosts
        block: |
          192.168.88.11 ceph1
          192.168.88.12 ceph2
          192.168.88.13 ceph3
          192.168.88.240 quay.io
[root@pubserver ceph]# ansible-playbook 02-modify-hosts.yml 
# Check the result, using ceph1 as an example
[root@ceph1 ~]# tail -6 /etc/hosts
# BEGIN ANSIBLE MANAGED BLOCK
192.168.88.11 ceph1
192.168.88.12 ceph2
192.168.88.13 ceph3
192.168.88.240 quay.io
# END ANSIBLE MANAGED BLOCK
Configure pubserver as an NTP server
# 1. Check pubserver's own time zone; correct it if it is wrong
[root@pubserver ~]# timedatectl 
[root@pubserver ~]# timedatectl set-timezone Asia/Shanghai
# 2. Check the time; adjust it if it is wrong
[root@pubserver ~]# date
[root@pubserver ~]# date -s "YYYY-MM-DD HH:MM:SS"
# 3. Configure the chronyd service
[root@pubserver ~]# yum install -y chrony
[root@pubserver ~]# vim /etc/chrony.conf 
...omitted...
# Allow NTP client access from local network.
allow 192.168.0.0/16  # serve time to clients in the 192.168 network

# Serve time even if not synchronized to a time source.
local stratum 10   # serve time to clients even without an upstream time source
...omitted...
[root@pubserver ~]# systemctl enable chronyd --now
[root@pubserver ~]# ss -ulnp   # NTP uses UDP port 123
State  Recv-Q Send-Q   Local Address:Port   Peer Address:Port Process                           
UNCONN 0      0            127.0.0.1:323         0.0.0.0:*     users:(("chronyd",pid=554,fd=5)) 
UNCONN 0      0              0.0.0.0:123         0.0.0.0:*     users:(("chronyd",pid=554,fd=6)) 
Configure ceph1-ceph3 to use pubserver's time service
[root@pubserver ceph]# vim 03-config-ntp.yml
---
- name: config ntp
  hosts: ceph
  tasks:
    - name: install chrony   # install the chrony package
      yum:
        name: chrony
        state: present

    - name: modify config    # replace the line starting with pool
      lineinfile:
        path: /etc/chrony.conf
        regexp: '^pool'
        line: "pool 192.168.88.240 iburst"
      notify: restart ntp    # run the "restart ntp" handler if this task reports CHANGED

  handlers:
    - name: restart ntp   # handlers run only when notified
      service:
        name: chronyd
        state: restarted
        enabled: yes
[root@pubserver ceph]# ansible-playbook 03-config-ntp.yml

# Check the result on ceph1 as an example
[root@ceph1 ~]# chronyc sources -v

  .-- Source mode  '^' = server, '=' = peer, '#' = local clock.
 / .- Source state '*' = current best, '+' = combined, '-' = not combined,
| /             'x' = may be in error, '~' = too variable, '?' = unusable.
||                                                 .- xxxx [ yyyy ] +/- zzzz
||      Reachability register (octal) -.           |  xxxx = adjusted offset,
||      Log2(Polling interval) --.      |          |  yyyy = measured offset,
||                                \     |          |  zzzz = estimated error.
||                                 |    |           \
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^* 192.168.88.240               10   6    37     4  -8731ns[-6313us] +/- 7118us
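
# Optionally, chronyc tracking gives more detail (a quick check): the Reference
# ID should be 192.168.88.240 and the Stratum 11, one below the server's 10
[root@ceph1 ~]# chronyc tracking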
Prepare the container registry server
# 1. Install the package
[root@pubserver ~]# yum install -y docker-distribution-2.6.2-2.git48294d9.el7.x86_64.rpm
# 2. Start the service
[root@pubserver ~]# systemctl enable docker-distribution --now
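
# 3. Optional check (a sketch, assuming the package's default port 5000 in
# /etc/docker-distribution/registry/config.yml): the registry API should answer
# with an empty repository list until images are pushed
[root@pubserver ~]# curl http://192.168.88.240:5000/v2/_catalog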
Install packages and import images
# 1. Install packages on the ceph cluster nodes
[root@pubserver ceph]# vim 04-install-ceph.yml
---
- name: install pkg
  hosts: ceph
  tasks:
    - name: install pkg    # install the required packages
      yum:
        name: python39,podman,lvm2
        state: present
[root@pubserver ceph]# ansible-playbook 04-install-ceph.yml 

# 2. Copy the /linux-soft/s2/zzg/ceph_soft/ceph-server directory from the host machine to every ceph node, then import the images
[root@ceph1 ~]# cd ceph-server/
[root@ceph1 ceph-server]# for c in *.tar
> do
> podman load -i $c
> done
[root@ceph2 ~]# cd ceph-server/
[root@ceph2 ceph-server]# for c in *.tar
> do
> podman load -i $c
> done
[root@ceph3 ~]# cd ceph-server/
[root@ceph3 ceph-server]# for c in *.tar
> do
> podman load -i $c
> done

# 3. Check the result
[root@ceph1 ceph-server]# podman images
REPOSITORY                        TAG         IMAGE ID      CREATED        SIZE
quay.io/ceph/ceph                 v17         cc65afd6173a  7 weeks ago    1.4 GB
quay.io/ceph/ceph-grafana         8.3.5       dad864ee21e9  8 months ago   571 MB
quay.io/prometheus/prometheus     v2.33.4     514e6a882f6e  9 months ago   205 MB
quay.io/prometheus/node-exporter  v1.3.1      1dbe0e931976  12 months ago  22.3 MB
quay.io/prometheus/alertmanager   v0.23.0     ba2b418f427c  15 months ago  58.9 MB

# 4. Configure ceph1-ceph3 to use pubserver as the registry server
[root@pubserver ceph]# vim 05-config-registry.yml
---
- name: config registry
  hosts: ceph
  tasks:
    - name: modify config
      blockinfile:
        path: /etc/containers/registries.conf
        block: |
          [[registry]]
          location = "quay.io:5000"  # 指定服务器地址
          insecure = true              # 允许使用http协议
[root@pubserver ceph]# ansible-playbook 05-config-registry.yml 

# 5. Check the result on ceph1 as an example
[root@ceph1 ceph_soft]# tail -5 /etc/containers/registries.conf
# BEGIN ANSIBLE MANAGED BLOCK
[[registry]]
location = "quay.io:5000"
insecure = true
# END ANSIBLE MANAGED BLOCK

# 6. Retag the images so they can be pushed to the self-hosted registry
[root@ceph1 ceph-server]# podman tag quay.io/ceph/ceph:v17 quay.io:5000/ceph/ceph:v17
[root@ceph1 ceph-server]# podman tag quay.io/ceph/ceph-grafana:8.3.5 quay.io:5000/ceph/ceph-grafana:8.3.5
[root@ceph1 ceph-server]# podman tag quay.io/prometheus/prometheus:v2.33.4 quay.io:5000/prometheus/prometheus:v2.33.4
[root@ceph1 ceph-server]# podman tag quay.io/prometheus/node-exporter:v1.3.1 quay.io:5000/prometheus/node-exporter:v1.3.1
[root@ceph1 ceph-server]# podman tag quay.io/prometheus/alertmanager:v0.23.0 quay.io:5000/prometheus/alertmanager:v0.23.0

# 7. Push the images to the registry so other nodes can pull them from it
[root@ceph1 ceph-server]# podman push quay.io:5000/ceph/ceph:v17
[root@ceph1 ceph-server]# podman push quay.io:5000/ceph/ceph-grafana:8.3.5 
[root@ceph1 ceph-server]# podman push quay.io:5000/prometheus/prometheus:v2.33.4 
[root@ceph1 ceph-server]# podman push quay.io:5000/prometheus/node-exporter:v1.3.1 
[root@ceph1 ceph-server]# podman push quay.io:5000/prometheus/alertmanager:v0.23.0 
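
# Optional check (a sketch): the registry catalog should now list the five
# repositories that were just pushed
[root@ceph1 ceph-server]# curl http://quay.io:5000/v2/_catalog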
Install Ceph
Create the cluster
# 1. Initialize the ceph cluster on ceph1.
# After initialization completes, a passwordless SSH key pair is generated automatically under /etc/ceph/
[root@ceph1 ceph-server]# ./cephadm bootstrap --mon-ip 192.168.88.11 --initial-dashboard-password=123456 --dashboard-password-noupdate
# 2. Ceph is deployed as containers; verify that 6 containers have been created.
[root@ceph1 ceph-server]# podman ps

# 3. Copy the public key to the other nodes
[root@ceph1 ceph-server]# ssh-copy-id -f -i /etc/ceph/ceph.pub ceph2
[root@ceph1 ceph-server]# ssh-copy-id -f -i /etc/ceph/ceph.pub ceph3

# 4. Enter the admin container and check the ceph status
[root@ceph1 ceph-server]# ./cephadm shell   # enter the admin container
[ceph: root@ceph1 /]# ceph -s  # check ceph status
  cluster:
    id:     1ddfccf2-77b4-11ed-8941-000c2953b002
    health: HEALTH_WARN
            OSD count 0 < osd_pool_default_size 3
 
  services:
    mon: 1 daemons, quorum ceph1 (age 11m)
    mgr: ceph1.vnoivz(active, since 10m)
    osd: 0 osds: 0 up, 0 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs: 

# 5. List the cluster services: each count:1 service is fully running; the rest will fill in as more hosts are added
[ceph: root@ceph1 /]# ceph orch ls
NAME           PORTS        RUNNING  REFRESHED  AGE  PLACEMENT  
alertmanager   ?:9093,9094      1/1  91s ago    3m   count:1    
crash                           1/3  91s ago    4m   *          
grafana        ?:3000           1/1  91s ago    4m   count:1    
mgr                             1/2  91s ago    4m   count:2    
mon                             1/5  91s ago    4m   count:5    
node-exporter  ?:9100           1/3  91s ago    4m   *          
prometheus     ?:9095           1/1  91s ago    4m   count:1 


# 6. List the hosts currently in the cluster
[ceph: root@ceph1 /]# ceph orch host ls
HOST   ADDR           LABELS  STATUS  
ceph1  192.168.88.11  _admin          
1 hosts in cluster

# 7. Add the other hosts to the cluster
[ceph: root@ceph1 /]# ceph orch host add ceph2 192.168.88.12
[ceph: root@ceph1 /]# ceph orch host add ceph3 192.168.88.13
# Note: to remove a host added by mistake: ceph orch host rm <hostname> --force

# 8. List the cluster hosts again
[ceph: root@ceph1 /]# ceph orch host ls
HOST   ADDR           LABELS  STATUS  
ceph1  192.168.88.11  _admin          
ceph2  192.168.88.12                  
ceph3  192.168.88.13                  
3 hosts in cluster

# 9. Scale out the MONs: 3 in total, placed on ceph1-ceph3
[ceph: root@ceph1 /]# ceph orch apply mon --placement="3 ceph1 ceph2 ceph3" 

# 10. Check the MON status
[ceph: root@ceph1 /]# ceph -s
  cluster:
    id:     a4b69ab4-79dd-11ed-ae7b-000c2953b002
    health: HEALTH_WARN
            OSD count 0 < osd_pool_default_size 3
 
  services:
    mon: 3 daemons, quorum ceph1,ceph3,ceph2 (age 2m)
    mgr: ceph1.gmqorm(active, since 15m), standbys: ceph3.giqaph
    osd: 0 osds: 0 up, 0 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     
 
[ceph: root@ceph1 /]# ceph mon stat
e3: 3 mons at {ceph1=[v2:192.168.88.11:3300/0,v1:192.168.88.11:6789/0],ceph2=[v2:192.168.88.12:3300/0,v1:192.168.88.12:6789/0],ceph3=[v2:192.168.88.13:3300/0,v1:192.168.88.13:6789/0]}, election epoch 14, leader 0 ceph1, quorum 0,1,2 ceph1,ceph3,ceph2

# 11. The related containers will also appear on ceph2 and ceph3
[root@ceph2 ~]# podman ps
[root@ceph3 ~]# podman ps
Add OSD disks
# 1. Add each node's data disks as OSDs
[ceph: root@ceph1 /]# ceph orch daemon add osd ceph1:/dev/vdb
[ceph: root@ceph1 /]# ceph orch daemon add osd ceph1:/dev/vdc
[ceph: root@ceph1 /]# ceph orch daemon add osd ceph1:/dev/vdd
[ceph: root@ceph1 /]# ceph orch daemon add osd ceph2:/dev/vdb
[ceph: root@ceph1 /]# ceph orch daemon add osd ceph2:/dev/vdc
[ceph: root@ceph1 /]# ceph orch daemon add osd ceph2:/dev/vdd
[ceph: root@ceph1 /]# ceph orch daemon add osd ceph3:/dev/vdb
[ceph: root@ceph1 /]# ceph orch daemon add osd ceph3:/dev/vdc
[ceph: root@ceph1 /]# ceph orch daemon add osd ceph3:/dev/vdd
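
# Note: as an alternative to adding disks one by one, cephadm can claim every
# empty, unpartitioned disk in the cluster in a single step (not used here):
# ceph orch apply osd --all-available-devices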

# 2. Query the containers on each node; new osd containers will have appeared
[root@ceph1 ~]# podman ps

# 3. Ceph now reports HEALTH_OK; the cluster build is complete.
[ceph: root@ceph1 /]# ceph -s
  cluster:
    id:     a4b69ab4-79dd-11ed-ae7b-000c2953b002
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph1,ceph3,ceph2 (age 2m)
    mgr: ceph1.gmqorm(active, since 2h), standbys: ceph3.giqaph
    osd: 9 osds: 9 up (since 35s), 9 in (since 59s)
 
  data:
    pools:   1 pools, 1 pgs
    objects: 2 objects, 449 KiB
    usage:   186 MiB used, 180 GiB / 180 GiB avail
    pgs:     1 active+clean
Implement block storage
# List the pools. By default there is one pool named .mgr, with ID 1
[ceph: root@ceph1 /]# ceph osd lspools
1 .mgr

# Check detailed storage usage
[ceph: root@ceph1 /]# ceph df
--- RAW STORAGE ---
CLASS     SIZE    AVAIL     USED  RAW USED  %RAW USED
hdd    180 GiB  180 GiB  187 MiB   187 MiB       0.10
TOTAL  180 GiB  180 GiB  187 MiB   187 MiB       0.10
 
--- POOLS ---
POOL  ID  PGS   STORED  OBJECTS     USED  %USED  MAX AVAIL
.mgr   1    1  449 KiB        2  449 KiB      0     57 GiB

# Check the replica count of the .mgr pool
[ceph: root@ceph1 /]# ceph osd pool get .mgr size
size: 3
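
# This replica count explains MAX AVAIL above: with 3 copies of every object,
# 180 GiB of raw space yields 180 / 3 = 60 GiB of usable space, matching the
# ~57 GiB shown by ceph df once internal overhead is subtracted.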
Pools
Create and use a pool
# 1. Create a pool named rbd with 100 placement groups
[ceph: root@ceph1 /]# ceph osd pool create rbd 100
pool 'rbd' created

# 2. Set the application type of the rbd pool to rbd. It could also be rgw or cephfs
# Syntax: ceph osd pool application enable <pool-name> <app-name>
[ceph: root@ceph1 /]# ceph osd pool application enable rbd rbd

# 3. Verify
[ceph: root@ceph1 /]# ceph osd pool ls 
.mgr
rbd
[ceph: root@ceph1 /]# ceph df
--- RAW STORAGE ---
CLASS     SIZE    AVAIL     USED  RAW USED  %RAW USED
hdd    180 GiB  180 GiB  191 MiB   191 MiB       0.10
TOTAL  180 GiB  180 GiB  191 MiB   191 MiB       0.10
 
--- POOLS ---
POOL  ID  PGS   STORED  OBJECTS     USED  %USED  MAX AVAIL
.mgr   1    1  897 KiB        2  2.6 MiB      0     57 GiB
rbd    2   99      0 B        0      0 B      0     57 GiB

# 4. Run a command. When no pool is specified, rbd operates on the pool named rbd by default.
[ceph: root@ceph1 /]# rbd ls   # no output, and no error either
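
# The new pool's replica count can be checked the same way as .mgr's was
# (changing it would be: ceph osd pool set rbd size <n>)
[ceph: root@ceph1 /]# ceph osd pool get rbd size
size: 3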
Images
# 1. List the images in the rbd pool
[ceph: root@ceph1 /]# rbd ls

# 2. Create an image named img1, 10 GB in size
[ceph: root@ceph1 /]# rbd create img1 --size 10G

# 3. List the images in the pool again
[ceph: root@ceph1 /]# rbd ls
img1

# 4. Show the image details
[ceph: root@ceph1 /]# rbd info img1
rbd image 'img1':
        size 10 GiB in 2560 objects
...omitted...

# 5. Resize. The size is thin-provisioned: space is not allocated up front, so the value may even exceed the total cluster capacity.
[ceph: root@ceph1 /]# rbd resize img1 --size 200G
Resizing image: 100% complete...done.
[ceph: root@ceph1 /]# rbd info img1
rbd image 'img1':
        size 200 GiB in 51200 objects
...omitted...
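
# Note: growing an image is always safe, but shrinking discards data beyond the
# new size, so rbd refuses unless an explicit flag is given (a sketch):
# rbd resize img1 --size 10G --allow-shrink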

# 6. Delete the image
[ceph: root@ceph1 /]# rbd rm img1
Removing image: 100% complete...done.
Ceph client
# 1. Copy all rpm packages from /linux-soft/s2/zzg/ceph_soft/cephclient-rpm/ on the host machine to pubserver's /var/ftp/rpms directory
# 2. Rebuild the yum repository metadata
[root@pubserver ~]# createrepo /var/ftp/rpms/
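
# If a client has already cached the old repo metadata, refresh it first
# (optional; a quick sketch):
[root@client1 ~]# yum clean all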

# 3. Install the ceph client software
[root@client1 ~]# yum install -y ceph-common

# 4. Copy the config file and the keyring file from ceph1 to the client
[root@ceph1 ceph_soft]# scp /etc/ceph/ceph.client.admin.keyring /etc/ceph/ceph.conf 192.168.88.10:/etc/ceph/

# 5. Verify on the client that ceph can be operated
[root@client1 ~]# rbd create img1 --size 10G
[root@client1 ~]# rbd ls
img1
[root@client1 ~]# rbd info img1
rbd image 'img1':
        size 10 GiB in 2560 objects
...omitted...

# 6. Map the ceph image as a local disk
[root@client1 ~]# rbd map img1
/dev/rbd0    # rbd is a fixed prefix; 0 is the device number

# 7. Verify
[root@client1 ~]# lsblk
NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda      8:0    0   60G  0 disk 
└─sda1   8:1    0   60G  0 part /
sr0     11:0    1 10.5G  0 rom  
rbd0   253:0    0   10G  0 disk   # rbd0 is backed by the ceph image

[root@client1 ~]# rbd showmapped  # image img1 is mapped to the local disk rbd0
id  pool  namespace  image  snap  device   
0   rbd              img1   -     /dev/rbd0

# 8. Put the disk to use
[root@client1 ~]# mkdir /data
[root@client1 ~]# mkfs.xfs /dev/rbd0
[root@client1 ~]# mount /dev/rbd0 /data/
[root@client1 ~]# df -h /data/
Filesystem      Size  Used Avail Use% Mounted on
/dev/rbd0        10G  105M  9.9G   2% /data
[root@client1 ~]# cp /etc/hosts /data/
[root@client1 ~]# ls /data/
hosts
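
# Note: the rbd map above does not survive a reboot. To make it persistent, the
# image can be listed in /etc/ceph/rbdmap and the rbdmap service enabled (a
# sketch, assuming ceph-common ships the rbdmap unit):
[root@client1 ~]# vim /etc/ceph/rbdmap
rbd/img1   id=admin,keyring=/etc/ceph/ceph.client.admin.keyring
[root@client1 ~]# systemctl enable rbdmap --now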

# Deletion
# Check the status of img1
[root@client1 ~]# rbd status img1

# Delete img1 with the following steps
[root@client1 ~]# umount /dev/rbd0
[root@client1 ~]# rbd unmap img1
[root@client1 ~]# rbd rm img1
Removing image: 100% complete...done.