Setting up CephFS on Ubuntu 18.04 with Ceph Luminous 12.2.12

1. Building the CephFS file system (Ubuntu 18.04, Ceph Luminous 12.2.12)

# ceph -s
  cluster:
    id:     5df0ea22-7a9b-48c9-a495-a880b0b29014
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum a29,a30,a31
    mgr: a29(active), standbys: a30, a31
    osd: 13 osds: 13 up, 13 in
 
  data:
    pools:   1 pools, 512 pgs
    objects: 475.10k objects, 1.81TiB
    usage:   3.63TiB used, 127TiB / 130TiB avail
    pgs:     512 active+clean
 
  io:
    client:   27.1KiB/s rd, 105KiB/s wr, 6op/s rd, 5op/s wr

  • Create the metadata servers (MDS)
ceph-deploy mds create a29 a30 a31
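If the deploy step succeeds, the three new daemons register with the monitors as standbys; no file system exists yet, so none of them is assigned a rank. A quick check (hosts a29/a30/a31 as above):
# ceph mds stat            # the three daemons should show up as up:standby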
  • Create the storage pools
# ceph osd pool create cephfs_data 128
pool 'cephfs_data' created
# ceph osd pool create cephfs_metadata 512
pool 'cephfs_metadata' created
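When choosing pg_num, a common rule of thumb is total PGs ≈ (number of OSDs × 100) / replica size, rounded up to a power of two; with 13 OSDs and the default size of 3 that is roughly 433, i.e. 512 across all pools (this guideline is a general assumption, not something taken from this cluster). Whatever values were used can be read back per pool:
# ceph osd pool get cephfs_data pg_num
pg_num: 128
# ceph osd pool get cephfs_metadata pg_num
pg_num: 512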
  • Create the file system
# ceph fs new cephfs cephfs_metadata cephfs_data
new fs with metadata pool 3 and data pool 2
# ceph fs ls
name: cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]
# ceph mds stat
cephfs-1/1/1 up  {0=a31=up:creating}, 2 up:standby
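The rank is still in up:creating here: the MDS cannot finish creating the file system until the metadata pool's PGs are active, which is exactly what goes wrong in the error section below. Once the PGs settle, the same command should report the rank as active, roughly like this:
# ceph mds stat
cephfs-1/1/1 up  {0=a31=up:active}, 2 up:standby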

2. Mounting the CephFS file system

  • Kernel client mount
# mkdir /cephfs
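The mount below uses the in-kernel CephFS client, so the ceph kernel module must be available; on a stock Ubuntu 18.04 kernel it ships with the distribution and can be loaded and checked explicitly:
# modprobe ceph            # load the in-kernel CephFS client
# lsmod | grep ceph        # ceph and libceph should both be listed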
  • Check the configuration: cephx authentication is enabled
# cat /etc/ceph/ceph.conf 
[global]
fsid = 5df0ea22-7a9b-48c9-a495-a880b0b29014
mon_initial_members = a29
mon_host = 192.168.1.179
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx

public_network = 192.168.1.0/24
  • Export the cephx authentication key
# ceph auth export client.admin
export auth(auid = 18446744073709551615 key=AQB5vWVexvYuFRAAVId1xt9CRmemzYZ1c4WqyQ== with 4 caps)
[client.admin]
	key = AQB5vWVexvYuFRAAVId1xt9CRmemzYZ1c4WqyQ==
	caps mds = "allow *"
	caps mgr = "allow *"
	caps mon = "allow *"
	caps osd = "allow *"
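Only the secret itself is needed for the mount; rather than copying it out of the export block, it can be printed directly for the same client.admin user:
# ceph auth get-key client.admin
AQB5vWVexvYuFRAAVId1xt9CRmemzYZ1c4WqyQ==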
  • Mount
mount -t ceph 192.168.1.181:6789:/ /cephfs/ -o name=admin,secret=AQB5vWVexvYuFRAAVId1xt9CRmemzYZ1c4WqyQ==
# df -Th
192.168.1.181:6789:/ ceph       60T     0   60T    0% /cephfs
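A quick read/write sanity check on the new mount (testfile is just an example name):
# dd if=/dev/zero of=/cephfs/testfile bs=1M count=100   # write 100 MiB through the kernel client
# ls -lh /cephfs/testfile
# rm /cephfs/testfile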
  • Automatic mounting at boot via /etc/fstab
# cd /etc/ceph/
# touch admin.key
# echo AQB5vWVexvYuFRAAVId1xt9CRmemzYZ1c4WqyQ== >> /etc/ceph/admin.key
# vim /etc/fstab
192.168.1.181:6789:/ /cephfs ceph name=admin,secretfile=/etc/ceph/admin.key,noatime,_netdev  0  0
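Since admin.key contains the full client.admin secret, it is worth restricting its permissions, and the fstab entry can be tested without a reboot:
# chmod 600 /etc/ceph/admin.key    # the key grants full admin access to the cluster
# umount /cephfs
# mount -a                         # remounts everything listed in /etc/fstab
# df -Th /cephfs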

Error reported

# ceph -s
  cluster:
    id:     5df0ea22-7a9b-48c9-a495-a880b0b29014
    health: HEALTH_ERR
            1 MDSs report slow metadata IOs
            Reduced data availability: 238 pgs inactive
            21 stuck requests are blocked > 4096 sec. Implicated osds 0,1,3,7,8,9,10
 
  services:
    mon: 3 daemons, quorum a29,a30,a31
    mgr: a29(active), standbys: a30, a31
    mds: cephfs-1/1/1 up  {0=a31=up:creating}, 2 up:standby
    osd: 13 osds: 13 up, 13 in
 
  data:
    pools:   3 pools, 1152 pgs
    objects: 479.59k objects, 1.83TiB
    usage:   3.67TiB used, 127TiB / 130TiB avail
    pgs:     20.660% pgs not active
             914 active+clean
             238 creating+activating
 
  io:
    client:   41.8KiB/s rd, 2.96KiB/s wr, 10op/s rd, 0op/s wr
  • Set the pool replica count (size) to 2
# ceph osd pool get cephfs_data  size
size: 3
# ceph osd pool get cephfs_metadata  size
size: 3
# ceph osd pool set cephfs_data size 2
set pool 2 size to 2
# ceph osd pool set cephfs_metadata size 2
set pool 3 size to 2
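When lowering size it is also worth checking min_size, the minimum number of replicas that must be up for a PG to accept I/O: with size 2, a min_size of 2 would block writes whenever a single replica is down. The value reported will depend on the cluster's defaults:
# ceph osd pool get cephfs_data min_size
# ceph osd pool set cephfs_data min_size 1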
# ceph -s
  cluster:
    id:     5df0ea22-7a9b-48c9-a495-a880b0b29014
    health: HEALTH_WARN
            Reduced data availability: 30 pgs inactive
 
  services:
    mon: 3 daemons, quorum a29,a30,a31
    mgr: a29(active), standbys: a30, a31
    mds: cephfs-1/1/1 up  {0=a31=up:active}, 2 up:standby
    osd: 13 osds: 13 up, 13 in
 
  data:
    pools:   3 pools, 1152 pgs
    objects: 479.61k objects, 1.83TiB
    usage:   3.67TiB used, 127TiB / 130TiB avail
    pgs:     2.604% pgs not active
             1122 active+clean
             30   creating+activating
 
  io:
    client:   0B/s rd, 2.97KiB/s wr, 0op/s rd, 0op/s wr
# ceph health detail
HEALTH_WARN Reduced data availability: 30 pgs inactive
PG_AVAILABILITY Reduced data availability: 30 pgs inactive
    pg 3.169 is stuck inactive for 16569.894513, current state creating+activating, last acting [10,12]
    pg 3.16c is stuck inactive for 16569.894513, current state creating+activating, last acting [1,12]
    pg 3.16d is stuck inactive for 16569.894513, current state creating+activating, last acting [11,12]
    pg 3.173 is stuck inactive for 16569.894513, current state creating+activating, last acting [9,12]
    pg 3.177 is stuck inactive for 16569.894513, current state creating+activating, last acting [7,12]
    pg 3.17b is stuck inactive for 16569.894513, current state creating+activating, last acting [0,12]
    pg 3.17d is stuck inactive for 16569.894513, current state creating+activating, last acting [7,12]
    pg 3.184 is stuck inactive for 16569.894513, current state creating+activating, last acting [7,12]
    pg 3.18d is stuck inactive for 16569.894513, current state creating+activating, last acting [1,12]
    pg 3.193 is stuck inactive for 16569.894513, current state creating+activating, last acting [0,12]
    pg 3.19d is stuck inactive for 16569.894513, current state creating+activating, last acting [0,12]
    pg 3.1a0 is stuck inactive for 16569.894513, current state creating+activating, last acting [7,12]
    pg 3.1a4 is stuck inactive for 16569.894513, current state creating+activating, last acting [3,12]
    pg 3.1aa is stuck inactive for 16569.894513, current state creating+activating, last acting [0,12]
    pg 3.1ae is stuck inactive for 16569.894513, current state creating+activating, last acting [9,12]
    pg 3.1b4 is stuck inactive for 16569.894513, current state creating+activating, last acting [3,12]
    pg 3.1bc is stuck inactive for 16569.894513, current state creating+activating, last acting [8,12]
    pg 3.1bd is stuck inactive for 16569.894513, current state creating+activating, last acting [1,12]
    pg 3.1bf is stuck inactive for 16569.894513, current state creating+activating, last acting [7,12]
    pg 3.1c3 is stuck inactive for 16569.894513, current state creating+activating, last acting [7,12]
    pg 3.1c5 is stuck inactive for 16569.894513, current state creating+activating, last acting [7,12]
    pg 3.1c7 is stuck inactive for 16569.894513, current state creating+activating, last acting [4,12]
    pg 3.1c9 is stuck inactive for 16569.894513, current state creating+activating, last acting [0,12]
    pg 3.1d5 is stuck inactive for 16569.894513, current state creating+activating, last acting [7,12]
    pg 3.1d6 is stuck inactive for 16569.894513, current state creating+activating, last acting [1,12]
    pg 3.1eb is stuck inactive for 16569.894513, current state creating+activating, last acting [6,12]
    pg 3.1ef is stuck inactive for 16569.894513, current state creating+activating, last acting [11,12]
    pg 3.1f0 is stuck inactive for 16569.894513, current state creating+activating, last acting [1,12]
    pg 3.1f6 is stuck inactive for 16569.894513, current state creating+activating, last acting [10,12]
    pg 3.1f9 is stuck inactive for 16569.894513, current state creating+activating, last acting [8,12]
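Any single stuck PG can be examined in more detail; pg 3.169 is just the first entry in the list above, and the peering/recovery sections of the query output usually show which OSD the PG is waiting on:
# ceph pg 3.169 query | less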
  • Restarted osd.12 (every stuck PG in the list above has osd.12 in its acting set)
# systemctl stop ceph-osd@12.service 
# systemctl start ceph-osd@12.service 
# systemctl status ceph-osd@12.service
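After the restart, the set of PGs still stuck inactive can be re-checked directly instead of re-reading the full ceph health detail output:
# ceph pg dump_stuck inactive     # lists PGs stuck in a non-active state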
  • After setting the replica count to 1, the cluster status returned to OK
# ceph osd pool set cephfs_data size 1
set pool 2 size to 1
# ceph osd pool set cephfs_metadata size 1
set pool 3 size to 1

# ceph health detail
HEALTH_OK
# ceph osd pool get cephfs_data size 
size: 2
# ceph osd pool get cephfs_metadata size 
size: 1
  • Check the PG distribution of the pool
ceph pg ls-by-pool cephfs_metadata | awk '{print $1,$2,$15}' | head
