Manually deploying an OSD with the BlueStore backend (Luminous)

==============================
Manually deploying BlueStore
==============================

Assume the cluster is named xtao and the mon is already running. The local host is hehe.
/dev/sdc is used as the example device for setting up BlueStore.

The Ceph version used is 12.2.0.

----------------------------
1. Key configuration items
----------------------------

This step requires preparing the following four items (the db and wal devices are optional; if they are omitted, BlueStore keeps RocksDB and the WAL on the main block device):

::

   //1. An xfs (or other filesystem) directory holding the OSD keyring and other metadata
   //   about 100 MB is enough
   osd data = /tmp/cbt/mnt/osd-device-0-data
   //2. The disk/partition holding user data
   bluestore block path = /dev/disk/by-partlabel/osd-device-0-block
   //3. The disk/partition holding RocksDB
   //   1 GB
   bluestore block db path = /dev/disk/by-partlabel/osd-device-0-db
   //4. The disk/partition holding the WAL
   //   1 GB
   bluestore block wal path = /dev/disk/by-partlabel/osd-device-0-wal

---------------------------
2. Configuring the OSD
---------------------------

2.1 Partition the disk into four partitions
----------------------------------------------

Separate disks can also be used instead of partitions on a single device.

::

  // Wipe any existing partition table
  // The labels osd_data_0 / bluestore_block_db_0 etc. below belong to osd 0; make sure each label is unique on the system
  // To tag partitions with a recognizable partition type, use the --typecode= option (see the note after the listing)
  # sgdisk --zap-all /dev/sdc
  # sgdisk --new=1:0:+1GB --change-name=1:osd_data_0 --partition-guid=1:$(uuidgen) --mbrtogpt -- /dev/sdc
  Creating new GPT entries.
  The operation has completed successfully.
  # sgdisk --new=2:0:+1GB --change-name=2:bluestore_block_db_0 --partition-guid=2:$(uuidgen) --mbrtogpt -- /dev/sdc
  The operation has completed successfully.
  # sgdisk --new=3:0:+1GB --change-name=3:bluestore_block_wal_0 --partition-guid=3:$(uuidgen) --mbrtogpt -- /dev/sdc
  The operation has completed successfully.
  # sgdisk --largest-new=4 --change-name=4:bluestore_block_0 --partition-guid=4:$(uuidgen) --mbrtogpt -- /dev/sdc
  The operation has completed successfully.
  # partx /dev/sdc
  NR   START      END  SECTORS SIZE NAME                  UUID
   1    2048  2099199  2097152   1G osd_data_0            2f568a34-d89c-4c74-a444-602553e35fbf
   2 2099200  4196351  2097152   1G bluestore_block_db_0  5023371f-29e6-4d28-ae4d-f84107c1d368
   3 4196352  6293503  2097152   1G bluestore_block_wal_0 cc6995ae-77fa-4a05-88a3-88389bb31a07
   4 6293504 41943006 35649503  17G bluestore_block_0     ecd23004-d31f-4603-a8dd-b931902c125d
  # ls -l /dev/disk/by-partlabel/ | grep _0
  lrwxrwxrwx. 1 root root 10 Oct 11 08:27 bluestore_block_0 -> ../../sdc4
  lrwxrwxrwx. 1 root root 10 Oct 11 08:27 bluestore_block_db_0 -> ../../sdc2
  lrwxrwxrwx. 1 root root 10 Oct 11 08:27 bluestore_block_wal_0 -> ../../sdc3
  lrwxrwxrwx. 1 root root 10 Oct 11 08:27 osd_data_0 -> ../../sdc1
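
If the partitions should also carry the GPT type GUIDs that ceph-disk and its udev rules use to recognize Ceph partitions, --typecode= can be added to the sgdisk calls. A minimal sketch for the data partition, assuming the well-known Ceph OSD data type GUID; the type codes for the block/db/wal partitions can be looked up in the ptype table of the ceph-disk shipped with your release:

::

  // optional: tag partition 1 as a Ceph OSD data partition
  # sgdisk --typecode=1:4fbd7e29-9d25-41b8-afd0-062c0ceff05d -- /dev/sdc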

2.2 Create osd.0
--------------------------

The command returns the id of the newly created OSD (0 here), which is used as the OSD id in the rest of the steps.

::

  # ceph osd create --cluster xtao
  0
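
Optionally, a pre-generated uuid can be passed to ceph osd create and later given to ceph-osd --mkfs as --osd-uuid, so that the OSD fsid matches the entry in the OSD map. A sketch (OSD_UUID is just an illustrative shell variable):

::

  # OSD_UUID=$(uuidgen)
  # ceph osd create $OSD_UUID --cluster xtao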

2.3 Prepare the osd data directory
------------------------------------

::

  # mkfs.xfs /dev/sdc1
  # mkdir -p /var/lib/ceph/osd/xtao-0
  # mount /dev/sdc1 /var/lib/ceph/osd/xtao-0
  # cd /var/lib/ceph/osd/xtao-0/
  # echo "bluestore" > type

2.4 Configure BlueStore
-------------------------

There are two ways to do this:

- Specify the paths in the configuration file

::

  [osd.0]
    host = hehe
    osd data = /var/lib/ceph/osd/xtao-0/
    bluestore block path = /dev/disk/by-partlabel/bluestore_block_0
    bluestore block db path = /dev/disk/by-partlabel/bluestore_block_db_0
    bluestore block wal path = /dev/disk/by-partlabel/bluestore_block_wal_0

- Create symlinks in the osd data directory (this is the approach ceph-disk itself uses, and it needs no per-OSD entries in ceph.conf)

::

  # cd /var/lib/ceph/osd/xtao-0
  # ln -sf /dev/disk/by-partlabel/bluestore_block_0 block
  # ln -sf /dev/disk/by-partlabel/bluestore_block_db_0 block.db
  # ln -sf /dev/disk/by-partlabel/bluestore_block_wal_0 block.wal

2.5 Create the metadata files and generate the keyring
--------------------------------------------------------

::

  // This creates all the metadata files, "formats" the block devices, 
  // and also generates a Ceph auth key for your new OSD.
  # ceph-osd  -i 0 --mkkey --mkfs --cluster xtao
  2017-10-11 08:39:13.548990 7fc54f2c9d00 -1 bluestore(/var/lib/ceph/osd/xtao-0) _read_fsid unparsable uuid 
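  // (this "unparsable uuid" warning is expected on a first-time mkfs and can be ignored)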
  # ls
  block     block.wal  ceph_fsid  keyring     magic      ready  whoami
  block.db  bluefs     fsid       kv_backend  mkfs_done  type
  # cat keyring 
  [osd.0]
          key = AQD0EN5ZmTMeGRAAVB7EipKsxttc5Ch9uICOaw==
  # cat ceph_fsid 
  3127245c-ae67-11e7-a010-080027e8d23c
  # ceph -s --cluster xtao
    cluster:
      id:     3127245c-ae67-11e7-a010-080027e8d23c
    ...
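
One way to sanity-check the freshly formatted device is ceph-bluestore-tool's show-label command, which prints the label BlueStore just wrote (a sketch, assuming ceph-bluestore-tool from the same 12.2 release is installed):

::

  # ceph-bluestore-tool show-label --dev /var/lib/ceph/osd/xtao-0/block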

2.6 Create an auth entry for the OSD
--------------------------------------

::

  # ceph auth add osd.0 osd 'allow *' mon 'allow rwx' -i /var/lib/ceph/osd/xtao-0/keyring --cluster xtao
  added key for osd.0
  # ceph auth list --cluster xtao
  installed auth entries:
  
  osd.0
          key: AQD0EN5ZmTMeGRAAVB7EipKsxttc5Ch9uICOaw==
          caps: [mon] allow rwx
          caps: [osd] allow *
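
The stock Luminous tooling normally grants the OSD the osd profile on the mon and mgr instead of plain rwx. If you want to match that, the caps can be adjusted afterwards; a sketch:

::

  # ceph auth caps osd.0 mon 'allow profile osd' mgr 'allow profile osd' osd 'allow *' --cluster xtao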

2.7 Modify the crush tree
---------------------------

::

  # ceph osd tree --cluster xtao
  ID CLASS WEIGHT TYPE NAME    STATUS REWEIGHT PRI-AFF 
  -1            0 root default                         
   0            0 osd.0          down        0 1.00000 
  # ceph osd crush add 0 7.3000 host=`hostname` --cluster xtao
  add item id 0 name 'osd.0' weight 7.3 at location {host=hehe} to crush map
  # ceph osd tree --cluster xtao
  ID CLASS WEIGHT  TYPE NAME    STATUS REWEIGHT PRI-AFF 
  -2       7.29999 host hehe                            
   0       7.29999     osd.0      down        0 1.00000
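
Two things are worth noting about the result above. The crush weight is conventionally the device capacity in TiB (roughly 0.017 for the 17 GiB block partition here, not 7.3), and because only host= was given to ceph osd crush add, the new host bucket does not appear under root default, so PGs mapped through the default root will not reach it. Both can be fixed afterwards; a sketch:

::

  # ceph osd crush reweight osd.0 0.017 --cluster xtao
  # ceph osd crush move hehe root=default --cluster xtao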

---------------------------
3. Start the OSD
---------------------------

- Control the ceph-osd service with systemctl

::

  # systemctl enable ceph-osd@0

- Modify /usr/lib/systemd/system/ceph-osd@.service

::

  [Unit]
  Description=Ceph object storage daemon osd.%i
  After=network-online.target local-fs.target time-sync.target ceph-mon.target
  Wants=network-online.target local-fs.target time-sync.target
  PartOf=ceph-osd.target
  
  [Service]
  LimitNOFILE=1048576
  LimitNPROC=1048576
  EnvironmentFile=-/etc/sysconfig/ceph
  Environment=CLUSTER=xtao      <---------<<< change the cluster name here
  ExecStart=/usr/bin/ceph-osd -f --cluster ${CLUSTER} --id %i  <----------<<<< --setuser ceph --setgroup ceph removed: run as root instead of the ceph user
  ExecStartPre=/usr/lib/ceph/ceph-osd-prestart.sh --cluster ${CLUSTER} --id %i
  ExecReload=/bin/kill -HUP $MAINPID
  ProtectHome=true
  ProtectSystem=full
  PrivateTmp=true
  TasksMax=infinity
  Restart=on-failure
  StartLimitInterval=30min
  StartLimitBurst=30
  RestartSec=20s
  
  [Install]
  WantedBy=ceph-osd.target
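
- Reload systemd and start the OSD (enable alone does not start the service, and the edited unit file needs a daemon-reload)

::

  # systemctl daemon-reload
  # systemctl start ceph-osd@0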

- Check the OSD status

::

  # ceph -s --cluster xtao
    cluster:
      id:     3127245c-ae67-11e7-a010-080027e8d23c
      health: HEALTH_OK
   
    services:
      mon: 1 daemons, quorum hehe
      mgr: hehe(active)
      osd: 1 osds: 1 up, 1 in
   
    data:
      pools:   0 pools, 0 pgs
      objects: 0 objects, 0 bytes
      usage:   2048 MB used, 16382 MB / 18430 MB avail
      pgs: 
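
To confirm the new OSD is actually running with the BlueStore backend, the OSD metadata can be queried (a sketch):

::

  # ceph osd metadata 0 --cluster xtao | grep osd_objectstore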

--------------------------
References
--------------------------

[1] https://github.com/MartinEmrich/kb/blob/master/ceph/Manual-Bluestore.md

[2] How ceph-disk works: http://blog.csdn.net/guzyguzyguzy/article/details/46729391

