Ceph Hands-On Series (1): Installing and Deploying a Three-Node Ceph Cluster

Environment Setup

Environment Overview

  • Install VMware on Windows 7 and create a centos7.2-mini virtual machine from the CentOS-7.2 image; after installation, 512 MB of memory per VM is enough if the cluster is only used for practice.
  • Create 3 linked clones of the virtual machine above; each clone also needs the spare data disks used for the OSDs later (see the check below).
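Since the OSD steps later expect three spare data disks (sdb, sdc, sdd) on every node, it is worth checking that they are visible on each clone. A minimal check, assuming the default device naming:

# Verify the extra data disks are visible on this node
[root@ceph6 ~]# lsblk -d -o NAME,SIZE,TYPE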

Set the hostnames

# Run one of the following on each of the 3 hosts
[root@localhost ~]# hostnamectl set-hostname ceph6
[root@localhost ~]# hostnamectl set-hostname ceph7
[root@localhost ~]# hostnamectl set-hostname ceph8

Configure hostname resolution

# On all hosts; then test that the hosts can reach each other
[root@ceph8 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.103.139 ceph6
192.168.103.140 ceph7
192.168.103.138 ceph8
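A quick connectivity check between the nodes once /etc/hosts is in place, for example:

[root@ceph6 ~]# ping -c 2 ceph7
[root@ceph6 ~]# ping -c 2 ceph8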

Open the required firewall ports

# On all hosts
[root@ceph6 ~]# firewall-cmd --zone=public --add-port=6789/tcp --permanent 
success
[root@ceph6 ~]# firewall-cmd --zone=public --add-port=6800-7100/tcp --permanent 
success
[root@ceph6 ~]# firewall-cmd --reload
success
[root@ceph6 ~]# firewall-cmd --zone=public --list-all 
public (default, active)
  interfaces: eno16777736
  sources: 
  services: dhcpv6-client ssh
  ports: 6789/tcp 6800-7100/tcp
  masquerade: no
  forward-ports: 
  icmp-blocks: 
  rich rules: 

Disable SELinux

# On all hosts
[root@ceph6 ~]# setenforce 0
[root@ceph6 ~]# vim /etc/selinux/config 
[root@ceph6 ~]# cat /etc/selinux/config | grep disabled
#     disabled - No SELinux policy is loaded.
SELINUX=disabled
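If you prefer a non-interactive edit instead of vim, a one-line equivalent (a sketch that assumes the file still contains the default SELINUX=enforcing value):

# Switch SELinux to disabled in the persistent config
[root@ceph6 ~]# sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config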

Configure the yum proxy

# On all hosts; the corporate internal dev network requires its own proxy settings
[root@ceph6 ~]# cat /etc/yum.conf 
[main]
cachedir=/var/cache/yum/$basearch/$releasever
keepcache=0
debuglevel=2
logfile=/var/log/yum.log
exactarch=1
obsoletes=1
gpgcheck=1
plugins=1
installonly_limit=5
bugtracker_url=http://bugs.centos.org/set_project.php?project_id=23&ref=http://bugs.centos.org/bug_report_page.php?category=yum
distroverpkg=centos-release
Proxy=http://dev-proxy.oa.com:8080/

[root@ceph6 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
10.14.87.100 dev-proxy.oa.com

Set a global proxy

# On all hosts; due to corporate network restrictions a global proxy is required. Reboot the host afterwards (poweroff, then power it back on).
http_proxy=dev-proxy.oa.com:8080
https_proxy=dev-proxy.oa.com:8080
export http_proxy https_proxy
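To make these variables survive the reboot mentioned above, one option (the file name /etc/profile.d/proxy.sh is just an illustrative choice) is to put them in a profile snippet:

[root@ceph6 ~]# cat > /etc/profile.d/proxy.sh << 'EOF'
export http_proxy=dev-proxy.oa.com:8080
export https_proxy=dev-proxy.oa.com:8080
EOF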

Install and configure the NTP service

# On all hosts; 10.14.0.131 is the corporate NTP server
[root@ceph6 ~]# cat /etc/ntp.conf | grep server | grep -v '#'
server 10.14.0.131
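Assuming the ntp package still needs to be installed and enabled, the usual CentOS 7 steps look roughly like this; ntpq -p then confirms the server is being polled:

[root@ceph6 ~]# yum install -y ntp
[root@ceph6 ~]# systemctl enable ntpd
[root@ceph6 ~]# systemctl start ntpd
[root@ceph6 ~]# ntpq -p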

Download the rpm with wget through the proxy

# Downloaded the rpm through the proxy with wget, but it could not be installed afterwards; the cause was unclear (see the note below)
[root@ceph6 ~]# wget http://dev-proxy.oa.com:8080/ http://download.ceph.com/rpm-giant/el7/noarch/ceph-release-1-0.el7.noarch.rpm -e http_proxy=dev-proxy.oa.com:8080/
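One thing to note is that the proxy URL above is passed as an extra download target rather than only as the http_proxy setting. The more conventional proxy invocation (a sketch, not verified in this environment) would be:

[root@ceph6 ~]# wget -e use_proxy=yes -e http_proxy=dev-proxy.oa.com:8080 http://download.ceph.com/rpm-giant/el7/noarch/ceph-release-1-0.el7.noarch.rpm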

Ceph Installation and Deployment

Install the deployment tool

[root@ceph6 ~]# yum install -y ceph-deploy

Initialize the monitor

# Run on ceph6
[root@ceph6 ~]# mkdir /etc/ceph
[root@ceph6 ~]# cd /etc/ceph/
[root@ceph6 ceph]# ceph-deploy new ceph6
[root@ceph6 ceph]# ls
ceph.conf  ceph.log  ceph.mon.keyring

Passwordless SSH login

# Run on ceph6
[root@ceph6 ~]# ssh-keygen 
[root@ceph6 ~]# ssh-copy-id root@ceph7
[root@ceph6 ~]# ssh-copy-id root@ceph8
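A quick check that key-based login works before handing the nodes to ceph-deploy:

[root@ceph6 ~]# ssh root@ceph7 hostname
[root@ceph6 ~]# ssh root@ceph8 hostname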

Remote installation and deployment

# Network problem; the global proxy set in the earlier step did not resolve it (a possible workaround is sketched below)
[root@ceph6 ~]# ceph-deploy install ceph6 ceph7 ceph8
# ... output omitted ...
[ceph6][DEBUG ] 
[ceph6][DEBUG ] Complete!
[ceph6][DEBUG ] Configure Yum priorities to include obsoletes
[ceph6][WARNIN] check_obsoletes has been enabled for Yum priorities plugin
[ceph6][INFO  ] Running command: rpm --import https://download.ceph.com/keys/release.asc
[ceph6][WARNIN] curl: (7) Failed to connect to 2607:f298:6050:51f3:f816:3eff:fe71:9135: Network is unreachable
[ceph6][WARNIN] error: https://download.ceph.com/keys/release.asc: import read failed(2).
[ceph6][ERROR ] RuntimeError: command returned non-zero exit status: 1
[ceph_deploy][ERROR ] RuntimeError: Failed to execute command: rpm --import https://download.ceph.com/keys/release.asc
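A plausible explanation is that ceph-deploy runs the failing rpm/curl commands on the target hosts over SSH, where the proxy variables exported in the interactive shell are not set. One possible workaround is to make the proxy visible to non-interactive SSH sessions as well, for example via /etc/environment on every node (an assumption about this setup; the per-node installs in the next step are what was actually done here):

# On every node (assumption: PAM reads /etc/environment for SSH sessions)
[root@ceph6 ~]# echo 'http_proxy=http://dev-proxy.oa.com:8080/' >> /etc/environment
[root@ceph6 ~]# echo 'https_proxy=http://dev-proxy.oa.com:8080/' >> /etc/environment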

Standalone installation on each node

[root@ceph7 ~]# ceph-deploy install ceph7
[root@ceph8 ~]# ceph-deploy install ceph8
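The transcript never shows the monitor itself being bootstrapped, but the single monitor reported by ceph -s below implies that step happened. With ceph-deploy this is normally run from the directory that holds ceph.conf once the packages are installed, roughly:

[root@ceph6 ceph]# ceph-deploy mon create-initial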

Building the Cluster

Create OSDs

# List all available disks on ceph6
[root@ceph6 ceph]# ceph-deploy disk list ceph6
# The disk zap subcommand wipes the existing partition table and disk contents
[root@ceph6 ceph]# ceph-deploy disk zap ceph6:sdb ceph6:sdc ceph6:sdd
[root@ceph6 ceph]# ceph-deploy disk zap ceph7:sdb ceph7:sdc ceph7:sdd
[root@ceph6 ceph]# ceph-deploy disk zap ceph8:sdb ceph8:sdc ceph8:sdd
# The osd create subcommand first prepares the disk (formats it with XFS), then activates its first and second partitions as the data partition and the journal partition
[root@ceph6 ceph]# ceph-deploy osd create ceph6:sdb ceph6:sdc ceph6:sdd
[root@ceph6 ceph]# ceph-deploy osd create ceph7:sdb ceph7:sdc ceph7:sdd
[root@ceph6 ceph]# ceph-deploy osd create ceph8:sdb ceph8:sdc ceph8:sdd
# Check the Ceph status
[root@ceph6 ceph]# ceph -s
    cluster 8ea4fa79-3b6b-4de3-8cfb-a0922d6827c5
     health HEALTH_WARN
            too few PGs per OSD (21 < min 30)
     monmap e1: 1 mons at {ceph6=192.168.103.139:6789/0}
            election epoch 3, quorum 0 ceph6
     osdmap e43: 9 osds: 9 up, 9 in
            flags sortbitwise
      pgmap v95: 64 pgs, 1 pools, 0 bytes data, 0 objects
            307 MB used, 134 GB / 134 GB avail
                  64 active+clean
# Resolve the HEALTH_WARN by raising the pool's pg_num and pgp_num
[root@ceph6 ceph]# ceph osd pool set rbd pg_num 128
[root@ceph6 ceph]# ceph osd pool set rbd pgp_num 128
[root@ceph6 ceph]# ceph -s
    cluster 8ea4fa79-3b6b-4de3-8cfb-a0922d6827c5
     health HEALTH_OK
     monmap e1: 1 mons at {ceph6=192.168.103.139:6789/0}
            election epoch 3, quorum 0 ceph6
     osdmap e48: 9 osds: 9 up, 9 in
            flags sortbitwise
      pgmap v130: 128 pgs, 1 pools, 0 bytes data, 0 objects
            309 MB used, 134 GB / 134 GB avail
                 128 active+clean
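The warning threshold works out from the ratio of placement groups to OSDs: with the default 64 PGs, 3 replicas and 9 OSDs there are 64 × 3 / 9 ≈ 21 PGs per OSD, below the minimum of 30; raising pg_num (and pgp_num) to 128 gives 128 × 3 / 9 ≈ 43, which clears the warning.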

Deploy an odd number of monitors

# Add the public network address to the configuration file
[root@ceph6 ceph]# cat /etc/ceph/ceph.conf 
[global]
fsid = 8ea4fa79-3b6b-4de3-8cfb-a0922d6827c5
mon_initial_members = ceph6,ceph7,ceph8
mon_host = 192.168.103.139,192.168.103.140,192.168.103.138
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
public network = 192.168.103.0/24

# Use ceph-deploy to create a monitor on ceph7 and on ceph8
[root@ceph6 ceph]# ceph-deploy mon create ceph7 --overwrite-conf
[root@ceph6 ceph]# ceph-deploy --overwrite-conf mon create ceph8
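Optionally (not part of the original run), the updated ceph.conf and the admin keyring can be pushed to all three nodes so that the ceph CLI works from any of them:

[root@ceph6 ceph]# ceph-deploy --overwrite-conf admin ceph6 ceph7 ceph8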

Working with the Ceph Cluster

Check the Ceph status

[root@ceph6 ceph]# ceph -s
    cluster 8ea4fa79-3b6b-4de3-8cfb-a0922d6827c5
     health HEALTH_OK
     monmap e3: 3 mons at {ceph6=192.168.103.139:6789/0,ceph7=192.168.103.140:6789/0,ceph8=192.168.103.138:6789/0}
            election epoch 8, quorum 0,1,2 ceph8,ceph6,ceph7
     osdmap e48: 9 osds: 9 up, 9 in
            flags sortbitwise
      pgmap v130: 128 pgs, 1 pools, 0 bytes data, 0 objects
            309 MB used, 134 GB / 134 GB avail
                 128 active+clean
[root@ceph6 ceph]# ceph status
    cluster 8ea4fa79-3b6b-4de3-8cfb-a0922d6827c5
     health HEALTH_OK
     monmap e3: 3 mons at {ceph6=192.168.103.139:6789/0,ceph7=192.168.103.140:6789/0,ceph8=192.168.103.138:6789/0}
            election epoch 8, quorum 0,1,2 ceph8,ceph6,ceph7
     osdmap e48: 9 osds: 9 up, 9 in
            flags sortbitwise
      pgmap v130: 128 pgs, 1 pools, 0 bytes data, 0 objects
            309 MB used, 134 GB / 134 GB avail
                 128 active+clean

Watch the cluster health status

[root@ceph6 ceph]# ceph -w
    cluster 8ea4fa79-3b6b-4de3-8cfb-a0922d6827c5
     health HEALTH_OK
     monmap e3: 3 mons at {ceph6=192.168.103.139:6789/0,ceph7=192.168.103.140:6789/0,ceph8=192.168.103.138:6789/0}
            election epoch 8, quorum 0,1,2 ceph8,ceph6,ceph7
     osdmap e48: 9 osds: 9 up, 9 in
            flags sortbitwise
      pgmap v130: 128 pgs, 1 pools, 0 bytes data, 0 objects
            309 MB used, 134 GB / 134 GB avail
                 128 active+clean

2016-11-09 10:01:39.675477 mon.0 [INF] from='client.4174 :/0' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.bootstrap-mds", "caps": ["mon", "allow profile bootstrap-mds"]}]: dispatch

Check the Ceph monitor quorum status

[root@ceph6 ceph]# ceph quorum_status --format json-pretty

{
    "election_epoch": 8,
    "quorum": [
        0,
        1,
        2
    ],
    "quorum_names": [
        "ceph8",
        "ceph6",
        "ceph7"
    ],
    "quorum_leader_name": "ceph8",
    "monmap": {
        "epoch": 3,
        "fsid": "8ea4fa79-3b6b-4de3-8cfb-a0922d6827c5",
        "modified": "2016-11-09 10:01:31.732730",
        "created": "2016-11-08 22:36:48.791105",
        "mons": [
            {
                "rank": 0,
                "name": "ceph8",
                "addr": "192.168.103.138:6789\/0"
            },
            {
                "rank": 1,
                "name": "ceph6",
                "addr": "192.168.103.139:6789\/0"
            },
            {
                "rank": 2,
                "name": "ceph7",
                "addr": "192.168.103.140:6789\/0"
            }
        ]
    }
}

Dump the Ceph monitor information

[root@ceph6 ceph]# ceph mon dump
dumped monmap epoch 3
epoch 3
fsid 8ea4fa79-3b6b-4de3-8cfb-a0922d6827c5
last_changed 2016-11-09 10:01:31.732730
created 2016-11-08 22:36:48.791105
0: 192.168.103.138:6789/0 mon.ceph8
1: 192.168.103.139:6789/0 mon.ceph6
2: 192.168.103.140:6789/0 mon.ceph7

Check cluster usage

[root@ceph6 ceph]# ceph df
GLOBAL:
    SIZE     AVAIL     RAW USED     %RAW USED 
    134G      134G         309M          0.22 
POOLS:
    NAME     ID     USED     %USED     MAX AVAIL     OBJECTS 
    rbd      0         0         0        45942M           0 

Check the status of the Ceph monitors, OSDs, and PGs (placement groups)

[root@ceph6 ceph]# ceph mon stat
e3: 3 mons at {ceph6=192.168.103.139:6789/0,ceph7=192.168.103.140:6789/0,ceph8=192.168.103.138:6789/0}, election epoch 8, quorum 0,1,2 ceph8,ceph6,ceph7
[root@ceph6 ceph]# ceph osd stat
     osdmap e48: 9 osds: 9 up, 9 in
            flags sortbitwise
[root@ceph6 ceph]# ceph pg stat
v130: 128 pgs: 128 active+clean; 0 bytes data, 309 MB used, 134 GB / 134 GB avail

List PGs

[root@ceph6 ceph]# ceph pg dump

List Ceph storage pools

[root@ceph6 ceph]# ceph osd lspools
0 rbd,

Check the OSD CRUSH map

[root@ceph6 ceph]# ceph osd tree
ID WEIGHT  TYPE NAME      UP/DOWN REWEIGHT PRIMARY-AFFINITY 
-1 0.13129 root default                                     
-2 0.04376     host ceph6                                   
 0 0.01459         osd.0       up  1.00000          1.00000 
 1 0.01459         osd.1       up  1.00000          1.00000 
 2 0.01459         osd.2       up  1.00000          1.00000 
-3 0.04376     host ceph7                                   
 3 0.01459         osd.3       up  1.00000          1.00000 
 4 0.01459         osd.4       up  1.00000          1.00000 
 5 0.01459         osd.5       up  1.00000          1.00000 
-4 0.04376     host ceph8                                   
 6 0.01459         osd.6       up  1.00000          1.00000 
 7 0.01459         osd.7       up  1.00000          1.00000 
 8 0.01459         osd.8       up  1.00000          1.00000 

List the cluster authentication keys

[root@ceph6 ceph]# ceph auth list