Installing Ceph Nautilus on CentOS 7.7

Start from a CentOS 7.7 minimal installation.

Install the EPEL repository on all nodes

yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm


Initial configuration on all nodes
Use nmtui to configure the network connection and set it to connect automatically.
# Kernel pid max
echo 4194303 > /proc/sys/kernel/pid_max
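Note that writing to /proc only lasts until the next reboot. To persist the setting, it can also go into a sysctl drop-in (the file name below is only an example):
echo 'kernel.pid_max = 4194303' > /etc/sysctl.d/99-ceph.conf
sysctl -p /etc/sysctl.d/99-ceph.conf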
# Disable SELinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
setenforce 0
## Stop firewalld and prevent it from starting at boot
systemctl stop firewalld.service
systemctl disable firewalld.service

## Host name resolution
vim /etc/hosts:
192.168.111.179 ceph1
192.168.111.180 ceph2
192.168.111.181 ceph3

## Set the hostname on each node respectively
hostnamectl set-hostname ceph1   # on ceph1
hostnamectl set-hostname ceph2   # on ceph2
hostnamectl set-hostname ceph3   # on ceph3
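A quick sanity check that the names resolve through /etc/hosts on every node (a simple sketch):
getent hosts ceph1 ceph2 ceph3
ping -c1 ceph2 && ping -c1 ceph3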


## Install NTP
yum install ntp

vi /etc/ntp.conf
## Edit the NTP configuration
## Comment out the four default servers below and point to an internal time server or the Ceph admin node
server 192.168.111.179
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
## Start and enable the service
systemctl restart ntpd.service
systemctl enable ntpd.service
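To verify that each node is actually syncing from the chosen server (here the admin node at 192.168.111.179), a quick check:
ntpq -p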


## Other tools
yum install wget net-tools


Add the Ceph repository on all nodes

cat << EOM > /etc/yum.repos.d/ceph.repo
[ceph-noarch]
name=Ceph noarch packages
baseurl=https://download.ceph.com/rpm-nautilus/el7/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1
EOM

Update your repository metadata; ceph-deploy itself will be installed on the admin node below.

Run on the admin node
Generate SSH keys
## Generate keys for passwordless login
ssh-keygen
ssh-copy-id root@ceph1
ssh-copy-id root@ceph2
ssh-copy-id root@ceph3
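A quick check that passwordless login works; each command should print the remote hostname without prompting for a password:
for h in ceph1 ceph2 ceph3; do ssh root@$h hostname; done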

Install and configure ceph-deploy

yum install -y https://download.ceph.com/rpm-nautilus/el7/noarch/ceph-deploy-2.0.1-0.noarch.rpm
#yum install ceph-deploy
 


Create the cluster
Create a working directory
mkdir my_cluster
cd my_cluster
Generate ceph.conf and the keyring

ceph-deploy new ceph1 ceph2 ceph3   ## generates ceph.conf and ceph.mon.keyring

[root@ceph1 my_cluster]# ceph-deploy new ceph1 ceph2 ceph3
Traceback (most recent call last):
  File "/usr/bin/ceph-deploy", line 18, in <module>
    from ceph_deploy.cli import main
  File "/usr/lib/python2.7/site-packages/ceph_deploy/cli.py", line 1, in <module>
    import pkg_resources
ImportError: No module named pkg_resources
If you hit this ImportError, install the missing Python packages and re-run ceph-deploy:

yum install python-pkg-resources python-setuptools
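After installing the missing Python packages, ceph-deploy should start normally; a quick check:
ceph-deploy --version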


Configure ceph.conf
cd my_cluster
vim ceph.conf:
[global]
fsid = 2595ba62-8ae1-difg-893a-892a675757c6
mon_initial_members = ceph1, ceph2, ceph3
mon_host = 172.28.7.239,172.28.7.240,172.28.7.241
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
## If there are two reachable NICs, configure public_network and cluster_network; in a single-NIC test environment, leave them unset
#public_network = 172.28.7.0/22     # public (client/management) network
#cluster_network = 172.28.11.0/22   # cluster (replication) network
mon_pg_warn_max_per_osd = 1000     
osd_pool_default_size = 3          
osd_pool_default_min_size = 2      
mon_osd_backfillfull_ratio = 0.75
mon_osd_full_ratio = .85            
mon_osd_nearfull_ratio = .70        
osd_failsafe_full_ratio = 0.90
osd_deep_scrub_randomize_ratio = 0.01
[mgr]
mgr modules = dashboard
[osd]
osd_max_write_size = 1024            
osd_recovery_op_priority = 1        
osd_recovery_max_active = 1         
osd_recovery_max_single_start = 1   
osd_recovery_max_chunk = 1048576    
osd_recovery_threads = 1            
osd_max_backfills = 1               
osd_scrub_begin_hour = 22          
osd_scrub_end_hour = 7             
osd_recovery_sleep = 0              
osd_crush_update_on_start = false  ## During initial deployment set this to true and restart all OSDs, otherwise newly created pools will report 100.000% pgs unknown; once all OSDs have joined and the cluster is healthy, set it back to false and restart all OSDs again
Note: whenever a new pool is created, all OSDs must run with osd_crush_update_on_start = true for the CRUSH placement to be adjusted
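When you later flip osd_crush_update_on_start as described in the note above, a sketch of pushing the edited ceph.conf to all nodes and restarting their OSD daemons (default systemd unit names assumed):
ceph-deploy --overwrite-conf config push ceph1 ceph2 ceph3
ssh ceph1 systemctl restart ceph-osd.target
ssh ceph2 systemctl restart ceph-osd.target
ssh ceph3 systemctl restart ceph-osd.target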

Install Ceph

Optionally point ceph-deploy at a mirror by exporting one of the following pairs (only the last pair exported takes effect):

# export CEPH_DEPLOY_REPO_URL=http://mirrors.163.com/ceph/rpm-nautilus/el7
# export CEPH_DEPLOY_GPG_URL=http://mirrors.163.com/ceph/keys/release.asc

export CEPH_DEPLOY_REPO_URL=http://mirrors.ustc.edu.cn/ceph/rpm-nautilus/el7
export CEPH_DEPLOY_GPG_URL=http://mirrors.ustc.edu.cn/ceph/keys/release.asc

export CEPH_DEPLOY_REPO_URL=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7
export CEPH_DEPLOY_GPG_URL=http://mirrors.aliyun.com/ceph/keys/release.asc

export CEPH_DEPLOY_REPO_URL=https://download.ceph.com/rpm-nautilus/el7
export CEPH_DEPLOY_GPG_URL=https://download.ceph.com/keys/release.asc


# ceph-deploy install ceph1 ceph2 ceph3

or

# ceph-deploy install --release nautilus ceph1 ceph2 ceph3
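Optionally confirm that all three nodes ended up with the same Ceph release, for example:
for h in ceph1 ceph2 ceph3; do ssh root@$h ceph --version; done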

Initialize the cluster
# cd my_cluster
# ceph-deploy mon create-initial
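After this step ceph-deploy gathers the admin and bootstrap keyrings into the working directory; a quick look (the file names shown are the usual defaults):
ls -1 ceph.conf ceph.client.admin.keyring ceph.bootstrap-*.keyring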

Copy the admin keyring and config
Copy the admin files to whichever nodes will manage the cluster, as needed:
ceph-deploy admin ceph1 ceph2 ceph3

Deploy the Manager daemon
ceph-deploy mgr create ceph1
Add standby Manager daemons
ceph-deploy mgr create ceph2
ceph-deploy mgr create ceph3

Check the cluster health
[root@ceph1 my_cluster]# ceph -s
  cluster:
    id:     f75e1135-05c8-4765-9503-bb353722c879
    health: HEALTH_WARN
            clock skew detected on mon.ceph2, mon.ceph3

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 7m)
    mgr: ceph1(active, since 24s), standbys: ceph2, ceph3
    osd: 0 osds: 0 up, 0 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:

[root@ceph1 my_cluster]#

[root@ceph1 my_cluster]# ceph health
HEALTH_WARN clock skew detected on mon.ceph2, mon.ceph3
[root@ceph1 my_cluster]#
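The clock skew warning usually clears once NTP converges on the monitor nodes; one way to nudge it along (a sketch):
ssh ceph2 'systemctl restart ntpd; ntpq -p'
ssh ceph3 'systemctl restart ntpd; ntpq -p'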


Add OSDs
ceph-deploy osd create --data /dev/sdb ceph1
ceph-deploy osd create --data /dev/sdb ceph2
ceph-deploy osd create --data /dev/sdb ceph3
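If /dev/sdb carries leftover partitions or old LVM/Ceph signatures, ceph-deploy may refuse to use it; a zap step like the following (adjust the device name to your environment) usually clears it before re-running osd create:
ceph-deploy disk zap ceph1 /dev/sdb
ceph-deploy disk zap ceph2 /dev/sdb
ceph-deploy disk zap ceph3 /dev/sdb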

[root@ceph1 my_cluster]# ceph -s
  cluster:
    id:     f75e1135-05c8-4765-9503-bb353722c879
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 5m)
    mgr: ceph1(active, since 6m), standbys: ceph2, ceph3
    osd: 3 osds: 3 up (since 11s), 3 in (since 11s)

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   3.0 GiB used, 432 GiB / 435 GiB avail
    pgs:

[root@ceph1 my_cluster]# ceph osd tree
ID CLASS WEIGHT TYPE NAME    STATUS REWEIGHT PRI-AFF
-1            0 root default
 0   hdd      0 osd.0            up  1.00000 1.00000
 1   hdd      0 osd.1            up  1.00000 1.00000
 2   hdd      0 osd.2            up  1.00000 1.00000
[root@ceph1 my_cluster]#

配置dashboard
1.    Install the ceph-mgr-dashboard package from one of the links below (match the version to your installed Ceph release)
# yum install http://download.ceph.com/rpm-nautilus/el7/noarch/ceph-mgr-dashboard-14.2.1-0.el7.noarch.rpm

# yum install http://mirrors.163.com/ceph/rpm-nautilus/el7/noarch/ceph-mgr-dashboard-14.2.2-0.el7.noarch.rpm
Note: it may ask for a few dependencies; install them with yum, then re-run the command above.
2.    Enable the ceph mgr dashboard
# ceph mgr module enable dashboard --force 
# ceph mgr module ls 
3.    Create self-signed certificate
# ceph dashboard create-self-signed-cert 
Self-signed certificate created 
4.    Create a user for Dashboard
Syntax: ceph dashboard ac-user-create <username> <password> administrator

# ceph dashboard ac-user-create cent password administrator 
{"username": "cent", "lastUpdate": 1560292901, "name": null, "roles": ["administrator"], "password": "$2b$12$w60gItcbKd6PULNYI9McmOBMiAzFoKJ9T9XGva8vC6dxIyqMsE4kK", "email": null}

# ceph mgr services 
{
    "dashboard": "https://ceph-mgr:8443/"
}
Note: you can also access the dashboard via the IP address of the ceph-mgr node instead of the hostname.
5.    Make sure the firewall port is open (only needed if firewalld is running; it was disabled earlier in this guide)
# firewall-cmd --add-port=8443/tcp --permanent 
# firewall-cmd --reload 
6.    Open the dashboard url in any browser
https://ceph-mgr:8443 or https://192.168.1.10:8443
7.    Enter the username: cent and password: password
Here you go...

On the client:
yum -y install centos-release-ceph-nautilus.noarch
yum -y install ceph-common

On the Ceph server:
# ceph auth get-or-create client.clt132 mon 'allow r' osd 'allow class-read object_prefix rbd_children,allow rwx pool=rbd'
[client.clt132]
key = AQCRVYldu2N4CBAAD5UiNpWnrE3GlHVLa12Miw==

ceph auth get-or-create client.clt132 | tee /etc/ceph/ceph.client.clt132.keyring
scp /etc/ceph/ceph.client.clt132.keyring 192.168.111.132:/etc/ceph/
scp /etc/ceph/ceph.conf 192.168.111.132:/etc/ceph/
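The caps above assume a pool named rbd already exists. If it does not, a sketch of creating and initializing it on the cluster first (the PG count of 64 is only an assumption for a small test cluster):
ceph osd pool create rbd 64 64
rbd pool init rbd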

On the client:
rbd --image rbd_data1 info --name client.clt132


ceph -s --name client.clt132
rbd create rbd/rbd132 --size 1G --image-feature layering --name client.clt132

rbd --image rbd132 info --name client.clt132
rbd map rbd/rbd132 --name client.clt132
rbd showmapped --name client.clt132
mkfs.xfs /dev/rbd0
mount /dev/rbd0 /mnt/
df -h
umount /mnt/
rbd unmap rbd/rbd132 --name client.clt132

rbd map rbd/rbd_data1 --name client.clt132
mount /dev/rbd1 /mnt/
umount /mnt/
rbd unmap rbd/rbd_data1 --name client.clt132
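To have a mapping survive reboots, ceph-common ships an rbdmap service; a sketch of the entry format in /etc/ceph/rbdmap (names match the example above) and enabling the unit:
## /etc/ceph/rbdmap
## rbd/rbd132  id=clt132,keyring=/etc/ceph/ceph.client.clt132.keyring
systemctl enable --now rbdmap.service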
 
