CentOS 7.9: quickly deploy a single-node Ceph cluster for Kubernetes dynamic storage (Part 1)

This walkthrough is for test environments only; do not use it in production.
Configure the Nautilus repos
 hostnamectl set-hostname ceph79
 
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo 

cat <<END >/etc/yum.repos.d/ceph.repo
[noarch]
name=noarch
baseurl=https://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch/
enabled=1
gpgcheck=0

[x86_64]
name=x86_64
baseurl=https://mirrors.aliyun.com/ceph/rpm-nautilus/el7/x86_64/
enabled=1
gpgcheck=0
END
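With the repo file in place, it can help to rebuild the yum metadata cache so the new Ceph repo is picked up; an optional step:

yum clean all
yum makecache fast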
Install ceph-deploy and the Ceph packages
yum -y install ceph-deploy python-setuptools python2-subprocess32
yum -y install ceph ceph-mds ceph-mgr ceph-osd ceph-radosgw ceph-mon
mkdir /my-ceph &&  cd /my-ceph
Create the monitor node
ceph-deploy new ceph79
For a single-node deployment, append the following parameters at the end of ceph.conf:

vim ceph.conf

osd pool default min_size = 1 
osd pool default size = 1
public network = 192.168.8.79/24  # <<<--- your VM's IP/subnet
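For reference, after ceph-deploy new plus the lines above, the full ceph.conf should look roughly like this (the fsid is generated for you, so treat this as a sketch, not something to copy verbatim):

[global]
fsid = <generated-uuid>
mon_initial_members = ceph79
mon_host = 192.168.8.79
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
osd pool default min_size = 1
osd pool default size = 1
public network = 192.168.8.79/24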
Initialize the cluster
ceph-deploy mon create-initial
ceph-deploy admin ceph79
ceph-deploy mgr create ceph79
ceph-deploy mds create ceph79
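At this point the monitor, manager, and MDS daemons should be running; a quick sanity check (output will differ on your machine):

ceph -s         # overall cluster status: one mon in quorum, mgr active
ceph mon stat   # confirms the ceph79 monitor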
Initialize the disk
ceph-deploy disk zap ceph79 /dev/sdb 
ceph-deploy osd create --data  /dev/sdb ceph79
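Assuming /dev/sdb was wiped cleanly, the new OSD should now be up; you can verify with:

ceph osd tree   # expect osd.0 with status up
ceph osd df     # per-OSD capacity and usage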
Create the pools
ceph osd pool create fs_kube_data 32 
ceph osd pool create fs_kube_metadata 32
ceph fs new cephfs fs_kube_metadata fs_kube_data
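A quick check that the filesystem was created and an MDS went active:

ceph fs ls      # name: cephfs, metadata pool: fs_kube_metadata, data pools: [fs_kube_data]
ceph mds stat   # expect cephfs:1 with the ceph79 MDS active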
Copy credentials to the client
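If /etc/ceph does not exist on the client yet, create it first (here <client-ip> is a placeholder for your client's address):

ssh root@<client-ip> "mkdir -p /etc/ceph"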
scp -r ceph.client.admin.keyring ceph.conf root@<client-ip>:/etc/ceph/
Install the client
yum -y install ceph-fuse
Mount with ceph-fuse
[root@localhost ceph]# ceph-fuse -m 192.168.8.79:6789 /mnt/
ceph-fuse[2633]: starting ceph client
2020-12-02 20:58:18.964 7f2176ce2f80 -1 init, newargv = 0x55b0d1219170 newargc=9
ceph-fuse[2633]: starting fuse
[root@localhost ceph]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda3        34G  4.3G   29G   13% /
/dev/sda1      1014M  142M  873M   14% /boot
tmpfs           187M     0  187M    0% /run/user/0
ceph-fuse        14G     0   14G    0% /mnt
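ceph-fuse is the userspace client; the kernel client can mount the same filesystem and is usually faster. A rough sketch, assuming the ceph kernel module is available and /etc/ceph was populated on the client as above:

umount /mnt                                   # if the FUSE mount is still active
ADMIN_KEY=$(ceph auth get-key client.admin)   # reads the admin keyring copied earlier
mount -t ceph 192.168.8.79:6789:/ /mnt -o name=admin,secret=$ADMIN_KEY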
Adjusting PGs and replica counts

View pool details

[root@ceph79 my-ceph]# ceph osd pool ls detail
pool 1 'fs_kube_data' replicated size 1 min_size 1 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode warn last_change 12 flags hashpspool stripe_width 0 application cephfs
pool 2 'fs_kube_metadata' replicated size 1 min_size 1 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode warn last_change 12 flags hashpspool stripe_width 0 pg_autoscale_bias 4 pg_num_min 16 recovery_priority 5 application cephfs

Set the replica count (size) to 2

[root@ceph79 my-ceph]# ceph osd pool set fs_kube_data size 2
set pool 1 size to 2

[root@ceph79 my-ceph]# ceph osd pool set fs_kube_metadata size 2
set pool 2 size to 2
# check
[root@ceph79 my-ceph]# ceph osd pool ls detail
pool 1 'fs_kube_data' replicated size 2 min_size 1 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode warn last_change 13 flags hashpspool stripe_width 0 application cephfs
pool 2 'fs_kube_metadata' replicated size 2 min_size 1 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode warn last_change 15 flags hashpspool stripe_width 0 pg_autoscale_bias 4 pg_num_min 16 recovery_priority 5 application cephfs
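One caveat: the default CRUSH rule uses host as the failure domain, so on a one-host cluster size 2 cannot place the second replica and PGs will sit undersized until more hosts (or a looser rule) exist. On a test box with at least two OSDs, you could switch the pools to an osd-level failure domain; a sketch (the rule name replicated_osd is arbitrary):

ceph osd crush rule create-replicated replicated_osd default osd
ceph osd pool set fs_kube_data crush_rule replicated_osd
ceph osd pool set fs_kube_metadata crush_rule replicated_osd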

Adjust pg_num and pgp_num

[root@ceph79 my-ceph]# ceph osd pool set fs_kube_metadata pg_num 128
set pool 2 pg_num to 128

[root@ceph79 my-ceph]# ceph osd pool set fs_kube_metadata pgp_num 128
set pool 2 pgp_num to 128
# check
[root@ceph79 my-ceph]# ceph osd pool get fs_kube_metadata pg_num
pg_num: 128
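The data pool can be grown the same way if needed, and a final status check confirms the PGs settle:

ceph osd pool set fs_kube_data pg_num 128
ceph osd pool set fs_kube_data pgp_num 128
ceph -s   # wait for all PGs to reach active+clean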