LNMP搭建wordpress,单机版、数据库分离、web集群(二)之Ceph分布式存储
1)构建yum源
[root@client ~]# yum install -y vsftpd
[root@client ~]# systemctl restart vsftpd
[root@client ~]# mkdir /var/ftp/ceph
[root@client ~]# mount ceph10.iso /var/ftp/ceph/
[root@client ~]# systemctl stop firewalld
2)配置SSH
[root@node1 ~]# ssh-keygen -f /root/.ssh/id_rsa -N ''
[root@node1 ~]# for i in 41 42 43; do ssh-copy-id 192.168.2.$i; done
3)配置主机名
[root@node1 ~]# vim /etc/hosts
192.168.2.41 node1
192.168.2.42 node2
192.168.2.43 node3
[root@node1 ~]# for i in 41 42 43; do scp /etc/hosts 192.168.2.$i:/etc; done
4)为每台机器配置ceph的yum源
[root@node1 ~]# vim /etc/yum.repos.d/ceph.repo
[mon]
name=mon
baseurl=ftp://192.168.4.10/ceph/MON
gpgcheck=0
[osd]
name=osd
baseurl=ftp://192.168.4.10/ceph/OSD
gpgcheck=0
[tools]
name=tools
baseurl=ftp://192.168.4.10/ceph/Tools
gpgcheck=0
[root@node1 ~]# for i in 41 42 43 ; do scp /etc/yum.repos.d/ceph.repo 192.168.2.$i:/etc/yum.repos.d/; done
[root@node1 ~]# yum repolist
已加载插件:fastestmirror
Loading mirror speeds from cached hostfile
* base: mirror.bit.edu.cn
* extras: mirrors.huaweicloud.com
* updates: mirror.bit.edu.cn
mon | 4.1 kB 00:00:00
(1/2): mon/group_gz | 489 B 00:00:00
(2/2): mon/primary_db | 40 kB 00:00:00
源标识 源名称 状态
base/7/x86_64 CentOS-7 - Base 10,070
extras/7/x86_64 CentOS-7 - Extras 397
mon mon 41
osd osd 28
tools tools 33
updates/7/x86_64 CentOS-7 - Updates 760
repolist: 11,329
5)配置NTP时间同步,默认192.168.2.254为NTP服务器
[root@node1 ~]# vim /etc/chrony.conf
... ...
server 192.168.2.254 iburst
[root@node1 ~]# for i in 41 42 43
> do
> scp /etc/chrony.conf 192.168.2.$i:/etc/
> ssh 192.168.2.$i "systemctl restart chronyd"
> done
为每台虚拟机添加两块20G的磁盘
6)给node1主机装ceph-deploy,创建目录
[root@node1 ~]# yum install -y ceph-deploy
[root@node1 ~]# mkdir ceph-cluster
[root@node1 ~]# cd ceph-cluster/
[root@node1 ceph-cluster]# for i in node1 node2 node3; do ssh $i "yum install -y ceph-mon ceph-osd ceph-mds"; done ==>>给所有ceph节点安装ceph相关包(加-y避免远程交互确认)
7)初始化mon服务
[root@node1 ceph-cluster]# ceph-deploy new node1 node2 node3
[root@node1 ceph-cluster]# ceph-deploy mon create-initial
[root@node1 ceph-cluster]# ceph -s
8)准备磁盘分区,创建journal盘,并永久修改设备权限
[root@node1 ceph-cluster]# for i in node1 node2 node3
> do
> ssh $i "parted /dev/vdb mklabel gpt"
> ssh $i "parted /dev/vdb mkpart primary 1 100%"
> done
9)在node1、node2、node3上面操作
[root@node1 ceph-cluster]# chown ceph.ceph /dev/vdb1 ==>>临时修改权限
[root@node1 ceph-cluster]# vim /etc/udev/rules.d/70-vdb.rules
ENV{DEVNAME}=="/dev/vdb1",OWNER="ceph",GROUP="ceph"
10)使用ceph-deploy工具初始化数据磁盘(仅node1操作)
[root@node1 ceph-cluster]# ceph-deploy disk zap node1:vdc
[root@node1 ceph-cluster]# ceph-deploy disk zap node2:vdc
[root@node1 ceph-cluster]# ceph-deploy disk zap node3:vdc
11)初始化OSD集群
[root@node1 ceph-cluster]# ceph-deploy osd create node1:vdc:/dev/vdb1 ==>>创建osd存储设备,vdc为集群提供存储空间,vdb1提供JOURNAL缓存,一个存储设备对应一个缓存设备,缓存需要SSD,不需要很大
[root@node1 ceph-cluster]# ceph-deploy osd create node2:vdc:/dev/vdb1
[root@node1 ceph-cluster]# ceph-deploy osd create node3:vdc:/dev/vdb1
[root@node1 ceph-cluster]# ceph -s ==>>查看集群状态
11)部署ceph文件系统
[root@node1 ceph-cluster]# ceph-deploy mds create node3 ==>>启动mds服务
[root@node1 ceph-cluster]# ceph osd pool create cephfs_data 128 ==>>创建存储池
[root@node1 ceph-cluster]# ceph osd pool create cephfs_metadata 128
[root@node1 ceph-cluster]# ceph osd lspools
12)创建文件系统
[root@node1 ceph-cluster]# ceph fs new myfs1 cephfs_metadata cephfs_data
[root@node1 ceph-cluster]# ceph osd lspools
13)卸载web1、web2、web3的NFS共享
[root@web1 ~]# /usr/local/nginx/sbin/nginx -s stop
[root@web1 ~]# umount /usr/local/nginx/html/
[root@web1 ~]# vim /etc/fstab
#192.168.2.31:/web_share/html /usr/local/nginx/html nfs defaults 0 0
14)web服务器永久挂载ceph文件系统在web1、web2、web3上面操作
第一种方案:
[root@node1 ceph-cluster]# cat /etc/ceph/ceph.client.admin.keyring
[client.admin]
key = safweafwa646464FEWFWEF==
[root@web1 ~]# mount -t ceph 192.168.2.41:6789:/ /usr/local/nginx/html -o name=admin,secret=safweafwa646464FEWFWEF==
[root@web1 ~]# echo 'mount -t ceph 192.168.2.41:6789:/ /usr/local/nginx/html -o name=admin,secret=safweafwa646464FEWFWEF==' >>/etc/rc.local
[root@web1 ~]# chmod +x /etc/rc.local
第二种方案,通过fstab永久挂载,需要安装libcephfs1软件包
[root@web1 ~]# yum install -y libcephfs1
[root@web1 ~]# vim /etc/fstab
... ...
192.168.2.41:6789:/ /usr/local/nginx/html/ ceph defaults,_netdev,name=admin,secret=safweafwa646464FEWFWEF== 0 0
第三种方案 ,对于高可用的问题,可以mount时同时写入多个IP
[root@web1 ~]# mount -t ceph 192.168.2.41:6789,192.168.2.42:6789,192.168.2.43:6789:/ /usr/local/nginx/html -o name=admin,secret=safweafwa646464FEWFWEF== ==>>临时修改
[root@web1 ~]# vim /etc/fstab
... ...
192.168.2.41:6789,192.168.2.42:6789,192.168.2.43:6789:/ /usr/local/nginx/html/ ceph defaults,_netdev,name=admin,secret=safweafwa646464FEWFWEF== 0 0 ==>>永久修改
15)数据nfs迁移到ceph
[root@nfs ~]# tar -czpf html.tar.gz -C /usr/local/nginx/html/ . ==>>用-C打包相对路径,否则解压时会多出usr/local/nginx/html/嵌套目录
[root@nfs ~]# scp html.tar.gz 192.168.2.11:/usr/local/nginx/html/
[root@web1 ~]# cd /usr/local/nginx/html/
[root@web1 html]# tar -xf html.tar.gz
[root@web1 html]# rm -fr html.tar.gz
16)恢复web服务web1、web2、web3
[root@web1 ~]# /usr/local/nginx/sbin/nginx