Configuring Shared Storage with iSCSI + Multipath

Environment

CentOS 7 running under VMware, with additional disks attached, as shown below:

[root@orcl1 dev]# lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda               8:0    0  200G  0 disk
├─sda1            8:1    0    1G  0 part /boot
└─sda2            8:2    0  199G  0 part
  ├─centos-root 253:0    0   50G  0 lvm  /
  ├─centos-swap 253:1    0    2G  0 lvm  [SWAP]
  └─centos-home 253:2    0  147G  0 lvm  /home
sdb               8:16   0   20G  0 disk
sdc               8:32   0  100G  0 disk
sr0              11:0    1 1024M  0 rom 

ASM disk groups need to be carved out of the new disk: DATA → 50G, OCR → 5G, FRI → 15G, and REDO → 10G × 2.

Disk Partitioning

[root@orcl1 ~]# lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda               8:0    0  200G  0 disk
├─sda1            8:1    0    1G  0 part /boot
└─sda2            8:2    0  199G  0 part
  ├─centos-root 253:0    0   50G  0 lvm  /
  ├─centos-swap 253:1    0    2G  0 lvm  [SWAP]
  └─centos-home 253:2    0  147G  0 lvm  /home
sdb               8:16   0   20G  0 disk
sdc               8:32   0  100G  0 disk
├─sdc1            8:33   0   50G  0 part
├─sdc2            8:34   0    1K  0 part
├─sdc5            8:37   0   10G  0 part
├─sdc6            8:38   0   10G  0 part
├─sdc7            8:39   0   15G  0 part
└─sdc8            8:40   0    5G  0 part
sr0              11:0    1 1024M  0 rom  
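
The partitioning session itself is not shown above. As a reference, here is a minimal sketch of how this layout could be produced with fdisk (an assumed reconstruction, not from the original; the blank lines accept fdisk's defaults, so walk through it interactively first if unsure):

# One 50G primary partition (sdc1 → DATA), an extended partition over the
# rest of the disk, and four logicals: 10G + 10G (REDO), 15G (FRI), 5G (OCR)
fdisk /dev/sdc <<'EOF'
n
p
1

+50G
n
e
2


n
l

+10G
n
l

+10G
n
l

+15G
n
l

+5G
w
EOF
# Re-read the partition table without rebooting
partprobe /dev/sdc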

Deploying iSCSI

# **Install** the following packages on every node that needs the shared storage
yum install epel-release
yum install scsi-target-utils iscsi-initiator-utils -y
e.g.:
[root@orcl1 ~]# systemctl start tgtd
[root@orcl1 ~]# systemctl enable tgtd
Created symlink from /etc/systemd/system/multi-user.target.wants/tgtd.service to /usr/lib/systemd/system/tgtd.service.
[root@orcl1 ~]# systemctl start iscsid
[root@orcl1 ~]# systemctl enable iscsid
Created symlink from /etc/systemd/system/multi-user.target.wants/iscsid.service to /usr/lib/systemd/system/iscsid.service. 
[root@orcl2 ~]#  systemctl start tgtd
[root@orcl2 ~]# systemctl enable tgtd
Created symlink from /etc/systemd/system/multi-user.target.wants/tgtd.service to /usr/lib/systemd/system/tgtd.service.
[root@orcl2 ~]# systemctl start iscsid
[root@orcl2 ~]# systemctl enable iscsid
Created symlink from /etc/systemd/system/multi-user.target.wants/iscsid.service to /usr/lib/systemd/system/iscsid.service. 
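
If firewalld is active (the original does not mention it), the iSCSI port also has to be opened on the target server, otherwise discovery from the other node will fail:

# Open the default iSCSI target port (3260/tcp) on orcl1
firewall-cmd --permanent --add-port=3260/tcp
firewall-cmd --reload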

# **Create the targets** on the server side; here the server is orcl1
[root@orcl1 ~]# tgtadm --lld iscsi --mode target --op new --tid 01 --targetname ocr
[root@orcl1 ~]# tgtadm --lld iscsi --mode target --op new --tid 02 --targetname data
[root@orcl1 ~]# tgtadm --lld iscsi --mode target --op new --tid 03 --targetname fri
[root@orcl1 ~]# tgtadm --lld iscsi --mode target --op new --tid 04 --targetname redo1
[root@orcl1 ~]# tgtadm --lld iscsi --mode target --op new --tid 05 --targetname redo2

# **Add a LUN** to each target on the server
[root@orcl1 ~]# tgtadm --lld iscsi --mode logicalunit --op new --tid 01 --lun 01 --backing-store /dev/sdc8
[root@orcl1 ~]# tgtadm --lld iscsi --mode logicalunit --op new --tid 02 --lun 02 --backing-store /dev/sdc1
[root@orcl1 ~]# tgtadm --lld iscsi --mode logicalunit --op new --tid 03 --lun 03 --backing-store /dev/sdc7
[root@orcl1 ~]# tgtadm --lld iscsi --mode logicalunit --op new --tid 04 --lun 04 --backing-store /dev/sdc5
[root@orcl1 ~]# tgtadm --lld iscsi --mode logicalunit --op new --tid 05 --lun 05 --backing-store /dev/sdc6

# **Bind** the targets on the server, allowing all initiator addresses
[root@orcl1 ~]# tgtadm --lld iscsi --mode target --op bind --tid 01 --initiator-address ALL
[root@orcl1 ~]# tgtadm --lld iscsi --mode target --op bind --tid 02 --initiator-address ALL
[root@orcl1 ~]# tgtadm --lld iscsi --mode target --op bind --tid 03 --initiator-address ALL
[root@orcl1 ~]# tgtadm --lld iscsi --mode target --op bind --tid 04 --initiator-address ALL
[root@orcl1 ~]# tgtadm --lld iscsi --mode target --op bind --tid 05 --initiator-address ALL
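
At this point the targets, LUNs, and ACL bindings can be verified on the server (a check the original skips; this is the standard tgt query command):

# Show every target together with its LUNs and bound initiator addresses
tgtadm --lld iscsi --mode target --op show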

# Dump the configuration to a file so it is not lost on reboot
tgt-admin --dump > /etc/tgt/targets.conf 
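
The dumped file should look roughly like the following, one block per target (an illustrative excerpt, not copied from the original system):

<target ocr>
    backing-store /dev/sdc8
    initiator-address ALL
</target>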

# Run a **discovery** test from each node
[root@orcl1 ~]# iscsiadm -m discovery -t sendtargets -p  192.168.103.75
192.168.103.75:3260,1 ocr
192.168.103.75:3260,1 data
192.168.103.75:3260,1 fri
192.168.103.75:3260,1 redo1
192.168.103.75:3260,1 redo2
[root@orcl2 ~]# iscsiadm -m discovery -t sendtargets -p  192.168.103.75
192.168.103.75:3260,1 ocr
192.168.103.75:3260,1 data
192.168.103.75:3260,1 fri
192.168.103.75:3260,1 redo1
192.168.103.75:3260,1 redo2

# Once discovery succeeds, **log in** to the targets; run on both nodes
[root@orcl1 ~]# iscsiadm --mode node --targetname ocr --portal 192.168.103.75:3260 --login
Logging in to [iface: default, target: ocr, portal: 192.168.103.75,3260] (multiple)
Login to [iface: default, target: ocr, portal: 192.168.103.75,3260] successful.
[root@orcl1 ~]# iscsiadm --mode node --targetname data --portal 192.168.103.75:3260 --login
Logging in to [iface: default, target: data, portal: 192.168.103.75,3260] (multiple)
Login to [iface: default, target: data, portal: 192.168.103.75,3260] successful.
[root@orcl1 ~]# iscsiadm --mode node --targetname fri --portal 192.168.103.75:3260 --login
Logging in to [iface: default, target: fri, portal: 192.168.103.75,3260] (multiple)
Login to [iface: default, target: fri, portal: 192.168.103.75,3260] successful.
[root@orcl1 ~]# iscsiadm --mode node --targetname redo1 --portal 192.168.103.75:3260 --login
Logging in to [iface: default, target: redo1, portal: 192.168.103.75,3260] (multiple)
Login to [iface: default, target: redo1, portal: 192.168.103.75,3260] successful.
[root@orcl1 ~]# iscsiadm --mode node --targetname redo2 --portal 192.168.103.75:3260 --login
Logging in to [iface: default, target: redo2, portal: 192.168.103.75,3260] (multiple)
Login to [iface: default, target: redo2, portal: 192.168.103.75,3260] successful.
[root@orcl1 ~]# lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda               8:0    0  200G  0 disk
├─sda1            8:1    0    1G  0 part /boot
└─sda2            8:2    0  199G  0 part
  ├─centos-root 253:0    0   50G  0 lvm  /
  ├─centos-swap 253:1    0    2G  0 lvm  [SWAP]
  └─centos-home 253:2    0  147G  0 lvm  /home
sdb               8:16   0   20G  0 disk
sdc               8:32   0  100G  0 disk
├─sdc1            8:33   0   50G  0 part
├─sdc2            8:34   0    1K  0 part
├─sdc5            8:37   0   10G  0 part
├─sdc6            8:38   0   10G  0 part
├─sdc7            8:39   0   15G  0 part
└─sdc8            8:40   0    5G  0 part
sdd               8:48   0    5G  0 disk
sde               8:64   0   50G  0 disk
sdf               8:80   0   15G  0 disk
sdg               8:96   0   10G  0 disk
sdh               8:112  0   10G  0 disk
sr0              11:0    1 1024M  0 rom

Note: the device letters and sizes may not match up between the two nodes here; the fix is described below.
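
Two practical follow-ups the post glosses over, both standard iscsiadm usage: all discovered targets can be logged in with a single command, and the node records can be marked automatic so the sessions come back after a reboot.

# Log in to every discovered target in one shot (alternative to the per-target logins above)
iscsiadm -m node -L all
# Make all node records log in automatically at boot
iscsiadm -m node --op update -n node.startup -v automatic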

Deploying Multipath

The shared disks constructed over iSCSI are sdd through sdh above. ASM needs them set up as multipath devices. Run the following on both nodes.

# **Install** the packages; run on both nodes
yum -y install device-mapper-* device-mapper-multipath-* device-mapper-1* 

# **Start** the multipath service and generate the default configuration template
[root@orcl1 ~]# /etc/init.d/multipathd start
Starting multipathd (via systemctl):                       [  OK  ]
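
On CentOS 7 the usual way to create the initial /etc/multipath.conf template mentioned above is mpathconf (assumed here, since the original does not show the command):

# Generate /etc/multipath.conf with sane defaults and enable multipathd
mpathconf --enable --with_multipathd y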

# Edit the multipath **configuration file**
[root@orcl1 ~]# vim /etc/multipath.conf 
multipaths {
    multipath {
        wwid "360000000000000000e00000000010001"
        alias ocr
    }
    multipath {
        wwid "360000000000000000e00000000020002"
        alias data
    }
    multipath {
        wwid "360000000000000000e00000000030003"
        alias fri
    }
    multipath {
        wwid "360000000000000000e00000000040004"
        alias redo1
    }
    multipath {
        wwid "360000000000000000e00000000050005"
        alias redo2
    }
}
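
Depending on how the template was generated, a defaults/blacklist section along these lines keeps the friendly aliases working and the local disks out of multipath (a common addition, assumed rather than taken from the original):

defaults {
    user_friendly_names yes
}
blacklist {
    # Keep the local VMware disks (sda, sdb) out of multipath
    devnode "^sd[ab]$"
}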
Note: there are several ways to obtain the WWID, and since it only serves as an identifier the requirements are not strict. Distinguishing the disks by their scsi_id also works, but that can cause the aliases not to take effect, so the encoding above is used instead. The IDs can be inspected with:
[root@orcl1 ~]# tgt-admin --show
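
For reference, the per-disk identifiers can also be queried directly with udev's scsi_id helper (the device letters below assume the layout from this run):

# Print the SCSI identifier of each iSCSI-backed disk
for d in sdd sde sdf sdg sdh; do
    printf '%s  ' "$d"
    /lib/udev/scsi_id -g -u "/dev/$d"
done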

# Restart the multipath service and inspect the multipath topology
[root@orcl1 ~]# /etc/init.d/multipathd restart
Restarting multipathd (via systemctl):                     [  OK  ]
[root@orcl1 ~]# multipath -ll
redo2 (360000000000000000e00000000050005) dm-7 IET     ,VIRTUAL-DISK
size=10G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 7:0:0:5 sdh 8:112 active ready running
fri (360000000000000000e00000000030003) dm-5 IET     ,VIRTUAL-DISK
size=15G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 5:0:0:3 sdf 8:80  active ready running
redo1 (360000000000000000e00000000040004) dm-6 IET     ,VIRTUAL-DISK
size=10G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 6:0:0:4 sdg 8:96  active ready running
data (360000000000000000e00000000020002) dm-4 IET     ,VIRTUAL-DISK
size=50G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 4:0:0:2 sde 8:64  active ready running
ocr (360000000000000000e00000000010001) dm-3 IET     ,VIRTUAL-DISK
size=5.0G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
  `- 3:0:0:1 sdd 8:48  active ready running                
[root@orcl1 ~]#  ll /dev/mapper/
total 0
lrwxrwxrwx 1 root root       7 Feb  3 19:02 centos-home -> ../dm-2
lrwxrwxrwx 1 root root       7 Feb  3 19:02 centos-root -> ../dm-0
lrwxrwxrwx 1 root root       7 Feb  3 19:02 centos-swap -> ../dm-1
crw------- 1 root root 10, 236 Feb  3 19:02 control
lrwxrwxrwx 1 root root       7 Feb  3 19:23 data -> ../dm-4
lrwxrwxrwx 1 root root       7 Feb  3 19:23 fri -> ../dm-5
lrwxrwxrwx 1 root root       7 Feb  3 19:23 ocr -> ../dm-3
lrwxrwxrwx 1 root root       7 Feb  3 19:23 redo1 -> ../dm-6
lrwxrwxrwx 1 root root       7 Feb  3 19:23 redo2 -> ../dm-7

Binding the Disks

Run the following on both nodes.

# Edit the udev rules file (under /etc/udev/rules.d)
[root@orcl1 rules.d]# vim 12-dm-permissions.rules
ENV{DM_NAME}=="ocr", OWNER:="grid", GROUP:="oinstall", MODE:="660"
ENV{DM_NAME}=="data", OWNER:="grid", GROUP:="oinstall", MODE:="660"
ENV{DM_NAME}=="fri", OWNER:="grid", GROUP:="oinstall", MODE:="660"
ENV{DM_NAME}=="redo1", OWNER:="grid", GROUP:="oinstall", MODE:="660"
ENV{DM_NAME}=="redo2", OWNER:="grid", GROUP:="oinstall", MODE:="660" 

# Reload the rules and apply them
[root@orcl1 rules.d]# udevadm control --reload-rules
[root@orcl1 rules.d]# udevadm trigger 
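
A quick sanity check, not shown in the original: after the trigger, the multipath device nodes should be owned by grid:oinstall with mode 660.

# dm-3 through dm-7 are the multipath ASM disks created above
ls -l /dev/dm-*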

At this point the entire ASM storage deployment is complete.
