服务端
安装
yum install iscsi-initiator-utils
yum install scsi-target-utils
yum install lsscsi
service iscsi start
service iscsid start
service tgtd start
chkconfig iscsi on
chkconfig iscsid on
chkconfig tgtd on
配置
sdb,sdc,sdd三个磁盘,允许所有客户端访问
tgtadm --lld iscsi --mode target --op new --tid 1 --targetname disk0
tgtadm --lld iscsi --mode target --op new --tid 2 --targetname disk1
tgtadm --lld iscsi --mode target --op new --tid 3 --targetname disk2
tgtadm --lld iscsi --mode logicalunit --op new --tid 1 --lun 1 --backing-store /dev/sdb
tgtadm --lld iscsi --mode logicalunit --op new --tid 2 --lun 2 --backing-store /dev/sdc
tgtadm --lld iscsi --mode logicalunit --op new --tid 3 --lun 3 --backing-store /dev/sdd
tgtadm --lld iscsi --mode target --op bind --tid 1 --initiator-address ALL
tgtadm --lld iscsi --mode target --op bind --tid 2 --initiator-address ALL
tgtadm --lld iscsi --mode target --op bind --tid 3 --initiator-address ALL
生成配置文件
tgt-admin --dump > /etc/tgt/targets.conf
启动服务
...
客户端
发现磁盘
[root@db10 ~]# iscsiadm -m discovery -t sendtargets -p 1.1.1.252
Starting iscsid: [ OK ]
1.1.1.252:3260,1 disk0
1.1.1.252:3260,1 disk1
1.1.1.252:3260,1 disk2
登陆iscsi
(把--login改成--logout就是取消)
iscsiadm --mode node --targetname disk0 --portal 1.1.1.252:3260 --login
iscsiadm --mode node --targetname disk1 --portal 1.1.1.252:3260 --login
iscsiadm --mode node --targetname disk2 --portal 1.1.1.252:3260 --login
[root@db10 ~]# fdisk -l | grep "Disk /"
Disk /dev/sda: 34.4 GB, 34359738368 bytes
Disk /dev/sdb: 8589 MB, 8589934592 bytes
Disk /dev/sdc: 8589 MB, 8589934592 bytes
Disk /dev/sdd: 8589 MB, 8589934592 bytes
多路径
用iscsiadm登陆另外的ip。
iscsiadm --mode node --targetname disk0 --portal 2.2.2.252:3260 --login
iscsiadm --mode node --targetname disk1 --portal 2.2.2.252:3260 --login
iscsiadm --mode node --targetname disk2 --portal 2.2.2.252:3260 --login
看见了重复的sdN
[root@db10 ~]# fdisk -l | grep "Disk /"
Disk /dev/sda: 34.4 GB, 34359738368 bytes
Disk /dev/sdb: 8589 MB, 8589934592 bytes
Disk /dev/sdc: 8589 MB, 8589934592 bytes
Disk /dev/sdd: 8589 MB, 8589934592 bytes
Disk /dev/sde: 8589 MB, 8589934592 bytes
Disk /dev/sdf: 8589 MB, 8589934592 bytes
Disk /dev/sdg: 8589 MB, 8589934592 bytes
[root@db10 ~]# scsi_id -u -g /dev/sdb
1IET_00010001
[root@db10 ~]# scsi_id -u -g /dev/sde
1IET_00010001
安装多路径软件
yum -y install device-mapper*
chkconfig multipathd on
service multipathd start
配置映射
创建配置文件
[root@db10 ~]# cat /etc/multipath.conf
defaults {
udev_dir /dev
polling_interval 10
path_selector "round-robin 0"
path_grouping_policy multibus
getuid_callout "/lib/udev/scsi_id --whitelisted --replace-whitespace -g -u -d /dev/%n"
prio const
path_checker readsector0
rr_min_io 100
max_fds 8192
rr_weight priorities
failback immediate
no_path_retry fail
user_friendly_names yes
}
blacklist {
wwid 1ATA_VBOX_HARDDISK_VB2271fad5-dee44b6d
}
multipaths {
multipath {
wwid 1IET_00010001
alias mpath_0
}
multipath {
wwid 1IET_00020001
alias mpath_1
}
multipath {
wwid 1IET_00030001
alias mpath_2
}
}
devices {
device {
vendor IET
product VIRTUAL-DISK
}
}
说明:
defaults里是其它配置的默认参数,供其它配置继承。
polling_interval:链路的检查间隔,单位是秒。默认为5秒。检查间隔会逐步增加到4*polling_interval。
path_selector:
round-robin 0:向每个通道分配等量IO;
queue-length 0:向未完成IO最少的通道发送新IO;
service-time 0:根据每个通道的未完成IO和吞吐量分配新IO;
path_grouping_policy:通道的优先策略
failover 每个组只有一个通道(主备模式);
multibus 所有通道放到一个组(负载均衡);
group_by_serial 按serial number分组;
group_by_prio 按“prio”参数指定的值来分组;
group_by_node_name 按/sys/class/fc_transport/target*/node_name的名称分组
默认策略是multibus
getuid_callout:指定识别设备UID使用的命令
prio:决定通道优先级的方式,const表示所有通道优先级一样,也是默认的方式。其它的还有emc,alua,tpg_pref,ontap,rdac,hp_sw,hds,都是针对不同厂商的产品的。
path_checker:通道状态的检查方式:
readsector0:读取设备的第一个sector;
tur:向设备发送TEST UNIT READY命令;
directio:通过direct I/O读取设备的第一个sector;
emc_clariion,hp_sw,rdac这三个是针对特定厂商产品的;
默认值是readsector0。
rr_min_io:在一个优先级组的多个通道之间分配IO时,每次给一个通道分配的IO的最小次数。相当于IO分配的颗粒度。默认为1。
这个参数值针对 2.6.31之前的内核,新内核需要设置rr_min_io_rq,其默认为1000。
max_fds:指定multipath和multipathd能打开的文件描述符数量。这个值和ulimit -n相关。
/proc/sys/fs/nr_open显示的是系统能打开的文件描述符数。
如果没有设置max_fds,multipath和multipathd能打开的文件描述符数量就取决于调用它们的进程。
通常是1024。如果通道数量乘以32大于1024,最好是设置为通道数量*32。
blacklist是忽略哪些磁盘,比如本地磁盘
multipaths里的每个multipath是一个映射,指定wwid和alias。
wwid就是defaults里getuid_callout指定的命令的返回值。相当于绑定uuid
alias是别名,映射好的设备会出现在/dev/mapper里。
devices指定的是设备类型
vendor: 取自/sys/block/device_name/device/vendor
product: 取自/sys/block/device_name/device/model
启动multipathd服务后,message日志:
Apr 3 14:54:59 db10 kernel: device-mapper: multipath round-robin: version 1.0.0 loaded
Apr 3 14:54:59 db10 multipathd: mpath_0: load table [0 16777216 multipath 0 0 1 1 round-robin 0 2 1 8:16 1 8:64 1]
Apr 3 14:54:59 db10 multipathd: mpath_1: load table [0 16777216 multipath 0 0 1 1 round-robin 0 2 1 8:32 1 8:80 1]
Apr 3 14:54:59 db10 multipathd: mpath_2: load table [0 16777216 multipath 0 0 1 1 round-robin 0 2 1 8:48 1 8:96 1]
Apr 3 14:54:59 db10 multipathd: mpath_0: event checker started
Apr 3 14:54:59 db10 multipathd: mpath_1: event checker started
Apr 3 14:54:59 db10 multipathd: mpath_2: event checker started
Apr 3 14:54:59 db10 multipathd: path checkers start up
映射结果
[root@db10 device]# ll /dev/mapper/
total 0
crw-rw---- 1 root root 10, 58 Apr 3 10:25 control
lrwxrwxrwx 1 root root 7 Apr 3 14:54 mpath_0 -> ../dm-0
lrwxrwxrwx 1 root root 7 Apr 3 14:54 mpath_1 -> ../dm-1
lrwxrwxrwx 1 root root 7 Apr 3 14:54 mpath_2 -> ../dm-2
[root@db10 ~]# multipath -ll
mpath_2 (1IET_00030001) dm-2 IET,VIRTUAL-DISK
size=8.0G features='0' hwhandler='0' wp=rw
`-+- policy='round-robin 0' prio=1 status=active
  |- 5:0:0:1 sdd 8:48 active ready running
  `- 8:0:0:1 sdg 8:96 active ready running
mpath_1 (1IET_00020001) dm-1 IET,VIRTUAL-DISK
size=8.0G features='0' hwhandler='0' wp=rw
`-+- policy='round-robin 0' prio=1 status=active
  |- 4:0:0:1 sdc 8:32 active ready running
  `- 7:0:0:1 sdf 8:80 active ready running
mpath_0 (1IET_00010001) dm-0 IET,VIRTUAL-DISK
size=8.0G features='0' hwhandler='0' wp=rw
`-+- policy='round-robin 0' prio=1 status=active
  |- 3:0:0:1 sdb 8:16 active ready running
  `- 6:0:0:1 sde 8:64 active ready running
[root@db10 ~]# fdisk -l | grep "Disk /"
Disk /dev/sda: 34.4 GB, 34359738368 bytes
Disk /dev/sdb: 8589 MB, 8589934592 bytes
Disk /dev/sdc: 8589 MB, 8589934592 bytes
Disk /dev/sdd: 8589 MB, 8589934592 bytes
Disk /dev/sde: 8589 MB, 8589934592 bytes
Disk /dev/sdf: 8589 MB, 8589934592 bytes
Disk /dev/sdg: 8589 MB, 8589934592 bytes
Disk /dev/mapper/mpath_0: 8589 MB, 8589934592 bytes
Disk /dev/mapper/mpath_1: 8589 MB, 8589934592 bytes
Disk /dev/mapper/mpath_2: 8589 MB, 8589934592 bytes
来自 “ ITPUB博客 ” ,链接:http://blog.itpub.net/26239116/viewspace-1485362/,如需转载,请注明出处,否则将追究法律责任。
转载于:http://blog.itpub.net/26239116/viewspace-1485362/