系统SCAN磁盘 | echo "1" > /sys/class/fc_host/host3/issue_lip; echo "1" > /sys/class/fc_host/host4/issue_lip; echo "1" > /sys/class/fc_host/host2/issue_lip; echo "1" > /sys/class/fc_host/host1/issue_lip; echo "0" > /sys/class/fc_host/host1/issue_lip; echo "0" > /sys/class/fc_host/host2/issue_lip; echo "0" > /sys/class/fc_host/host3/issue_lip; echo "0" > /sys/class/fc_host/host4/issue_lip |
multipath 配置 | [root@localhost fc_host]# /sbin/mpathconf multipath is enabled find_multipaths is disabled user_friendly_names is enabled dm_multipath module is not loaded multipathd is chkconfiged off [root@localhost fc_host]# mpathconf --enable --with_multipathd y Starting multipathd daemon: [ OK ] [root@localhost fc_host]# mpathconf --enable [root@localhost fc_host]# service multipathd restart ok Stopping multipathd daemon: [ OK ] Starting multipathd daemon: [ OK ] [root@localhost fc_host]# mpathconf --enable --user_friendly_names y --find_multipaths y [root@localhost fc_host]# multipath -v2 [root@localhost fc_host]# multipath -ll |
系统识别磁盘资源 | [root@localhost fc_host]# multipath -ll mpathe (360060e801256a800504056a800000203) dm-6 HITACHI,OPEN-V size=2.0T features='1 queue_if_no_path' hwhandler='0' wp=rw `-+- policy='round-robin 0' prio=1 status=active |- 1:0:0:3 sde 8:64 active ready running |- 3:0:0:3 sdj 8:144 active ready running |- 1:0:1:3 sdt 65:48 active ready running `- 3:0:1:3 sdo 8:224 active ready running mpathd (360060e801256a800504056a800000202) dm-5 HITACHI,OPEN-V size=2.0T features='1 queue_if_no_path' hwhandler='0' wp=rw `-+- policy='round-robin 0' prio=1 status=active |- 1:0:0:2 sdd 8:48 active ready running |- 3:0:0:2 sdi 8:128 active ready running |- 1:0:1:2 sds 65:32 active ready running `- 3:0:1:2 sdn 8:208 active ready running mpathc (360060e801256a800504056a800000201) dm-4 HITACHI,OPEN-V size=2.0T features='1 queue_if_no_path' hwhandler='0' wp=rw `-+- policy='round-robin 0' prio=1 status=active |- 1:0:0:1 sdc 8:32 active ready running |- 3:0:0:1 sdh 8:112 active ready running |- 1:0:1:1 sdr 65:16 active ready running `- 3:0:1:1 sdm 8:192 active ready running mpathb (360060e801256a800504056a800000200) dm-3 HITACHI,OPEN-V size=2.0T features='1 queue_if_no_path' hwhandler='0' wp=rw `-+- policy='round-robin 0' prio=1 status=active |- 1:0:0:0 sdb 8:16 active ready running |- 3:0:0:0 sdg 8:96 active ready running |- 1:0:1:0 sdq 65:0 active ready running `- 3:0:1:0 sdl 8:176 active ready running mpathf (360060e801256a800504056a800000204) dm-7 HITACHI,OPEN-V size=2.0T features='1 queue_if_no_path' hwhandler='0' wp=rw `-+- policy='round-robin 0' prio=1 status=active |- 1:0:0:4 sdf 8:80 active ready running |- 3:0:0:4 sdk 8:160 active ready running |- 1:0:1:4 sdu 65:64 active ready running `- 3:0:1:4 sdp 8:240 active ready running [root@localhost fc_host]# |
创建PV | [root@localhost fc_host]# [root@localhost fc_host]# [root@localhost fc_host]# pvcreate /dev/mapper/^C [root@localhost fc_host]# pvcreate /dev/mapper/mpathb Physical volume "/dev/mapper/mpathb" successfully created [root@localhost fc_host]# [root@localhost fc_host]# [root@localhost fc_host]# pvcreate /dev/mapper/mpathc Physical volume "/dev/mapper/mpathc" successfully created [root@localhost fc_host]# [root@localhost fc_host]# [root@localhost fc_host]# [root@localhost fc_host]# pvcreate /dev/mapper/mpathd Physical volume "/dev/mapper/mpathd" successfully created [root@localhost fc_host]# [root@localhost fc_host]# [root@localhost fc_host]# [root@localhost fc_host]# pvcreate /dev/mapper/mpathe Physical volume "/dev/mapper/mpathe" successfully created [root@localhost fc_host]# [root@localhost fc_host]# [root@localhost fc_host]# pvcreate /dev/mapper/mpathf Physical volume "/dev/mapper/mpathf" successfully created [root@localhost fc_host]# [root@localhost fc_host]# [root@localhost fc_host]# [root@localhost fc_host]# pvscan PV /dev/sda2 VG VolGroup lvm2 [1.09 TiB / 0 free] PV /dev/mapper/mpathb lvm2 [1.95 TiB] PV /dev/mapper/mpathc lvm2 [1.95 TiB] PV /dev/mapper/mpathd lvm2 [1.95 TiB] PV /dev/mapper/mpathe lvm2 [1.95 TiB] PV /dev/mapper/mpathf lvm2 [1.95 TiB] Total: 6 [10.86 TiB] / in use: 1 [1.09 TiB] / in no VG: 5 [9.77 TiB] [root@localhost fc_host]# |
创建VG | [root@localhost fc_host]# [root@localhost fc_host]# [root@localhost fc_host]# vgcreate Oracle /dev/mapper/mpathb Volume group "Oracle" successfully created [root@localhost fc_host]# [root@localhost fc_host]# [root@localhost fc_host]# [root@localhost fc_host]# vgextend Oracle /dev/mapper/mpathc Volume group "Oracle" successfully extended [root@localhost fc_host]# [root@localhost fc_host]# [root@localhost fc_host]# [root@localhost fc_host]# vgextend Oracle /dev/mapper/mpathd Volume group "Oracle" successfully extended [root@localhost fc_host]# [root@localhost fc_host]# [root@localhost fc_host]# vgextend Oracle /dev/mapper/mpathe Volume group "Oracle" successfully extended [root@localhost fc_host]# [root@localhost fc_host]# [root@localhost fc_host]# vgextend Oracle /dev/mapper/mpathf Volume group "Oracle" successfully extended [root@localhost fc_host]# [root@localhost fc_host]# [root@localhost fc_host]# |
查看VG并扩展LV | [root@localhost fc_host]# [root@localhost fc_host]# [root@localhost fc_host]# vgs VG #PV #LV #SN Attr VSize VFree Oracle 5 0 0 wz--n- 9.77t 9.77t VolGroup 1 3 0 wz--n- 1.09t 0 [root@localhost fc_host]# vgscan Reading all physical volumes. This may take a while... Found volume group "Oracle" using metadata type lvm2 Found volume group "VolGroup" using metadata type lvm2 [root@localhost fc_host]# [root@localhost fc_host]# [root@localhost fc_host]# lvextend -l +100%FREE /dev/mapper/VG2-lv_oradata Size of logical volume VG2/lv_oradata changed from 800.00 GiB (204799 extents) to 1.07 TiB (281598 extents). Logical volume lv_oradata successfully resized. # resize2fs -f /dev/mapper/VG2-lv_oradata resize2fs 1.41.12 (17-May-2010) Filesystem at /dev/mapper/VG2-lv_oradata is mounted on /oradata; on-line resizing required old desc_blocks = 50, new_desc_blocks = 69 Performing an on-line resize of /dev/mapper/VG2-lv_oradata to 288356352 (4k) blocks. The filesystem on /dev/mapper/VG2-lv_oradata is now 288356352 blocks long. |
LVM管理存储磁盘操作过程
最新推荐文章于 2022-01-07 10:02:20 发布