LVM: Disk Creation and Dynamic Expansion

I. Creating LVM on virtual disks

1. Create the virtual disks

[root@node3 ~]# ll /dev/sd* -l

brw-rw---- 1 root disk 8, 0 Dec 31 09:50 /dev/sda

brw-rw---- 1 root disk 8, 1 Dec 31 09:50 /dev/sda1

brw-rw---- 1 root disk 8, 2 Dec 31 09:50 /dev/sda2

[root@node3 ~]# mkdir /virtual_disk

[root@node3 ~]# dd if=/dev/zero of=/virtual_disk/lvdisk1.img bs=1M count=1000

1000+0 records in

1000+0 records out

1048576000 bytes (1.0 GB) copied, 8.05385 s, 130 MB/s

[root@node3 ~]# dd if=/dev/zero of=/virtual_disk/lvdisk2.img bs=1M count=1000

[root@node3 ~]# dd if=/dev/zero of=/virtual_disk/lvdisk3.img bs=1M count=1000

[root@node3 ~]# dd if=/dev/zero of=/virtual_disk/lvdisk4.img bs=1M count=1000

 

[root@node3 ~]# ll -ht /virtual_disk/

total 3.0G

-rw-r--r-- 1 root root 1000M Jan  8 11:01 lvdisk3.img

-rw-r--r-- 1 root root 1000M Jan  8 11:01 lvdisk2.img

-rw-r--r-- 1 root root 1000M Jan  8 11:00 lvdisk1.img

 

[root@node3 ~]# losetup /dev/loop1 /virtual_disk/lvdisk1.img   # attach the image to a loop device

[root@node3 ~]# losetup /dev/loop2 /virtual_disk/lvdisk2.img

[root@node3 ~]# losetup /dev/loop3 /virtual_disk/lvdisk3.img

[root@node3 ~]# losetup /dev/loop4 /virtual_disk/lvdisk4.img

 

[root@node3 ~]# losetup -f  # find the first unused loop device

/dev/loop0
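
On a losetup new enough to support --show, you can also let it pick the free device and print the one it chose; a minimal sketch, using a hypothetical fifth image file:

[root@node3 ~]# losetup -f --show /virtual_disk/lvdisk5.img   # attaches to the first free device and prints it, e.g. /dev/loop0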

 

[root@node3 ~]# losetup -a  # list all loop devices currently in use

/dev/loop1: [fd00]:131075 (/virtual_disk/lvdisk1.img)

/dev/loop2: [fd00]:131076 (/virtual_disk/lvdisk2.img)

/dev/loop3: [fd00]:131077 (/virtual_disk/lvdisk3.img)

/dev/loop4: [fd00]:131078 (/virtual_disk/lvdisk4.img)

 

[root@node3 ~]# losetup -d /dev/loop4              # detach the loop device

[root@node3 ~]# losetup -a

/dev/loop1: [fd00]:131075 (/virtual_disk/lvdisk1.img)

/dev/loop2: [fd00]:131076 (/virtual_disk/lvdisk2.img)

/dev/loop3: [fd00]:131077 (/virtual_disk/lvdisk3.img)

 

[root@node3 ~]# losetup /dev/loop4 /virtual_disk/lvdisk4.img       # attach it again; format it before use

[root@node3 ~]# mkfs.ext4 /dev/loop4       # create an ext4 filesystem on loop4

 

 

[root@node3 ~]# mkdir /loop4_disk

[root@node3 ~]# mount /dev/loop4 /loop4_disk/

[root@node3 ~]# df -h

Filesystem            Size  Used Avail Use% Mounted on

/dev/mapper/vg_node3-lv_root

                       50G   21G   27G  44% /

tmpfs                 4.0G  367M  3.7G   9% /dev/shm

/dev/sda1             485M   60M  400M  13% /boot

/dev/mapper/vg_node3-lv_home

                       26G  173M   24G   1% /home

/dev/sr0              3.5G  3.5G     0 100% /media/RHEL_6.4 x86_64 Disc 1

/dev/loop4            985M   18M  918M   2% /loop4_disk

 

 

 

 

2. Create PVs (loop1~loop3)

[root@node3 ~]# pvcreate /dev/{loop1,loop2,loop3}

  Physical volume "/dev/loop1" successfully created

  Physical volume "/dev/loop2" successfully created

  Physical volume "/dev/loop3" successfully created

[root@node3 ~]# pvscan

  PV /dev/sda2    VG vg_node3        lvm2 [79.51 GiB / 0    free]

  PV /dev/loop1                      lvm2 [1000.00 MiB]

  PV /dev/loop2                      lvm2 [1000.00 MiB]

  PV /dev/loop3                      lvm2 [1000.00 MiB]

  Total: 4 [82.44 GiB] / in use: 1 [79.51 GiB] / in no VG: 3 [2.93 GiB]

 

3. Create the VG

[root@node3 ~]# vgcreate vg01_loop /dev/{loop1,loop2}

  Volume group "vg01_loop" successfully created

[root@node3 ~]# vgscan

  Reading all physical volumes.  This may take a while...

  Found volume group "vg_node3" using metadata type lvm2

  Found volume group "vg01_loop" using metadata type lvm2

 

[root@node3 ~]# pvdisplay /dev/{loop1,loop2,loop3}

 

 

 

4. Create the LV

[root@node3 ~]# lvcreate -L 1.5G -n lv01_loop vg01_loop

  Logical volume "lv01_loop" created

[root@node3 ~]# lvscan

  ACTIVE            '/dev/vg_node3/lv_root' [50.00 GiB] inherit

  ACTIVE            '/dev/vg_node3/lv_home' [25.63 GiB] inherit

  ACTIVE            '/dev/vg_node3/lv_swap' [3.88 GiB] inherit

  ACTIVE            '/dev/vg01_loop/lv01_loop' [1.50 GiB] inherit

[root@node3 ~]# lvdisplay vg01_loop/lv01_loop

 

 

[root@node3 ~]# vgdisplay vg01_loop

 

 

5. Create a filesystem and mount it

[root@node3 ~]# mkfs.ext4 /dev/vg01_loop/lv01_loop

 

 

[root@node3 ~]# mkdir /looplv

[root@node3 ~]# mount /dev/vg01_loop/lv01_loop /looplv/

[root@node3 ~]# df -h

Filesystem            Size  Used Avail Use% Mounted on

/dev/mapper/vg_node3-lv_root

                       50G   19G   28G  41% /

tmpfs                 4.0G  367M  3.7G   9% /dev/shm

/dev/sda1             485M   60M  400M  13% /boot

/dev/mapper/vg_node3-lv_home

                       26G  173M   24G   1% /home

/dev/sr0              3.5G  3.5G     0 100% /media/RHEL_6.4 x86_64 Disc 1

/dev/loop4            985M   18M  918M   2% /loop4_disk

/dev/mapper/vg01_loop-lv01_loop

                      1.5G   35M  1.4G   3% /looplv

 

 

 

[root@node3 ~]# yes >/looplv/test       # write test

[root@node3 ~]# rm -f /looplv/test     

 

 

6. Extend the LVM

 

/dev/loop3 has already been initialized as a PV but has not yet been added to any VG. Next, add it to vg01_loop:

 

[root@node3 ~]# vgextend vg01_loop /dev/loop3

  Volume group "vg01_loop" successfully extended

 

 

Now extend lv01_loop. First check the remaining free space in vg01_loop with vgdisplay, then extend; a shortcut for pulling out just the size lines is shown below.
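
A small sketch for checking the space without reading the full vgdisplay output:

[root@node3 ~]# vgdisplay vg01_loop | grep -E 'VG Size|Free'   # total size and free PE / size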

 

 

 

[root@node3 ~]# lvextend -rL +1.4G /dev/vg01_loop/lv01_loop

 

 

 

Check the VG info again: vgdisplay vg01_loop

 

 

Check the LV info: lvdisplay /dev/vg01_loop/lv01_loop

               lvdisplay --maps /dev/vg01_loop/lv01_loop

 

 

[root@node3 ~]# df -Th

 

 

Note:

If the -r flag is omitted when extending the LV, e.g. # lvextend -L +1.4G /dev/vg01_loop/lv01_loop,

then after that operation the filesystem must still be resized before the system can use the added capacity:

[root@node3 ~]# resize2fs /dev/vg01_loop/lv01_loop           # after resize2fs the new space is available
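
To confirm whether the filesystem is lagging behind the LV, compare the two sizes; a quick sketch:

[root@node3 ~]# lvs --units g vg01_loop/lv01_loop    # the LV size as LVM sees it

[root@node3 ~]# df -h /looplv                        # the filesystem size as the kernel sees it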

 

 

 

 

 

 

Appendix:

After a system reboot, the state looks like this:

[root@node3 ~]# vgscan

  Reading all physical volumes.  This may take a while...

  Found volume group "vg_node3" using metadata type lvm2

[root@node3 ~]# pvscan

  PV /dev/sda2   VG vg_node3   lvm2 [79.51 GiB / 0    free]

  Total: 1 [79.51 GiB] / in use: 1 [79.51 GiB] / in no VG: 0 [0   ]

[root@node3 ~]# vgscan

  Reading all physical volumes.  This may take a while...

  Found volume group "vg_node3" using metadata type lvm2

[root@node3 ~]# lvscan

  ACTIVE            '/dev/vg_node3/lv_root' [50.00 GiB] inherit

  ACTIVE            '/dev/vg_node3/lv_home' [25.63 GiB] inherit

  ACTIVE            '/dev/vg_node3/lv_swap' [3.88 GiB] inherit

[root@node3 ~]# losetup -a     # empty output: the loop attachments did not survive the reboot

 

[root@node3 ~]# losetup /dev/loop1 /virtual_disk/lvdisk1.img

[root@node3 ~]# losetup /dev/loop2 /virtual_disk/lvdisk2.img

[root@node3 ~]# losetup /dev/loop3 /virtual_disk/lvdisk3.img

[root@node3 ~]# losetup /dev/loop4 /virtual_disk/lvdisk4.img

[root@node3 ~]# losetup -a

/dev/loop1: [fd00]:131075 (/virtual_disk/lvdisk1.img)

/dev/loop2: [fd00]:131076 (/virtual_disk/lvdisk2.img)

/dev/loop3: [fd00]:131077 (/virtual_disk/lvdisk3.img)

/dev/loop4: [fd00]:131078 (/virtual_disk/lvdisk4.img)

 

[root@node3 ~]# pvscan

  PV /dev/sda2    VG vg_node3    lvm2 [79.51 GiB / 0    free]

  PV /dev/loop1   VG vg01_loop   lvm2 [996.00 MiB / 0    free]

  PV /dev/loop2   VG vg01_loop   lvm2 [996.00 MiB / 0    free]

  PV /dev/loop3   VG vg01_loop   lvm2 [996.00 MiB / 0    free]

  PV /dev/loop4   VG vg01_loop   lvm2 [996.00 MiB / 512.00 MiB free]

  Total: 5 [83.40 GiB] / in use: 5 [83.40 GiB] / in no VG: 0 [0   ]

[root@node3 ~]# vgscan

  Reading all physical volumes.  This may take a while...

  Found volume group "vg_node3" using metadata type lvm2

  Found volume group "vg01_loop" using metadata type lvm2

[root@node3 ~]# lvscan

  ACTIVE            '/dev/vg_node3/lv_root' [50.00 GiB] inherit

  ACTIVE            '/dev/vg_node3/lv_home' [25.63 GiB] inherit

  ACTIVE            '/dev/vg_node3/lv_swap' [3.88 GiB] inherit

  inactive          '/dev/vg01_loop/lv01_loop' [3.39 GiB] inherit

[root@node3 ~]# ls /dev/vg

vga_arbiter  vg_node3/

[root@node3 ~]# lvdisplay /dev/vg01_loop/lv01_loop      # note: LV Status shows "NOT available"

 

 

[root@node3 ~]# lvchange -a y /dev/vg01_loop/lv01_loop

[root@node3 ~]# lvdisplay /dev/vg01_loop/lv01_loop
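
Since the loop attachments never survive a reboot, the manual steps above can be scripted; a sketch that appends them to /etc/rc.local (assuming rc.local is executed at boot, as on this RHEL 6 host):

[root@node3 ~]# cat >> /etc/rc.local <<'EOF'
# re-attach the loop-backed PVs, then activate the VG
losetup /dev/loop1 /virtual_disk/lvdisk1.img
losetup /dev/loop2 /virtual_disk/lvdisk2.img
losetup /dev/loop3 /virtual_disk/lvdisk3.img
losetup /dev/loop4 /virtual_disk/lvdisk4.img
vgchange -a y vg01_loop
EOF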

 

 

II. Creating LVM on newly added disks

1. Before inserting the new disks, list the devices already present in the system

[root@localhost ~]# ls -l /dev/sd*

brw-r----- 1 root disk 8,  0 09-14 17:23 /dev/sda

brw-r----- 1 root disk 8,  1 09-14 17:23 /dev/sda1

brw-r----- 1 root disk 8,  2 09-14 17:23 /dev/sda2

brw-r----- 1 root disk 8,  3 09-14 17:23 /dev/sda3

 

2. After inserting three new disks, list the devices again (the three added here are sdc/sdd/sde)

[root@localhost ~]# ls -l /dev/sd*

brw-r----- 1 root disk 8,  0 09-14 17:23 /dev/sda

brw-r----- 1 root disk 8,  1 09-14 17:23 /dev/sda1

brw-r----- 1 root disk 8,  2 09-14 17:23 /dev/sda2

brw-r----- 1 root disk 8,  3 09-14 17:23 /dev/sda3

brw-r----- 1 root disk 8, 32 09-14 17:23 /dev/sdc

brw-r----- 1 root disk 8, 48 09-14 17:23 /dev/sdd

brw-r----- 1 root disk 8, 64 09-14 17:23 /dev/sde

 

3. First build the LVM from two of the disks and write files to it; then add the third disk

1) Create the PVs (physical volumes)

[root@localhost ~]# pvcreate /dev/sdc

 Physical volume "/dev/sdc" successfully created

[root@localhost ~]# pvcreate /dev/sdd

 Physical volume "/dev/sdd" successfully created

[root@localhost ~]# pvscan

  PV /dev/sdc                      lvm2 [1.00 GB]

  PV /dev/sdd                      lvm2 [1.00 GB]

 Total: 2 [2.00 GB] / in use: 0 [0  ] / in no VG: 2 [2.00 GB]

 

2) Create the VG (volume group) on the PVs

[root@localhost ~]# vgcreate vg1 /dev/{sdc,sdd}

 Volume group "vg1" successfully created

[root@localhost ~]# pvscan

  PV /dev/sdc   VG vg1   lvm2 [1020.00 MB / 1020.00 MB free]

  PV /dev/sdd   VG vg1   lvm2 [1020.00 MB / 1020.00 MB free]

 Total: 2 [1.99 GB] / in use: 2 [1.99 GB] / in no VG: 0 [0   ]

 

3) Create the LV (logical volume) in the VG

[root@localhost ~]# lvcreate -L 1500M -n lv1 vg1        # -L sets the size, -n the name (create lv1 of 1500M from vg1)

 Logical volume "lv1" created

[root@localhost ~]# lvscan

 ACTIVE            '/dev/vg1/lv1' [1.46 GB] inherit

 

4) Create the filesystem, ext3 format

[root@localhost ~]# mkfs.ext3 /dev/vg1/lv1

mke2fs 1.39 (29-May-2006)

Filesystem label=

OS type: Linux

Block size=4096 (log=2)

Fragment size=4096 (log=2)

192000 inodes, 384000 blocks

19200 blocks (5.00%) reserved for the superuser

First data block=0

Maximum filesystem blocks=394264576

12 block groups

32768 blocks per group, 32768 fragments per group

16000 inodes per group

Superblock backups stored on blocks:

       32768, 98304, 163840, 229376, 294912

 

Writing inode tables: done                           

Creating journal (8192 blocks): done

Writing superblocks and filesystem accounting information: done

 

This filesystem will be automatically checked every 32 mounts or

180 days, whichever comes first.  Use tune2fs -c or -i to override.

 

5) Mount the LV at /lvm

[root@localhost ~]# mkdir /lvm

[root@localhost ~]# mount /dev/vg1/lv1 /lvm/

 

6) Check the mounted devices (the mount above is temporary; for it to be mounted at system boot it must be written to /etc/fstab)

[root@localhost ~]# df -h

Filesystem            Size  Used Avail Use% Mounted on

/dev/sda3              18G  3.3G  14G  20% /

/dev/sda1             190M   12M 169M   7% /boot

tmpfs                 506M     0 506M   0% /dev/shm

/dev/mapper/vg1-lv1   1.5G  35M  1.4G   3% /lvm

 

7) Create a file in the logical volume; df then shows the mounted volume has run out of space, so we add another disk

[root@localhost ~]# yes > /lvm/test.txt

yes: standard output: No space left on device

yes: write error

[root@localhost ~]# df -h

Filesystem            Size  Used Avail Use% Mounted on

/dev/sda3              18G  3.3G  14G  20% /

/dev/sda1             190M  12M  169M   7% /boot

tmpfs                 506M     0 506M   0% /dev/shm

/dev/mapper/vg1-lv1   1.5G 1.5G     0 100% /lvm

 

4. Add the third disk to the logical volume

1) Create the PV

[root@localhost ~]# pvcreate /dev/sde

 Physical volume "/dev/sde" successfully created

[root@localhost ~]# pvscan

  PV /dev/sdc   VG vg1             lvm2 [1020.00 MB / 0    free]

  PV /dev/sdd   VG vg1             lvm2 [1020.00 MB / 540.00 MB free]

  PV /dev/sde                      lvm2 [1.00 GB]

 Total: 3 [2.99 GB] / in use: 2 [1.99 GB] / in no VG: 1 [1.00 GB]

 

2) Add sde to vg1

[root@localhost ~]# vgextend vg1 /dev/sde

 Volume group "vg1" successfully extended

[root@localhost ~]# pvscan

  PV /dev/sdc   VG vg1   lvm2 [1020.00 MB / 0    free]

  PV /dev/sdd   VG vg1   lvm2 [1020.00 MB / 540.00 MB free]

  PV /dev/sde   VG vg1   lvm2 [1020.00 MB / 1020.00 MB free]

 Total: 3 [2.99 GB] / in use: 3 [2.99 GB] / in no VG: 0 [0   ]

 

3) Extend the LV

[root@localhost ~]# lvextend -L +1.2G /dev/vg1/lv1      # add 1.2G of capacity

 Rounding up size to full physical extent 1.20 GB

  Extending logical volume lv1 to 2.67 GB

 Logical volume lv1 successfully resized

[root@localhost ~]# lvscan

 ACTIVE            '/dev/vg1/lv1' [2.67 GB] inherit

 

[root@localhost ~]# df -h           # the LV has been extended, but it has not yet taken effect for the filesystem

Filesystem            Size  Used Avail Use% Mounted on

/dev/sda3              18G  3.3G  14G  20% /

/dev/sda1             190M   12M 169M   7% /boot

tmpfs                 506M     0 506M   0% /dev/shm

/dev/mapper/vg1-lv1   1.5G 1.5G     0 100% /lvm

 

[root@localhost ~]# resize2fs -f /dev/vg1/lv1     # make the extension take effect immediately; alternatively, pass -r to lvextend to do both steps at once

resize2fs 1.39 (29-May-2006)

Filesystem at /dev/vg1/lv1 is mounted on /lvm; on-line resizing required

Performing an on-line resize of /dev/vg1/lv1 to 699392 (4k) blocks.

The filesystem on /dev/vg1/lv1 is now 699392 blocks long.

 

[root@localhost ~]# df -h            # the extension is now in effect

Filesystem            Size  Used Avail Use% Mounted on

/dev/sda3              18G  3.3G  14G  20% /

/dev/sda1             190M   12M 169M   7% /boot

tmpfs                 506M     0 506M   0% /dev/shm

/dev/mapper/vg1-lv1   2.7G 1.5G  1.1G  58% /lvm

 

 

 

5. Mount automatically at boot

[root@node3 ~]# vim /etc/fstab      # append the following line

/dev/mapper/vg01-lv01         /lv01       ext4 defaults          0 0
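
Before rebooting, the new entry can be validated in place; a quick sketch (assuming the mount point /lv01 exists):

[root@node3 ~]# mount -a        # mounts every fstab entry not yet mounted; an error here means the line is bad

[root@node3 ~]# df -h /lv01     # confirm the volume came up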

 

 

 

III. Excerpt (I have forgotten the source URL; I forgot to paste it when I wrote this up!)

 

Directories and Files

## Directories

/etc/lvm                    - default lvm directory location 

/etc/lvm/backup         - where the automatic backups go 

/etc/lvm/cache          - persistent filter cache 

/etc/lvm/archive        - where automatic archives go after a volume group change 

/var/lock/lvm             - lock files to prevent metadata corruption 

 

# Files 

/etc/lvm/lvm.conf       - main lvm configuration file 

$HOME/.lvm               - lvm history

Tools

diagnostic

lvmdump

lvmdump -d <dir>

dmsetup [info|ls|status]

 

Note: by default the lvmdump command creates a tar ball

Physical Volumes

display

pvdisplay -v 

pvs -v

pvs -a

pvs --segments (see the disk segments used) 

 

pvs attributes are: 

1. (a)llocatable 

2. e(x)ported

scanning

pvscan -v

 

Note: scans all disks, both LVM and non-LVM

adding

pvcreate /dev/sdb1

 

## Create physical volume with specific UUID, used to recover volume groups (see miscellaneous section) 

pvcreate --uuid <UUID> /dev/sdb1 

 

Common Attributes that you may want to use:

 

-M2 create a LVM2 physical volume

removing

pvremove /dev/sdb1

checking

pvck -v /dev/sdb1 

 

Note: check the consistency of the LVM metadata

change physical attributes

## do not allow allocation of extents on this drive; the partition must already be in a VG, otherwise you get an error 

pvchange -x n /dev/sdb1

 

Common Attributes that you may want to use:

--addtag add a tag

-x allowed to allocate extents

-u change the uuid

moving

pvmove -v /dev/sdb2 /dev/sdb3 

 

Note: moves any used extents from this volume to another volume, in readiness to remove that volume. However, you cannot use this on mirrored volumes; you must first convert back to non-mirror using "lvconvert -m 0", as sketched below.
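
A hedged sketch of that workaround (device and volume names are illustrative):

## drop the mirror, leaving a linear volume
lvconvert -m 0 /dev/VolData00/vol01
## the extents can now be moved off the old disk
pvmove -v /dev/sdb2 /dev/sdb3
## re-add the mirror afterwards
lvconvert -m 1 /dev/VolData00/vol01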

Volume Groups

display

vgdisplay -v 

vgs -v

vgs -a -o +devices 

 

vgs flags:

#PV - number of physical devices

#LV - number of configured volumes        

 

vgs attributes are: 

1. permissions (r)|(w)

2. resi(z)eable

3. e(x)ported

4. (p)artial

5. allocation policy - (c)ontiguous, c(l)ing, (n)ormal, (a)nywhere, (i)nherited

6. (c)luster

scanning

vgscan -v

creating

vgcreate VolData00 /dev/sdb1 /dev/sdb2 /dev/sdb3

vgcreate VolData00 /dev/sdb[123]

 

## Use 32MB extent size 

vgcreate VolData00 -s 32 /dev/sdb1 

 

Common Attributes that you may want to use:

 

-l  maximum logical volumes

-p maximum physical volumes

-s physical extent size (default is 4MB)

-A autobackup 

extending

vgextend VolData00 /dev/sdb3

reducing

vgreduce VolData00 /dev/sdb3

 

vgreduce --removemissing --force VolData00

removing

vgremove VolData00

 

Common Attributes that you may want to use:

 

-f force the removal of any logical volumes

checking

vgck VolData00 

 

Note: check the consistency of the LVM metadata

change volume attributes

vgchange -a n VolData00

 

Common Attributes that you may want to use: 

 

-a control availability of volumes within the group

-l  maximum logical volumes

-p maximum physical volumes

-s physical extent size (default is 4MB)

-x resizable yes or no (see VG status in vgdisplay) 

renaming

vgrename VolData00 Data_Vol_01

 

note: the volume group must not have any active logical volumes

converting metadata type

vgconvert -M2 VolData00

 

Note: vgconvert allows you to convert from one type of metadata format to another for example from LVM1 to LVM2, LVM2 offers bigger capacity, clustering and mirroring

merging

# the old volume group will be merged into the new volume group 

vgmerge New_Vol_Group Old_Vol_Group

 

Note: you must unmount any filesystems and deactivate the VG that is being merged ("vgchange -a n <vg>"); afterwards you can activate it again ("vgchange -a y <vg>") and perform a vgscan. Don't forget to back up the configuration. The whole sequence is sketched below.
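
Putting that note together as one sequence (mount point and VG names are illustrative):

## unmount any filesystems on the VG being merged, then deactivate it
umount /data
vgchange -a n Old_Vol_Group
## merge the old VG into the new one
vgmerge New_Vol_Group Old_Vol_Group
## reactivate, rescan, and back up the new configuration
vgchange -a y New_Vol_Group
vgscan
vgcfgbackup New_Vol_Group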

splitting

vgsplit Old_Vol_Group New_Vol_Group [physical volumes] [-n logical volume name]

importing

vgimport VolData00

 

Common Attributes that you may want to use: 

 

-a import all exported volume groups

exporting

## to see if a volume group has already been exported, use "vgs" and look at the third attribute; it should be an x 

vgexport VolData00

 

Common Attributes that you may want to use: 

 

-a export all inactive volume groups

backing up

## Backup to default location (/etc/lvm/backup) 

vgcfgbackup VolData00

 

# Backup to specific location 

vgcfgbackup -f /var/backup/VolData00_bkup VolData00

# Backup to specific location all volume groups (notice the %s)

vgcfgbackup -f /var/backup/vg_backups_%s 

 

Note: the backups are written in plain text and are by default located in /etc/lvm/backup

restoring

vgcfgrestore -f /var/backup/VolData00_bkup VolData00

 

Common Attributes that you may want to use: 

 

-l list backups of file

-f backup file

-M metadata type 1 or 2

cloning

vgimportclone /dev/sdb1

 

Note: used to import and rename duplicated volume group
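
For example, after presenting a SAN clone of a disk whose VG name collides with an existing one, a sketch of importing it under a new name (the name is illustrative):

vgimportclone --basevgname VolData00_clone /dev/sdb1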

special files

vgmknodes VolData00

 

Note: recreates volume group directory and logical volume special files in /dev

Logical Volumes

display

lvdisplay -v 

lvdisplay --maps      display the logical-to-physical extent mapping (useful for mirrored volumes)

 

lvs -v

lvs -a -o +devices 

 

## lvs commands for mirror volumes 

lvs -a -o +devices

lvs -a -o +seg_pe_ranges --segments

## Stripe size 

lvs -v --segments

lvs -a -o +stripes,stripesize

## use complex command

lvs -a -o +devices,stripes,stripesize,seg_pe_ranges --segments 

 

lvs attributes are: 

1. volume type: (m)irrored, (M)irrored without initial sync, (o)rigin, (p)vmove, (s)napshot, invalid (S)napshot, (v)irtual, mirror (i)mage

                      mirror (I)mage out-of-sync, under (c)onversion

2. permissions: (w)rite, (r)ead-only

3. allocation policy - (c)ontiguous, c(l)ing, (n)ormal, (a)nywhere, (i)nherited

4. fixed (m)inor 

5. state: (a)ctive, (s)uspended, (I)nvalid snapshot, invalid (S)uspended snapshot, mapped (d)evice present with-out tables,

             mapped device present with (i)nactive table 

6. device (o)pen (mounted in other words)

scanning

lvscan -v 

lvmdiskscan

creating

## plain old volume 

lvcreate -L 10M VolData00

## plain old volume but use extents, use 10 4MB extents (if extent size is 4MB) 

lvcreate -l 10 VolData00

 

## plain old volume but with a specific name web01

lvcreate -L 10M -n web01 VolData00 

 

## plain old volume but on a specific disk 

lvcreate -L 10M VolData00 /dev/sdb1

 

## a striped volume called lvol1 (note: capital -I sets the stripe size, lowercase -i the stripe count); can use -l (extents) instead of -L 

lvcreate -i 3 -L 24M -n lvol1 vg01

## Mirrored volume

lvcreate -L 10M -m1 -n data01 vg01

 

## Mirrored volume without a mirror log file

lvcreate -L 10M -m1 --mirrorlog core -n data01 vg01

 

Common Attributes that you may want to use: 

 

-L size of the volume [kKmMgGtT]

-l number of extents

-C contiguous [y|n]

-i stripes

-I stripe size

-m mirrors

--mirrorlog 

-n volume name

extending

lvextend -L 20M /dev/VolData00/vol01

Common Attributes that you may want to use:

-L size of the volume [kKmMgGtT]

-l number of extents

-C contiguous [y|n]

-i stripes

-I stripe size

 

Note: you can extend an ext2/ext3 filesystem using the "resize2fs" or "fsadm" command 

 

fsadm resize /dev/VolData01/data01

resize2fs -p /dev/mapper/VolData01-data01 [size] 

 

The -p option displays progress bars while extending the filesystem

reducing/resizing

lvreduce -L 5M /dev/VolData00/vol01

lvresize -L 5M /dev/VolData00/vol01

 

Note: rounding to the next extent (4MB by default) will occur when extending and reducing volumes; you can use resize2fs or fsadm to shrink the filesystem

fsadm resize /dev/VolData01/data01 [size] 

resize2fs -p /dev/mapper/VolData01-data01 [size]

removing

lvremove /dev/VolData00/vol01

adding a mirror to a non-mirrored volume

lvconvert -m1 --mirrorlog core /dev/VolData00/vol01 /dev/sdb2

Note: you can also use the above command to remove an unwanted log

removing a mirror from a mirrored volume

lvconvert -m0 /dev/VolData00/vol01 /dev/sdb2

Note: the disk in the command is the one you want to remove

Mirror a volume that has stripes

lvconvert --stripes 3 -m1 --mirrorlog core /dev/VolData00/data01 /dev/sdd1 /dev/sde1 /dev/sdf1

change volume attributes

lvchange -a n /dev/VolData00/vol01

 

Common Attributes that you may want to use:

-a availability

-C contiguous [y|n]

renaming

lvrename /dev/VolData00/vol_old /dev/VolData00/vol_new

snapshotting

lvcreate --size 100M --snapshot --name snap /dev/vg01/data01
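
The snapshot can then be mounted read-only (for a backup, say) and discarded when finished; a sketch:

mkdir /mnt/snap
mount -o ro /dev/vg01/snap /mnt/snap        # browse the point-in-time view
umount /mnt/snap
lvremove /dev/vg01/snap                     # drop the snapshot when done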

Miscellaneous

Simulating a disk failure

dd if=/dev/zero of=/dev/sdb2 count=10

repairing a failed mirror (no LVM corruption)

## check the volume; presume /dev/sdb2 has failed 

lvs -a -o +devices

 

# remove the failed disk from the volume (if not already done); this will convert the volume into a non-mirrored volume 

vgreduce --removemissing --force VolData00

 

## replace the disk physically, remembering to partition it with type 8e 

fdisk /dev/sdb

........ 

 

## add new disk to LVM

pvcreate /dev/sdb2

 

## add the disk back into volume group

vgextend VolData00 /dev/sdb2

 

## mirror up the volume

lvconvert -m1 --mirrorlog core /dev/VolData00/vol02 /dev/sdb2

corrupt LVM metadata without replacing drive

# attempt to bring the volume group online

vgchange -a y VolData00

 

# Restore the LVM configuration

vgcfgrestore VolData00

 

# attempt to bring the volume group online

vgchange -a y VolData00

 

# file system check

e2fsck /dev/VolData00/data01

corrupt LVM metadata but replacing the faulty disk

# attempt to bring the volume group online; if you get UUID conflict errors, make a note of the UUID number 

vgchange -a y VolData00

vgchange -a n VolData00 

 

## sometimes it may only be a logical volume problem

lvchange -a y /dev/VolData00/web02

lvchange -a n /dev/VolData00/web02 

 

## replace the disk physically, remembering to partition it with type 8e 

fdisk /dev/sdb

........

 

# after replacing the faulty drive, the disk must have the previous UUID number; you can also get it from the /etc/lvm directory 

pvcreate --uuid <previous UUID number taken from above command> /dev/sdb2

 

# Restore the LVM configuration

vgcfgrestore VolData00

 

# attempt to bring the volume group online or logical volume 

vgchange -a y VolData00

lvchange -a y /dev/VolData00/web02 

 

# file system check

e2fsck /dev/VolData00/data01

Note: if you have backed up the volume group configuration, you can obtain the UUID number from the backup file (by default located in /etc/lvm/backup) or by running "pvs -v"
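
A sketch for fishing the UUID out of that backup file (path assumes the default backup location):

## in the backup, each pv stanza lists its "id = ..." on the line before its "device = ..." line
grep -B1 'device = "/dev/sdb2"' /etc/lvm/backup/VolData00 | grep id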

 

Source: "ITPUB Blog", http://blog.itpub.net/30130773/viewspace-2097282/ (when reposting, please credit the source).
