六.
1.RAID1（镜像）：两块磁盘同步读写的设置方法
mdadm -C /dev/md0 -a yes -l 1 -n 2 -x 1 /dev/vdb{1..3}
-------- ------ - ---- ---- -------------
-C 指定要建立的阵列设备名称；-a yes 自动创建设备文件；-l RAID等级；-n 活动设备数；-x 热备盘数；最后是需要用到的设备
45 fdisk /dev/vdb
46 partprobe
47 fdisk -l
48 fdisk /dev/vdb
49 fdisk -l
50 mdadm -C /dev/md0 -a yes -l 1 -n 2 -x 1 /dev/vdb{1..3}
51 df -h
52 mkfs.xfs /dev/md0
53 mount /dev/md0 /mnt
54 df -h
-----------------------------------------------------------------------------
在 /dev/vdb 上划分 3 个分区（Id 为 fd：Linux raid autodetect）
Device Boot Start End Blocks Id System
/dev/vdb1 2048 2099199 1048576 fd Linux raid autodetect
/dev/vdb2 2099200 4196351 1048576 fd Linux raid autodetect
/dev/vdb3 4196352 5220351 512000 fd Linux raid autodetect
[root@localhost ~]# mdadm -C /dev/md0 -a yes -l 1 -n 2 -x 1 /dev/vdb{1..3}
mdadm: Note: this array has metadata at the start and
may not be suitable as a boot device. If you plan to
store '/boot' on this device please ensure that
your boot-loader understands md/v1.x metadata, or use
--metadata=0.90
mdadm: largest drive (/dev/vdb1) exceeds size (511680K) by more than 1%
Continue creating array?
Continue creating array? (y/n) y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.
[root@localhost ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/vda1 10G 3.7G 6.4G 37% /
devtmpfs 906M 0 906M 0% /dev
tmpfs 921M 140K 921M 1% /dev/shm
tmpfs 921M 17M 904M 2% /run
tmpfs 921M 0 921M 0% /sys/fs/cgroup
[root@localhost ~]# mkfs.xfs /dev/md0
meta-data=/dev/md0 isize=256 agcount=4, agsize=31980 blks
= sectsz=512 attr=2, projid32bit=1
= crc=0
data = bsize=4096 blocks=127920, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=0
log =internal log bsize=4096 blocks=853, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@localhost ~]# mount /dev/md0 /mnt
[root@localhost ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/vda1 10G 3.7G 6.4G 37% /
devtmpfs 906M 0 906M 0% /dev
tmpfs 921M 140K 921M 1% /dev/shm
tmpfs 921M 17M 904M 2% /run
tmpfs 921M 0 921M 0% /sys/fs/cgroup
/dev/md0 497M 26M 472M 6% /mnt
[root@localhost ~]#
-----------------------------------------------------------------------------
2.对设备的状态进行更改
mdadm -D /dev/md0              ##查看阵列的详细状态
mdadm -f /dev/md0 /dev/vdb2    ##将某块设备标记为故障（faulty），热备盘会自动顶替
mdadm -a /dev/md0 /dev/vdb2    ##向阵列中添加某块设备（作为热备盘）
mdadm -r /dev/md0 /dev/vdb2    ##从阵列中移除某块设备（该设备须处于 faulty 或 spare 状态）
监控命令:watch -n 1 'cat /proc/mdstat;echo =======;df -h'
56 mdadm -D /dev/md0 ##查看设备状态
57 mdadm -f /dev/md0 /dev/vdb2 ##使某块坏掉
58 mdadm -D /dev/md0
59 mdadm -r /dev/md0 /dev/vdb2 ##移除某块设备
60 mdadm -D /dev/md0
61 mdadm -a /dev/md0 /dev/vdb2
62 mdadm -D /dev/md0
mdadm -a /dev/md0 /dev/vdb2 ##增加某块设备
-----------------------------------------------------------------------------
[root@localhost ~]# mdadm -D /dev/md0
/dev/md0:
Version : 1.2
Creation Time : Sat Apr 22 02:44:00 2017
Raid Level : raid1
Array Size : 511680 (499.77 MiB 523.96 MB)
Used Dev Size : 511680 (499.77 MiB 523.96 MB)
Raid Devices : 2
Total Devices : 3
Persistence : Superblock is persistent
Update Time : Sat Apr 22 02:45:19 2017
State : clean
Active Devices : 2
Working Devices : 3
Failed Devices : 0
Spare Devices : 1
Name : localhost:0 (local to host localhost)
UUID : af9e3fe4:dfed6efc:02461f8e:af60c0c1
Events : 17
Number Major Minor RaidDevice State
0 253 17 0 active sync /dev/vdb1
1 253 18 1 active sync /dev/vdb2
2 253 19 - spare /dev/vdb3
[root@localhost ~]# mdadm -f /dev/md0 /dev/vdb2
mdadm: set /dev/vdb2 faulty in /dev/md0
[root@localhost ~]# mdadm -D /dev/md0
/dev/md0:
Version : 1.2
Creation Time : Sat Apr 22 02:44:00 2017
Raid Level : raid1
Array Size : 511680 (499.77 MiB 523.96 MB)
Used Dev Size : 511680 (499.77 MiB 523.96 MB)
Raid Devices : 2
Total Devices : 3
Persistence : Superblock is persistent
Update Time : Sat Apr 22 02:57:57 2017
State : clean
Active Devices : 2
Working Devices : 2
Failed Devices : 1
Spare Devices : 0
Name : localhost:0 (local to host localhost)
UUID : af9e3fe4:dfed6efc:02461f8e:af60c0c1
Events : 36
Number Major Minor RaidDevice State
0 253 17 0 active sync /dev/vdb1
2 253 19 1 active sync /dev/vdb3
1 253 18 - faulty /dev/vdb2
[root@localhost ~]# mdadm -r /dev/md0 /dev/vdb2
mdadm: hot removed /dev/vdb2 from /dev/md0
[root@localhost ~]# mdadm -D /dev/md0
/dev/md0:
Version : 1.2
Creation Time : Sat Apr 22 02:44:00 2017
Raid Level : raid1
Array Size : 511680 (499.77 MiB 523.96 MB)
Used Dev Size : 511680 (499.77 MiB 523.96 MB)
Raid Devices : 2
Total Devices : 2
Persistence : Superblock is persistent
Update Time : Sat Apr 22 02:58:22 2017
State : clean
Active Devices : 2
Working Devices : 2
Failed Devices : 0
Spare Devices : 0
Name : localhost:0 (local to host localhost)
UUID : af9e3fe4:dfed6efc:02461f8e:af60c0c1
Events : 37
Number Major Minor RaidDevice State
0 253 17 0 active sync /dev/vdb1
2 253 19 1 active sync /dev/vdb3
[root@localhost ~]# mdadm -a /dev/md0 /dev/vdb2
mdadm: added /dev/vdb2
[root@localhost ~]# mdadm -D /dev/md0
/dev/md0:
Version : 1.2
Creation Time : Sat Apr 22 02:44:00 2017
Raid Level : raid1
Array Size : 511680 (499.77 MiB 523.96 MB)
Used Dev Size : 511680 (499.77 MiB 523.96 MB)
Raid Devices : 2
Total Devices : 3
Persistence : Superblock is persistent
Update Time : Sat Apr 22 02:58:37 2017
State : clean
Active Devices : 2
Working Devices : 3
Failed Devices : 0
Spare Devices : 1
Name : localhost:0 (local to host localhost)
UUID : af9e3fe4:dfed6efc:02461f8e:af60c0c1
Events : 38
Number Major Minor RaidDevice State
0 253 17 0 active sync /dev/vdb1
2 253 19 1 active sync /dev/vdb3
3 253 18 - spare /dev/vdb2
[root@localhost ~]#
-------------------------------------------------------------------------------
1.RAID1（镜像）：两块磁盘同步读写的设置方法
mdadm -C /dev/md0 -a yes -l 1 -n 2 -x 1 /dev/vdb{1..3}
-------- ------ - ---- ---- -------------
-C 指定要建立的阵列设备名称；-a yes 自动创建设备文件；-l RAID等级；-n 活动设备数；-x 热备盘数；最后是需要用到的设备
45 fdisk /dev/vdb
46 partprobe
47 fdisk -l
48 fdisk /dev/vdb
49 fdisk -l
50 mdadm -C /dev/md0 -a yes -l 1 -n 2 -x 1 /dev/vdb{1..3}
51 df -h
52 mkfs.xfs /dev/md0
53 mount /dev/md0 /mnt
54 df -h
-----------------------------------------------------------------------------
在 /dev/vdb 上划分 3 个分区（Id 为 fd：Linux raid autodetect）
Device Boot Start End Blocks Id System
/dev/vdb1 2048 2099199 1048576 fd Linux raid autodetect
/dev/vdb2 2099200 4196351 1048576 fd Linux raid autodetect
/dev/vdb3 4196352 5220351 512000 fd Linux raid autodetect
[root@localhost ~]# mdadm -C /dev/md0 -a yes -l 1 -n 2 -x 1 /dev/vdb{1..3}
mdadm: Note: this array has metadata at the start and
may not be suitable as a boot device. If you plan to
store '/boot' on this device please ensure that
your boot-loader understands md/v1.x metadata, or use
--metadata=0.90
mdadm: largest drive (/dev/vdb1) exceeds size (511680K) by more than 1%
Continue creating array?
Continue creating array? (y/n) y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.
[root@localhost ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/vda1 10G 3.7G 6.4G 37% /
devtmpfs 906M 0 906M 0% /dev
tmpfs 921M 140K 921M 1% /dev/shm
tmpfs 921M 17M 904M 2% /run
tmpfs 921M 0 921M 0% /sys/fs/cgroup
[root@localhost ~]# mkfs.xfs /dev/md0
meta-data=/dev/md0 isize=256 agcount=4, agsize=31980 blks
= sectsz=512 attr=2, projid32bit=1
= crc=0
data = bsize=4096 blocks=127920, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=0
log =internal log bsize=4096 blocks=853, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@localhost ~]# mount /dev/md0 /mnt
[root@localhost ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/vda1 10G 3.7G 6.4G 37% /
devtmpfs 906M 0 906M 0% /dev
tmpfs 921M 140K 921M 1% /dev/shm
tmpfs 921M 17M 904M 2% /run
tmpfs 921M 0 921M 0% /sys/fs/cgroup
/dev/md0 497M 26M 472M 6% /mnt
[root@localhost ~]#
-----------------------------------------------------------------------------
2.对设备的状态进行更改
mdadm -D /dev/md0              ##查看阵列的详细状态
mdadm -f /dev/md0 /dev/vdb2    ##将某块设备标记为故障（faulty），热备盘会自动顶替
mdadm -a /dev/md0 /dev/vdb2    ##向阵列中添加某块设备（作为热备盘）
mdadm -r /dev/md0 /dev/vdb2    ##从阵列中移除某块设备（该设备须处于 faulty 或 spare 状态）
监控命令:watch -n 1 'cat /proc/mdstat;echo =======;df -h'
56 mdadm -D /dev/md0 ##查看设备状态
57 mdadm -f /dev/md0 /dev/vdb2 ##使某块坏掉
58 mdadm -D /dev/md0
59 mdadm -r /dev/md0 /dev/vdb2 ##移除某块设备
60 mdadm -D /dev/md0
61 mdadm -a /dev/md0 /dev/vdb2
62 mdadm -D /dev/md0
mdadm -a /dev/md0 /dev/vdb2 ##增加某块设备
-----------------------------------------------------------------------------
[root@localhost ~]# mdadm -D /dev/md0
/dev/md0:
Version : 1.2
Creation Time : Sat Apr 22 02:44:00 2017
Raid Level : raid1
Array Size : 511680 (499.77 MiB 523.96 MB)
Used Dev Size : 511680 (499.77 MiB 523.96 MB)
Raid Devices : 2
Total Devices : 3
Persistence : Superblock is persistent
Update Time : Sat Apr 22 02:45:19 2017
State : clean
Active Devices : 2
Working Devices : 3
Failed Devices : 0
Spare Devices : 1
Name : localhost:0 (local to host localhost)
UUID : af9e3fe4:dfed6efc:02461f8e:af60c0c1
Events : 17
Number Major Minor RaidDevice State
0 253 17 0 active sync /dev/vdb1
1 253 18 1 active sync /dev/vdb2
2 253 19 - spare /dev/vdb3
[root@localhost ~]# mdadm -f /dev/md0 /dev/vdb2
mdadm: set /dev/vdb2 faulty in /dev/md0
[root@localhost ~]# mdadm -D /dev/md0
/dev/md0:
Version : 1.2
Creation Time : Sat Apr 22 02:44:00 2017
Raid Level : raid1
Array Size : 511680 (499.77 MiB 523.96 MB)
Used Dev Size : 511680 (499.77 MiB 523.96 MB)
Raid Devices : 2
Total Devices : 3
Persistence : Superblock is persistent
Update Time : Sat Apr 22 02:57:57 2017
State : clean
Active Devices : 2
Working Devices : 2
Failed Devices : 1
Spare Devices : 0
Name : localhost:0 (local to host localhost)
UUID : af9e3fe4:dfed6efc:02461f8e:af60c0c1
Events : 36
Number Major Minor RaidDevice State
0 253 17 0 active sync /dev/vdb1
2 253 19 1 active sync /dev/vdb3
1 253 18 - faulty /dev/vdb2
[root@localhost ~]# mdadm -r /dev/md0 /dev/vdb2
mdadm: hot removed /dev/vdb2 from /dev/md0
[root@localhost ~]# mdadm -D /dev/md0
/dev/md0:
Version : 1.2
Creation Time : Sat Apr 22 02:44:00 2017
Raid Level : raid1
Array Size : 511680 (499.77 MiB 523.96 MB)
Used Dev Size : 511680 (499.77 MiB 523.96 MB)
Raid Devices : 2
Total Devices : 2
Persistence : Superblock is persistent
Update Time : Sat Apr 22 02:58:22 2017
State : clean
Active Devices : 2
Working Devices : 2
Failed Devices : 0
Spare Devices : 0
Name : localhost:0 (local to host localhost)
UUID : af9e3fe4:dfed6efc:02461f8e:af60c0c1
Events : 37
Number Major Minor RaidDevice State
0 253 17 0 active sync /dev/vdb1
2 253 19 1 active sync /dev/vdb3
[root@localhost ~]# mdadm -a /dev/md0 /dev/vdb2
mdadm: added /dev/vdb2
[root@localhost ~]# mdadm -D /dev/md0
/dev/md0:
Version : 1.2
Creation Time : Sat Apr 22 02:44:00 2017
Raid Level : raid1
Array Size : 511680 (499.77 MiB 523.96 MB)
Used Dev Size : 511680 (499.77 MiB 523.96 MB)
Raid Devices : 2
Total Devices : 3
Persistence : Superblock is persistent
Update Time : Sat Apr 22 02:58:37 2017
State : clean
Active Devices : 2
Working Devices : 3
Failed Devices : 0
Spare Devices : 1
Name : localhost:0 (local to host localhost)
UUID : af9e3fe4:dfed6efc:02461f8e:af60c0c1
Events : 38
Number Major Minor RaidDevice State
0 253 17 0 active sync /dev/vdb1
2 253 19 1 active sync /dev/vdb3
3 253 18 - spare /dev/vdb2
[root@localhost ~]#
-------------------------------------------------------------------------------