Creating a RAID10 Array from Multiple Disks and Growing It Later

✅ Create the disks and attach them to the EC2 instance; afterwards they show up as follows

[root@ip-127-0-0-1 data]# lsblk
NAME          MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
nvme0n1       259:0    0  40G  0 disk 
├─nvme0n1p1   259:1    0  40G  0 part /
├─nvme0n1p127 259:2    0   1M  0 part 
└─nvme0n1p128 259:3    0  10M  0 part /boot/efi
nvme1n1       259:4    0  25G  0 disk 
nvme2n1       259:5    0  25G  0 disk 
nvme3n1       259:6    0  25G  0 disk 
nvme4n1       259:7    0  25G  0 disk 
nvme5n1       259:8    0  25G  0 disk 
nvme6n1       259:9    0  25G  0 disk 
nvme7n1       259:10   0  25G  0 disk
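
The seven 25 GiB data disks here are EBS volumes. If you are reproducing this setup, they can be created and attached with the AWS CLI roughly as below (a sketch; the availability zone, volume type, and IDs are placeholders, not values from this walkthrough). On Nitro instances the attached volume appears as /dev/nvmeXn1 regardless of the --device name you pass.

aws ec2 create-volume --size 25 --volume-type gp3 --availability-zone us-east-1a
aws ec2 attach-volume --volume-id vol-0123456789abcdef0 --instance-id i-0123456789abcdef0 --device /dev/sdf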

✅ Create the RAID10 array

[root@ip-127-0-0-1 data]# mdadm --create /dev/md10 --level=10 --raid-devices=4 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/nvme4n1
mdadm: Defaulting to version 1.2 metadata
[1043803.786997] md/raid10:md10: not clean -- starting background reconstruction
[1043803.826572] md/raid10:md10: active with 4 out of 4 devices
[1043803.860346] md10: detected capacity change from 0 to 104787968
[1043803.903222] md: resync of RAID array md10
mdadm: array /dev/md10 started.
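
RAID10 with two near-copies keeps half the raw space: (4 × 25 GiB) / 2 = 50 GiB, which matches the detected capacity change above (104787968 sectors × 512 B ≈ 50 GiB). The initial resync runs in the background; a convenient way to watch it is plain watch over /proc/mdstat:

[root@ip-127-0-0-1 data]# watch -n 5 cat /proc/mdstat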

✅ Check the array status
[root@ip-127-0-0-1 data]# mdadm --detail /dev/md10
/dev/md10:
           Version : 1.2
     Creation Time : Tue Apr 29 09:52:23 2025
        Raid Level : raid10
        Array Size : 52393984 (49.97 GiB 53.65 GB)
     Used Dev Size : 26196992 (24.98 GiB 26.83 GB)
      Raid Devices : 4
     Total Devices : 4
       Persistence : Superblock is persistent

       Update Time : Tue Apr 29 09:53:01 2025
             State : clean, resyncing 
    Active Devices : 4
   Working Devices : 4
    Failed Devices : 0
     Spare Devices : 0

            Layout : near=2
        Chunk Size : 512K

Consistency Policy : resync

     Resync Status : 15% complete

              Name : 10
              UUID : 3c13fb76:d537942d:e97eba81:87c0ca4e
            Events : 2

    Number   Major   Minor   RaidDevice State
       0     259        4        0      active sync set-A   /dev/nvme1n1
       1     259        5        1      active sync set-B   /dev/nvme2n1
       2     259        6        2      active sync set-A   /dev/nvme3n1
       3     259        7        3      active sync set-B   /dev/nvme4n1
✅ Check the resync progress
[root@ip-127-0-0-1 data]# cat /proc/mdstat
Personalities : [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] 
md10 : active raid10 nvme4n1[3] nvme3n1[2] nvme2n1[1] nvme1n1[0]
      52393984 blocks super 1.2 512K chunks 2 near-copies [4/4] [UUUU]
      [====>................]  resync = 21.5% (11302400/52393984) finish=3.9min speed=171264K/sec
      
unused devices: <none>
[1044112.151499] md: md10: resync done.
[root@ip-127-0-0-1 data]# cat /proc/mdstat
Personalities : [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] 
md10 : active raid10 nvme4n1[3] nvme3n1[2] nvme2n1[1] nvme1n1[0]
      52393984 blocks super 1.2 512K chunks 2 near-copies [4/4] [UUUU]
      
unused devices: <none>
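
Rather than polling /proc/mdstat by hand, mdadm can block until sync activity finishes (its --wait misc-mode option returns once the resync/recovery/reshape is done):

[root@ip-127-0-0-1 data]# mdadm --wait /dev/md10
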
Either way, wait until the resync shows 100% complete (roughly 6-8 minutes here); otherwise the steps below may fail.

✅ Create the XFS filesystem
[root@ip-127-0-0-1 data]# mkfs.xfs /dev/md10
log stripe unit (524288 bytes) is too large (maximum is 256KiB)
log stripe unit adjusted to 32KiB
meta-data=/dev/md10              isize=512    agcount=16, agsize=818560 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=1, rmapbt=0
         =                       reflink=1    bigtime=1 inobtcount=1
data     =                       bsize=4096   blocks=13096960, imaxpct=25
         =                       sunit=128    swidth=256 blks
naming   =version 2              bsize=4096   ascii-ci=0, ftype=1
log      =internal log           bsize=4096   blocks=16384, version=2
         =                       sectsz=512   sunit=8 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
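
mkfs.xfs detected the RAID geometry on its own; the stripe values line up with the array's 512K chunk (quick arithmetic, with bsize=4096):

sunit  = 128 blocks × 4 KiB = 512 KiB    (one chunk)
swidth = 256 blocks × 4 KiB = 1024 KiB   (two data stripes: 4 devices / 2 copies)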

✅ Configure automatic mounting (persistent across reboots)

✅ Update the mdadm configuration
[root@ip-127-0-0-1 data]# mdadm --detail --scan | sudo tee -a /etc/mdadm.conf
ARRAY /dev/md10 metadata=1.2 name=10 UUID=3c13fb76:d537942d:e97eba81:87c0ca4e
✅ Inspect the RAID10 array layout
[root@ip-127-0-0-1 data]# lsblk
NAME          MAJ:MIN RM SIZE RO TYPE   MOUNTPOINTS
nvme0n1       259:0    0  40G  0 disk   
├─nvme0n1p1   259:1    0  40G  0 part   /
├─nvme0n1p127 259:2    0   1M  0 part   
└─nvme0n1p128 259:3    0  10M  0 part   /boot/efi
nvme1n1       259:4    0  25G  0 disk   
└─md10          9:10   0  50G  0 raid10 
nvme2n1       259:5    0  25G  0 disk   
└─md10          9:10   0  50G  0 raid10 
nvme3n1       259:6    0  25G  0 disk   
└─md10          9:10   0  50G  0 raid10 
nvme4n1       259:7    0  25G  0 disk   
└─md10          9:10   0  50G  0 raid10 
nvme5n1       259:8    0  25G  0 disk   
nvme6n1       259:9    0  25G  0 disk   
nvme7n1       259:10   0  25G  0 disk 
✅ Look up the RAID10 filesystem UUID
[root@ip-127-0-0-1 data]# blkid /dev/md10
/dev/md10: UUID="a6999bb8-292f-40d0-8305-4fc471b3e7c8" BLOCK_SIZE="512" TYPE="xfs"
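
One step this transcript skips: the mount point has to exist before mount -a, so create it first (path spelled as used throughout this walkthrough):

[root@ip-127-0-0-1 data]# mkdir -p /data/raid-storge/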
✅ Configure mounting at boot
[root@ip-127-0-0-1 data]# tail -1 /etc/fstab
UUID=a6999bb8-292f-40d0-8305-4fc471b3e7c8 /data/raid-storge/  xfs  defaults,nofail  0  0
[root@ip-172-31-26-146 data]# mount -a
[1044374.899097] XFS (md10): Mounting V5 Filesystem
[1044374.961019] XFS (md10): Ending clean mount

✅ Check the size of the mounted directory

[root@ip-127-0-0-1 data]# df -h /data/raid-storge/
Filesystem      Size  Used Avail Use% Mounted on
/dev/md10        50G  390M   50G   1% /data/raid-storge
⚠️ Notes

The RAID10 array is expanded by adding two disks of the same size as the existing members.
If /dev/nvme5n1 or /dev/nvme6n1 carries partitions or a filesystem, wipe them first.

✅ Expand the RAID10 array (increase the device count)

✅ If /dev/nvme5n1 or /dev/nvme6n1 still carries partitions or a filesystem, wipe it:
[root@ip-127-0-0-1 data]# wipefs -a /dev/nvme5n1
[root@ip-127-0-0-1 data]# wipefs -a /dev/nvme6n1
[root@ip-127-0-0-1 data]# dd if=/dev/zero of=/dev/nvme5n1 bs=1M count=100
100+0 records in
100+0 records out
104857600 bytes (105 MB, 100 MiB) copied, 0.177173 s, 592 MB/s
[root@ip-172-31-26-146 data]# dd if=/dev/zero of=/dev/nvme6n1 bs=1M count=100
100+0 records in
100+0 records out
104857600 bytes (105 MB, 100 MiB) copied, 0.305175 s, 344 MB/s
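
To double-check the disks are clean before adding them, wipefs with no options only lists any remaining signatures; empty output means nothing is left:

[root@ip-127-0-0-1 data]# wipefs /dev/nvme5n1
[root@ip-127-0-0-1 data]# wipefs /dev/nvme6n1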
✅ Add the two disks to the array
[root@ip-127-0-0-1 data]# mdadm --add /dev/md10 /dev/nvme5n1 /dev/nvme6n1
mdadm: added /dev/nvme5n1
mdadm: added /dev/nvme6n1
✅ Grow the array to use six devices
[root@ip-127-0-0-1 data]# mdadm --grow /dev/md10 --raid-devices=6
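
Expected capacity after the grow, by the same near=2 arithmetic as before:

usable = (6 disks × 25 GiB) / 2 copies = 75 GiB   (157181952 sectors × 512 B ≈ 75 GiB, matching the log below)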

The grow runs in two phases: a reshape across the six devices, then a resync of the new capacity (the second phase finishes faster). Once both "reshape done." and "resync done." appear in the kernel log, the expansion is complete.

[root@ip-172-31-26-146 data]# cat /proc/mdstat
Personalities : [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] 
md10 : active raid10 nvme6n1[5] nvme5n1[4] nvme4n1[3] nvme3n1[2] nvme2n1[1] nvme1n1[0]
      52393984 blocks super 1.2 512K chunks 2 near-copies [6/6] [UUUUUU]
      [==>..................]  reshape = 13.2% (6963328/52393984) finish=6.5min speed=115607K/sec
      
unused devices: <none>
[1045086.990732] md: md10: reshape done.
[1045087.183511] md10: detected capacity change from 104787968 to 157181952
[1045087.211384] md: resync of RAID array md10
[root@ip-172-31-26-146 data]# cat /proc/mdstat
Personalities : [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] 
md10 : active raid10 nvme6n1[5] nvme5n1[4] nvme4n1[3] nvme3n1[2] nvme2n1[1] nvme1n1[0]
      78590976 blocks super 1.2 512K chunks 2 near-copies [6/6] [UUUUUU]
      [==============>......]  resync = 72.8% (57252352/78590976) finish=2.0min speed=173513K/sec
      
unused devices: <none>
[1045241.575758] md: md10: resync done.
✅ Update the mdadm configuration file
[root@ip-127-0-0-1 data]# mdadm --detail --scan >> /etc/mdadm.conf
[root@ip-127-0-0-1 data]# cat /etc/mdadm.conf 
ARRAY /dev/md10 metadata=1.2 name=10 UUID=3c13fb76:d537942d:e97eba81:87c0ca4e
ARRAY /dev/md10 metadata=1.2 name=10 UUID=3c13fb76:d537942d:e97eba81:87c0ca4e
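
As the cat output shows, appending a second time left a duplicate ARRAY line. If /etc/mdadm.conf holds nothing but this array, one way to keep it clean is to regenerate the file instead of appending (a sketch, assuming no other entries need preserving):

[root@ip-127-0-0-1 data]# mdadm --detail --scan > /etc/mdadm.conf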
✅ Grow the filesystem
For an XFS filesystem:
[root@ip-127-0-0-1 data]# xfs_growfs /data/raid-storge
meta-data=/dev/md10              isize=512    agcount=16, agsize=818560 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=1, rmapbt=0
         =                       reflink=1    bigtime=1 inobtcount=1
data     =                       bsize=4096   blocks=13096960, imaxpct=25
         =                       sunit=128    swidth=256 blks
naming   =version 2              bsize=4096   ascii-ci=0, ftype=1
log      =internal log           bsize=4096   blocks=16384, version=2
         =                       sectsz=512   sunit=8 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
data blocks changed from 13096960 to 19647744
✅ Verify the new capacity
[root@ip-127-0-0-1 data]# df -hT /data/raid-storge/
Filesystem     Type  Size  Used Avail Use% Mounted on
/dev/md10      xfs    75G  569M   75G   1% /data/raid-storge
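
One more persistence note: on distributions that assemble md arrays from the initramfs (this transcript looks like Amazon Linux 2023, which uses dracut), regenerating the initramfs after editing /etc/mdadm.conf helps the array keep its /dev/md10 name across reboots (a sketch, assuming dracut is present):

[root@ip-127-0-0-1 data]# dracut -f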