在dbca静默建库时出现了错误:
./dbca -silent -createDatabase -ignorePreReqs -responseFile /u01/app/oracle1/product/19/db_home1/assistants/dbca/dbca.rsp
[FATAL] [DBT-06604] The location specified for 'Fast Recovery Area Location' has insufficient free space.
CAUSE: Only (9,616MB) free space is available on the location (+ARCH/flash_recovery_area/att/).
ACTION: Choose a 'Fast Recovery Area Location' that has enough space (minimum of (10,206MB)) or free up space on the specified location.
报错提示闪回区最小空间要求是10,206MB,当前闪回区可用空间只有9,616MB,需要选择空间足够大的磁盘作为闪回区。
为了演示扩容操作我们对FRA所在磁盘组进行扩容
目录
4.1查看asm_disk视图发现CANDIDATE状态的dm-9和dm-10与之前多路径绑定的盘符相同
1.存储加盘
确定FRA的磁盘组是arch磁盘组
如果是生产环境申请存储工程师按照arch磁盘组规格添加两块新的磁盘
2.操作系统配置
注意:本环境集群节点是三个,如果是两个操作相同
2.1每台数据库服务器重新登录存储链路
重新登录存储链路是为了刷新磁盘列表,让操作系统重新扫描到新添加的磁盘
[root@antute01 ~]# iscsiadm --mode node --targetname iqn.2006-01.com.openfiler:tsn.1a16975c28c8 --portal 192.168.125.100:3260 --logout
[root@antute01 ~]# iscsiadm --mode node --targetname iqn.2006-01.com.openfiler:tsn.1a16975c28c8 --portal 192.168.125.100:3260 --login
[root@antute02 ~]# iscsiadm --mode node --targetname iqn.2006-01.com.openfiler:tsn.1a16975c28c8 --portal 192.168.125.100:3260 --logout
[root@antute02 ~]# iscsiadm --mode node --targetname iqn.2006-01.com.openfiler:tsn.1a16975c28c8 --portal 192.168.125.100:3260 --login
[root@antute03 ~]# iscsiadm --mode node --targetname iqn.2006-01.com.openfiler:tsn.1a16975c28c8 --portal 192.168.125.100:3260 --logout
[root@antute03 ~]# iscsiadm --mode node --targetname iqn.2006-01.com.openfiler:tsn.1a16975c28c8 --portal 192.168.125.100:3260 --login
2.2查看新添加的磁盘(三节点识别的信息是一致的)
发现了添加的两块新磁盘
[root@antute01 ~]# fdisk -l
Disk /dev/sdp: 20.5 GB, 20468203520 bytes, 39976960 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk /dev/sdq: 20.5 GB, 20468203520 bytes, 39976960 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
2.3获取新磁盘的wwid(用于multipath配置文件白名单绑定)
确认每台服务器获取到的磁盘uuid一致
[root@antute01 ~]# /lib/udev/scsi_id --whitelisted --device=/dev/sdp
14f504e46494c45527a6b424669552d7a4a43582d4b496150
[root@antute01 ~]# /lib/udev/scsi_id --whitelisted --device=/dev/sdq
14f504e46494c4552454f314b495a2d326877482d4e466c52
[root@antute02 ~]# /lib/udev/scsi_id --whitelisted --device=/dev/sdp
14f504e46494c45527a6b424669552d7a4a43582d4b496150
[root@antute02 ~]# /lib/udev/scsi_id --whitelisted --device=/dev/sdq
14f504e46494c4552454f314b495a2d326877482d4e466c52
[root@antute03 ~]# /lib/udev/scsi_id --whitelisted --device=/dev/sdp
14f504e46494c45527a6b424669552d7a4a43582d4b496150
[root@antute03 ~]# /lib/udev/scsi_id --whitelisted --device=/dev/sdq
14f504e46494c4552454f314b495a2d326877482d4e466c52
2.4将新磁盘信息添加到配置文件(三个节点同步操作)
vi /etc/multipath.conf
在disk_arch2 后添加
重启多路径服务
[root@antute01 ~]# systemctl stop multipathd.service
[root@antute01 ~]# systemctl start multipathd.service
[root@antute01 ~]# multipath -ll
重启多路径服务后依然没有看到新磁盘,重启操作系统后查看
重启后可以看到新增加的disk_arch3、disk_arch4
2.5对二三节点进行相同配置
添加新磁盘信息
vi /etc/multipath.conf
multipath{
wwid 14f504e46494c45527a6b424669552d7a4a43582d4b496150
alias disk_arch3
}
multipath{
wwid 14f504e46494c4552454f314b495a2d326877482d4e466c52
alias disk_arch4
}
重启操作系统后查看二三节点识别磁盘的情况
[root@antute02 ~]# multipath -ll
disk_arch3 (14f504e46494c45527a6b424669552d7a4a43582d4b496150) dm-9 OPNFILER,VIRTUAL-DISK
size=19G features='0' hwhandler='0' wp=rw
|-+- policy='service-time 0' prio=1 status=active
| `- 33:0:0:7 sdk 8:160 active ready running
`-+- policy='service-time 0' prio=1 status=enabled
`- 34:0:0:7 sdr 65:16 active ready running
disk_arch4 (14f504e46494c4552454f314b495a2d326877482d4e466c52) dm-10 OPNFILER,VIRTUAL-DISK
size=19G features='0' hwhandler='0' wp=rw
|-+- policy='service-time 0' prio=1 status=active
| `- 33:0:0:8 sdl 8:176 active ready running
`-+- policy='service-time 0' prio=1 status=enabled
`- 34:0:0:8 sds 65:32 active ready running
[root@antute03 ~]# multipath -ll
disk_arch3 (14f504e46494c45527a6b424669552d7a4a43582d4b496150) dm-10 OPNFILER,VIRTUAL-DISK
size=19G features='0' hwhandler='0' wp=rw
|-+- policy='service-time 0' prio=1 status=active
| `- 34:0:0:7 sdk 8:160 active ready running
`-+- policy='service-time 0' prio=1 status=enabled
`- 33:0:0:7 sdr 65:16 active ready running
disk_arch4 (14f504e46494c4552454f314b495a2d326877482d4e466c52) dm-9 OPNFILER,VIRTUAL-DISK
size=19G features='0' hwhandler='0' wp=rw
|-+- policy='service-time 0' prio=1 status=active
| `- 34:0:0:8 sdm 8:192 active ready running
`-+- policy='service-time 0' prio=1 status=enabled
`- 33:0:0:8 sds 65:32 active ready running
3.配置udev(三节点保持一致)
3.1udev添加磁盘信息
注意uuid一定要与multipath配置文件绑定的uuid保持一致
vi /etc/udev/rules.d/98-oracle-asmdevices.rules
KERNEL=="dm-*",ENV{DM_UUID}=="mpath-14f504e46494c45527a6b424669552d7a4a43582d4b496150",SYMLINK+="asm/asmdisk007",OWNER="grid",GROUP="asmadmin",MODE="0660"
KERNEL=="dm-*",ENV{DM_UUID}=="mpath-14f504e46494c4552454f314b495a2d326877482d4e466c52",SYMLINK+="asm/asmdisk008",OWNER="grid",GROUP="asmadmin",MODE="0660"
3.2重启udev,发现dm-9、dm-10用户组正确
[root@antute01 ~]# udevadm trigger
[root@antute01 ~]# ld -al /dev/dm*
ld: unrecognized option '-al'
ld: use the --help option for usage information
[root@antute01 ~]# ls -al /dev/dm*
brw-rw---- 1 root disk 252, 0 Jun 3 17:26 /dev/dm-0
brw-rw---- 1 root disk 252, 1 Jun 3 17:26 /dev/dm-1
brw-rw---- 1 grid asmadmin 252, 10 Jun 3 17:26 /dev/dm-10
brw-rw---- 1 grid asmadmin 252, 2 Jun 3 17:26 /dev/dm-2
brw-rw---- 1 grid asmadmin 252, 3 Jun 3 17:26 /dev/dm-3
brw-rw---- 1 grid asmadmin 252, 4 Jun 3 17:26 /dev/dm-4
brw-rw---- 1 grid asmadmin 252, 5 Jun 3 17:26 /dev/dm-5
brw-rw---- 1 grid asmadmin 252, 6 Jun 3 17:26 /dev/dm-6
brw-rw---- 1 grid asmadmin 252, 7 Jun 3 17:26 /dev/dm-7
brw-rw---- 1 grid asmadmin 252, 8 Jun 3 17:26 /dev/dm-8
brw-rw---- 1 grid asmadmin 252, 9 Jun 3 17:26 /dev/dm-9
二三节点同步操作
4.登录asm实例对arch磁盘组添加磁盘
4.1查看asm_disk视图发现CANDIDATE状态的dm-9和dm-10与之前多路径绑定的盘符相同
[grid@antute01 ~]$ sqlplus / as sysasm
SQL> col PATH for a10
SQL> select GROUP_NUMBER,DISK_NUMBER,MOUNT_STATUS,HEADER_STATUS,TOTAL_MB/1024,PATH from v$asm_disk;
GROUP_NUMBER DISK_NUMBER MOUNT_S HEADER_STATU TOTAL_MB/1024 PATH
------------ ----------- ------- ------------ ------------- ----------
0 0 CLOSED CANDIDATE 0 /dev/dm-9
0 1 CLOSED CANDIDATE 0 /dev/dm-10
2 0 CACHED MEMBER 9.53125 /dev/dm-8
3 0 CACHED MEMBER 47.65625 /dev/dm-7
3 1 CACHED MEMBER 47.65625 /dev/dm-6
2 1 CACHED MEMBER 9.53125 /dev/dm-5
2 2 CACHED MEMBER 9.53125 /dev/dm-4
1 1 CACHED MEMBER 9.53125 /dev/dm-3
1 0 CACHED MEMBER 9.53125 /dev/dm-2
4.2确认扩容磁盘组arch
SQL> select name from v$asm_diskgroup;
NAME
------------------------------
ARCH
CRS
DATA
4.3在一节点添加磁盘(ASM集群级操作,二三节点会自动同步识别)
在一节点的asm实例中执行添加磁盘
SQL> alter diskgroup ARCH add disk '/dev/dm-9';
SQL> alter diskgroup ARCH add disk '/dev/dm-10';
SQL> select GROUP_NUMBER,NAME,FAILGROUP,MOUNT_STATUS,HEADER_STATUS,TOTAL_MB/1024,PATH from v$asm_disk order by GROUP_NUMBER;
GROUP_NUMBER NAME FAILGROUP MOUNT_S HEADER_STATU TOTAL_MB/1024 PATH
------------ ---------- ---------- ------- ------------ ------------- ----------
1 ARCH_0001 ARCH_0001 CACHED MEMBER 9.53125 /dev/dm-3
1 ARCH_0002 ARCH_0002 CACHED MEMBER 19.0625 /dev/dm-9
1 ARCH_0000 ARCH_0000 CACHED MEMBER 9.53125 /dev/dm-2
1 ARCH_0003 ARCH_0003 CACHED MEMBER 19.0625 /dev/dm-10
2 CRS_0001 CRS_0001 CACHED MEMBER 9.53125 /dev/dm-5
2 CRS_0000 CRS_0000 CACHED MEMBER 9.53125 /dev/dm-8
2 CRS_0002 CRS_0002 CACHED MEMBER 9.53125 /dev/dm-4
3 DATA_0000 DATA_0000 CACHED MEMBER 47.65625 /dev/dm-7
3 DATA_0001 DATA_0001 CACHED MEMBER 47.65625 /dev/dm-6
4.4二三节点查看磁盘组状态
磁盘状态为member,磁盘组扩容完成。