OS version:
- [oracle@rac2 ~]$ uname -a
- Linux rac2.example.com 2.6.32-431.el6.x86_64 #1 SMP Sun Nov 10 22:19:54 EST 2013 x86_64 x86_64 x86_64 GNU/Linux
- [oracle@rac2 ~]$ lsb_release -a
- LSB Version: :base-4.0-amd64:base-4.0-noarch:core-4.0-amd64:core-4.0-noarch:graphics-4.0-amd64:graphics-4.0-noarch:printing-4.0-amd64:printing-4.0-noarch
- Distributor ID: RedHatEnterpriseServer
- Description: Red Hat Enterprise Linux Server release 6.5 (Santiago)
- Release: 6.5
- Codename: Santiago
Database version:
- SYS@proc2> select * from v$version where rownum=1;
-
- BANNER
- --------------------------------------------------------------------------------
- Oracle Database 11g Enterprise Edition Release 11.2.0.4.0 - 64bit Production
Environment:
- [root@rac2 ~]# ifconfig
- eth0 Link encap:Ethernet HWaddr 00:0C:29:ED:B0:97
- inet addr:192.168.28.200 Bcast:192.168.28.255 Mask:255.255.255.0
- inet6 addr: fe80::20c:29ff:feed:b097/64 Scope:Link
- UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
- RX packets:3154 errors:0 dropped:0 overruns:0 frame:0
- TX packets:2360 errors:0 dropped:0 overruns:0 carrier:0
- collisions:0 txqueuelen:1000
- RX bytes:283889 (277.2 KiB) TX bytes:445528 (435.0 KiB)
-
- eth0:1 Link encap:Ethernet HWaddr 00:0C:29:ED:B0:97
- inet addr:192.168.28.222 Bcast:192.168.28.255 Mask:255.255.255.0
- UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
-
- eth1 Link encap:Ethernet HWaddr 00:0C:29:ED:B0:A1
- inet addr:10.0.0.200 Bcast:10.0.0.255 Mask:255.255.255.0
- inet6 addr: fe80::20c:29ff:feed:b0a1/64 Scope:Link
- UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
- RX packets:92201 errors:0 dropped:0 overruns:0 frame:0
- TX packets:72680 errors:0 dropped:0 overruns:0 carrier:0
- collisions:0 txqueuelen:1000
- RX bytes:66845524 (63.7 MiB) TX bytes:42997425 (41.0 MiB)
-
- eth1:1 Link encap:Ethernet HWaddr 00:0C:29:ED:B0:A1
- inet addr:169.254.227.158 Bcast:169.254.255.255 Mask:255.255.0.0
- UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
-
- ## newly added public NIC ##
- eth2 Link encap:Ethernet HWaddr 00:0C:29:ED:B0:AB
- inet addr:20.20.20.200 Bcast:20.20.20.255 Mask:255.255.255.0
- inet6 addr: fe80::20c:29ff:feed:b0ab/64 Scope:Link
- UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
- RX packets:115 errors:0 dropped:0 overruns:0 frame:0
- TX packets:154 errors:0 dropped:0 overruns:0 carrier:0
- collisions:0 txqueuelen:1000
- RX bytes:10309 (10.0 KiB) TX bytes:16597 (16.2 KiB)
- [root@rac2 ~]# cat /etc/hosts   ## the 20.20.20.x entries below are the new NIC addresses and the VIPs to be added ##
- 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
- ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
-
- ##public##
- 192.168.28.100 rac1.example.com rac1
- 192.168.28.200 rac2.example.com rac2
- 20.20.20.100 rac1_2
- 20.20.20.200 rac2_2
-
- ##private##
- 10.0.0.100 rac1-priv.example.com rac1-priv
- 10.0.0.200 rac2-priv.example.com rac2-priv
-
-
- ##vip##
- 192.168.28.111 rac1-vip.example.com rac1-vip
- 192.168.28.222 rac2-vip.example.com rac2-vip
- 20.20.20.111 rac1-vip2.example.com rac1-vip2
- 20.20.20.222 rac2-vip2.example.com rac2-vip2
-
-
- ##scan##
- 192.168.28.233 scan-ip
Procedure:
Following the steps in MetaLink note Doc ID 1063571.1 to add a network to the cluster runs into problems; the detailed process is as follows:
- [root@rac2 ~]# srvctl add network -k 2 -S 20.20.20.0/255.255.255.0/eth2
- [root@rac2 ~]# srvctl config network
- Network exists: 1/192.168.28.0/255.255.255.0/eth0, type static
- Network exists: 2/20.20.20.0/255.255.255.0/eth2, type static
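The note's next step, creating a VIP on the new network for each node, is missing from the captured session. Judging from the resource names ora.rac1-vip2.vip and ora.rac2-vip2.vip and the `srvctl config vip` output further down, the commands would have been roughly the following (inferred, not captured):
- srvctl add vip -n rac1 -k 2 -A rac1-vip2/255.255.255.0/eth2
- srvctl add vip -n rac2 -k 2 -A rac2-vip2/255.255.255.0/eth2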
- [root@rac2 ~]# srvctl status vip -n rac1
- VIP 20.20.20.111 is enabled
- VIP 20.20.20.111 is not running
- VIP rac1-vip is enabled
- VIP rac1-vip is running on node: rac1
- [root@rac2 ~]# srvctl status vip -n rac2
- VIP 20.20.20.222 is enabled
- VIP 20.20.20.222 is not running
- VIP rac2-vip is enabled
- VIP rac2-vip is running on node: rac2
- [root@rac2 ~]#
Both new VIPs are registered but not running. Starting them is where things go wrong:
- [root@rac2 ~]# srvctl start vip -n rac1
- PRKO-2420 : VIP is already started on node(s): rac1
- [root@rac2 ~]# srvctl start vip -i 20.20.20.111
- PRKO-2420 : VIP is already started on node(s): rac2
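PRKO-2420 is the problem in miniature: starting rac1's new VIP is rejected because it is "already started", but on rac2. A direct resource query would confirm the placement (a sketch; this command is not in the captured session):
- crsctl stat res ora.rac1-vip2.vip -t
At the OS level the stray address is equally visible: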
- [oracle@rac2 ~]$ ifconfig eth2:1
- eth2:1 Link encap:Ethernet HWaddr 00:0C:29:ED:B0:AB
- inet addr:20.20.20.111 Bcast:20.20.20.255 Mask:255.255.255.0
- UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
The rac1 VIP has thus landed on node 2; make it run back on node 1:
- [root@rac2 admin]# crsctl stop res ora.rac1-vip2.vip
- CRS-2673: Attempting to stop 'ora.rac1-vip2.vip' on 'rac2'
- CRS-2677: Stop of 'ora.rac1-vip2.vip' on 'rac2' succeeded
- [root@rac2 admin]# crsctl start res ora.rac1-vip2.vip -n rac1
- CRS-2672: Attempting to start 'ora.net2.network' on 'rac1'
- CRS-2676: Start of 'ora.net2.network' on 'rac1' succeeded
- CRS-2672: Attempting to start 'ora.rac1-vip2.vip' on 'rac1'
- CRS-2676: Start of 'ora.rac1-vip2.vip' on 'rac1' succeeded
rac2's new VIP has meanwhile come up on its own node:
- [root@rac2 admin]# srvctl start vip -n rac2
- PRKO-2420 : VIP is already started on node(s): rac2
- [root@rac2 admin]# ifconfig eth2:1
- eth2:1 Link encap:Ethernet HWaddr 00:0C:29:ED:B0:AB
- inet addr:20.20.20.222 Bcast:20.20.20.255 Mask:255.255.255.0
- UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
- [root@rac2 admin]# srvctl config vip -n rac1
- VIP exists: /20.20.20.111/20.20.20.111/20.20.20.0/255.255.255.0/eth2, hosting node rac1
- VIP exists: /rac1-vip/192.168.28.111/192.168.28.0/255.255.255.0/eth0, hosting node rac1
- [root@rac2 admin]# srvctl config vip -n rac2
- VIP exists: /20.20.20.222/20.20.20.222/20.20.20.0/255.255.255.0/eth2, hosting node rac2
- VIP exists: /rac2-vip/192.168.28.222/192.168.28.0/255.255.255.0/eth0, hosting node rac2
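If the second network were being kept, the natural follow-on step would be a dedicated listener on network 2 so clients can actually connect over it; a hypothetical sketch (the listener name and port here are made up, not taken from the note):
- srvctl add listener -l LISTENER_NET2 -k 2 -p 1522
- srvctl start listener -l LISTENER_NET2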
- [root@rac2 admin]# srvctl status vip -n rac1
- VIP 20.20.20.111 is enabled
- VIP 20.20.20.111 is running on node: rac1
- VIP rac1-vip is enabled
- VIP rac1-vip is running on node: rac1
- [root@rac2 admin]# srvctl status vip -n rac2
- VIP 20.20.20.222 is enabled
- VIP 20.20.20.222 is running on node: rac2
- VIP rac2-vip is enabled
- VIP rac2-vip is running on node: rac2
srvctl now reports every VIP as running, but the cluster-wide resource view tells a different story:
- [root@rac2 admin]# crsctl stat res -t
- --- some output omitted ---
- --------------------------------------------------------------------------------
- Cluster Resources
- --------------------------------------------------------------------------------
- ora.LISTENER_SCAN1.lsnr
- 1 ONLINE ONLINE rac2
- ora.cvu
- 1 ONLINE ONLINE rac2
- ora.oc4j
- 1 ONLINE ONLINE rac1
- ora.proc.db
- 1 ONLINE ONLINE rac1 Open
- 2 ONLINE ONLINE rac2 Open
- ora.rac1-vip2.vip
- 1 ONLINE INTERMEDIATE rac1 FAILED OVER
- ora.rac1.vip
- 1 ONLINE ONLINE rac1
- ora.rac2-vip2.vip
- 1 ONLINE INTERMEDIATE rac2 FAILED OVER
- ora.rac2.vip
- 1 ONLINE ONLINE rac2
- ora.scan1.vip
- 1 ONLINE ONLINE rac2
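Both vip2 resources sit in INTERMEDIATE / FAILED OVER state. For a root-owned VIP resource in 11.2, the place to look is the crsd orarootagent log (a sketch, assuming the default Grid home log layout):
- tail -100 $GRID_HOME/log/rac2/agent/crsd/orarootagent_root/orarootagent_root.log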
The logs show no obvious error messages. Roll the change back:
- [root@rac2 admin]# srvctl stop vip -n rac1
- PRCR-1014 : Failed to stop resource ora.rac1.vip
- PRCR-1065 : Failed to stop resource ora.rac1.vip
- CRS-2529: Unable to act on 'ora.rac1.vip' because that would require stopping or relocating 'ora.LISTENER.lsnr', but the force option was not specified
- [root@rac2 admin]#
- [root@rac2 admin]# srvctl stop vip -n rac1 -f
- PRCC-1017 : 20.20.20.111 was already stopped on rac1
- PRCR-1005 : Resource ora.rac1-vip2.vip is already stopped
- [root@rac2 admin]# srvctl stop vip -n rac2 -f
- [root@rac2 admin]#
- [root@rac2 admin]# crsctl stop res ora.net2.network
- CRS-2673: Attempting to stop 'ora.net2.network' on 'rac2'
- CRS-2673: Attempting to stop 'ora.net2.network' on 'rac1'
- CRS-2677: Stop of 'ora.net2.network' on 'rac1' succeeded
- CRS-2677: Stop of 'ora.net2.network' on 'rac2' succeeded
- [root@rac2 admin]# srvctl config vip -n rac1
- VIP exists: /20.20.20.111/20.20.20.111/20.20.20.0/255.255.255.0/eth2, hosting node rac1
- VIP exists: /rac1-vip/192.168.28.111/192.168.28.0/255.255.255.0/eth0, hosting node rac1
- [root@rac2 admin]# srvctl remove vip -i 20.20.20.111
- Please confirm that you intend to remove the VIPs 20.20.20.111 (y/[n]) y
- [root@rac2 admin]# srvctl remove vip -i 20.20.20.222
- Please confirm that you intend to remove the VIPs 20.20.20.222 (y/[n]) y
- [root@rac2 admin]# srvctl remove network -k 2
- PRCR-1001 : Resource ora.net2.network does not exist
- [root@rac2 admin]#
Removing the last VIP on network 2 has evidently removed ora.net2.network along with it, hence the PRCR-1001 from the explicit remove; the cluster is back to its original single-network configuration.
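To double-check the rollback, the network configuration can be queried again; it should list only the original network 1 (a verification step, not part of the captured session):
- srvctl config network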
Source: ITPUB blog, http://blog.itpub.net/30174570/viewspace-2152633/