第一:配置时间服务同步
A 在第一节点上作为时间服务器([root@node1 etc]# vi ntp.conf )
a 配置内容如下
server 127.127.1.0 # local clock
fudge 127.127.1.0 stratum 10
driftfile /var/lib/ntp/drift
broadcastdelay 0.008
A 在第一节点上作为时间服务器([root@node1 etc]# vi ntp.conf )
a 配置内容如下
server 127.127.1.0 # local clock
fudge 127.127.1.0 stratum 10
driftfile /var/lib/ntp/drift
broadcastdelay 0.008
b 检查服务,查看状态 停止然后启动服务
[root@node1 ~]# chkconfig ntpd on
[root@node1 ~]# service ntpd status
ntpd is stopped
[root@node2 etc]# service ntpd stop
Shutting down ntpd: [ OK ]
[root@node1 ~]# service ntpd start
Starting ntpd: [ OK ]
[root@node1 ~]# service ntpd status
ntpd (pid 4192) is running...
server 127.127.1.0 # local clock
fudge 127.127.1.0 stratum 10
[root@node1 ~]# chkconfig ntpd on
[root@node1 ~]# service ntpd status
ntpd is stopped
[root@node2 etc]# service ntpd stop
Shutting down ntpd: [ OK ]
[root@node1 ~]# service ntpd start
Starting ntpd: [ OK ]
[root@node1 ~]# service ntpd status
ntpd (pid 4192) is running...
server 127.127.1.0 # local clock
fudge 127.127.1.0 stratum 10
B 在第二节点上的时间与第一节点上的时间也一致([root@node2 etc]# vi ntp.conf )
a 修改以下的内容为如下:
server 192.168.189.138 prefer # local clock
fudge 127.127.1.0 stratum 10
driftfile /var/lib/ntp/drift
broadcastdelay 0.008
b 修改以下语句
# restrict -6 default kod nomodify notrap nopeer noquery改变为以下
restrict -6 default kod nomodify notrap noquery
a 修改以下的内容为如下:
server 192.168.189.138 prefer # local clock
fudge 127.127.1.0 stratum 10
driftfile /var/lib/ntp/drift
broadcastdelay 0.008
b 修改以下语句
# restrict -6 default kod nomodify notrap nopeer noquery改变为以下
restrict -6 default kod nomodify notrap noquery
c 停止然后启动服务
[root@node2 etc]# service ntpd stop
Shutting down ntpd: [ OK ]
[root@node2 etc]# service ntpd start
Starting ntpd: [ OK ]
[root@node2 etc]# service ntpd stop
Shutting down ntpd: [ OK ]
[root@node2 etc]# service ntpd start
Starting ntpd: [ OK ]
d 验证时间的一致性
[root@node2 etc]# date;ntpdate 192.168.189.138
Wed May 25 10:23:51 CST 2011
25 May 10:23:51 ntpdate[3690]: the NTP socket is in use, exiting
[root@node2 etc]# date;ntpdate 192.168.189.138
Wed May 25 10:23:51 CST 2011
25 May 10:23:51 ntpdate[3690]: the NTP socket is in use, exiting
第二:配置用户等效性
A 第一个节点上
[root@node1 ~]# su - oracle
[oracle@node1 ~]$ mkdir ~/.ssh
[oracle@node1 ~]$ chmod 700 ~/.ssh
[oracle@node1 ~]$ ssh-keygen -t rsa
[oracle@node1 ~]$ ssh-keygen -t dsa
B 第二个节点上
[root@node2 ~]# su - oracle
[oracle@node2 ~]$ mkdir ~/.ssh
[oracle@node2 ~]$ chmod 700 ~/.ssh
[oracle@node2 ~]$ ssh-keygen -t rsa
[oracle@node2 ~]$ ssh-keygen -t dsa
[root@node2 ~]# su - oracle
[oracle@node2 ~]$ mkdir ~/.ssh
[oracle@node2 ~]$ chmod 700 ~/.ssh
[oracle@node2 ~]$ ssh-keygen -t rsa
[oracle@node2 ~]$ ssh-keygen -t dsa
C 第一个节点上
[oracle@node1 ~]$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
[oracle@node1 ~]$ cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
[oracle@node1 ~]$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
[oracle@node1 ~]$ cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
[oracle@node1 ~]$ ssh node2 cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
[oracle@node1 ~]$ ssh node2 cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
[oracle@node1 ~]$ ssh node2 cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
[oracle@node1 ~]$ scp ~/.ssh/authorized_keys node2:~/.ssh/authorized_keys
D 第二个节点上
E 测试同步
[oracle@node1 ~]$ ssh node1 date
[oracle@node1 ~]$ ssh node2 date
[oracle@node1 ~]$ ssh node1-priv date
[oracle@node1 ~]$ ssh node2-priv date
[oracle@node1 ~]$ ssh node1 date
[oracle@node1 ~]$ ssh node2 date
[oracle@node1 ~]$ ssh node1-priv date
[oracle@node1 ~]$ ssh node2-priv date
G 添加到内存中(只对当前的会话起作用)
第一个节点上
[oracle@node1 ~]$ exec /usr/bin/ssh-agent $SHELL
第一个节点上
[oracle@node1 ~]$ exec /usr/bin/ssh-agent $SHELL
[oracle@node1 ~]$ /usr/bin/ssh-add
Identity added: /home/oracle/.ssh/id_rsa (/home/oracle/.ssh/id_rsa)
Identity added: /home/oracle/.ssh/id_dsa (/home/oracle/.ssh/id_dsa)
第二个节点上
[oracle@node2 ~]$ exec /usr/bin/ssh-agent $SHELL
[oracle@node2 ~]$ /usr/bin/ssh-add
Identity added: /home/oracle/.ssh/id_rsa (/home/oracle/.ssh/id_rsa)
Identity added: /home/oracle/.ssh/id_dsa (/home/oracle/.ssh/id_dsa)
[oracle@node2 ~]$
Identity added: /home/oracle/.ssh/id_rsa (/home/oracle/.ssh/id_rsa)
Identity added: /home/oracle/.ssh/id_dsa (/home/oracle/.ssh/id_dsa)
第二个节点上
[oracle@node2 ~]$ exec /usr/bin/ssh-agent $SHELL
[oracle@node2 ~]$ /usr/bin/ssh-add
Identity added: /home/oracle/.ssh/id_rsa (/home/oracle/.ssh/id_rsa)
Identity added: /home/oracle/.ssh/id_dsa (/home/oracle/.ssh/id_dsa)
[oracle@node2 ~]$
第三:配置ASM
A 第一个节点上
[root@node1 ~]# /etc/init.d/oracleasm configure
Configuring the Oracle ASM library driver.
A 第一个节点上
[root@node1 ~]# /etc/init.d/oracleasm configure
Configuring the Oracle ASM library driver.
This will configure the on-boot properties of the Oracle ASM library
driver. The following questions will determine whether the driver is
loaded on boot and what permissions it will have. The current values
will be shown in brackets ('[]'). Hitting without typing an
answer will keep that current value. Ctrl-C will abort.
driver. The following questions will determine whether the driver is
loaded on boot and what permissions it will have. The current values
will be shown in brackets ('[]'). Hitting without typing an
answer will keep that current value. Ctrl-C will abort.
Default user to own the driver interface []: oracle
Default group to own the driver interface []: dba
Start Oracle ASM library driver on boot (y/n) [n]: y
Scan for Oracle ASM disks on boot (y/n) [y]: y
Writing Oracle ASM library driver configuration: done
Initializing the Oracle ASMLib driver: [ OK ]
Scanning the system for Oracle ASMLib disks: [ OK ]
Default group to own the driver interface []: dba
Start Oracle ASM library driver on boot (y/n) [n]: y
Scan for Oracle ASM disks on boot (y/n) [y]: y
Writing Oracle ASM library driver configuration: done
Initializing the Oracle ASMLib driver: [ OK ]
Scanning the system for Oracle ASMLib disks: [ OK ]
[root@node1 ~]# /etc/init.d/oracleasm createdisk VOL1 /dev/sdd1
Marking disk "VOL1" as an ASM disk: [ OK ]
[root@node1 ~]# /etc/init.d/oracleasm createdisk VOL2 /dev/sde1
Marking disk "VOL2" as an ASM disk: [ OK ]
Marking disk "VOL1" as an ASM disk: [ OK ]
[root@node1 ~]# /etc/init.d/oracleasm createdisk VOL2 /dev/sde1
Marking disk "VOL2" as an ASM disk: [ OK ]
[root@node1 ~]# /etc/init.d/oracleasm scandisks
Scanning the system for Oracle ASMLib disks: [ OK ]
[root@node1 ~]# /etc/init.d/oracleasm listdisks
VOL1
VOL2
B 第二个节点上
[root@node2 ~]# /etc/init.d/oracleasm configure
Configuring the Oracle ASM library driver.
Scanning the system for Oracle ASMLib disks: [ OK ]
[root@node1 ~]# /etc/init.d/oracleasm listdisks
VOL1
VOL2
B 第二个节点上
[root@node2 ~]# /etc/init.d/oracleasm configure
Configuring the Oracle ASM library driver.
This will configure the on-boot properties of the Oracle ASM library
driver. The following questions will determine whether the driver is
loaded on boot and what permissions it will have. The current values
will be shown in brackets ('[]'). Hitting without typing an
answer will keep that current value. Ctrl-C will abort.
driver. The following questions will determine whether the driver is
loaded on boot and what permissions it will have. The current values
will be shown in brackets ('[]'). Hitting without typing an
answer will keep that current value. Ctrl-C will abort.
Default user to own the driver interface []: oracle
Default group to own the driver interface []: dba
Start Oracle ASM library driver on boot (y/n) [n]: y
Scan for Oracle ASM disks on boot (y/n) [y]: y
Writing Oracle ASM library driver configuration: done
Initializing the Oracle ASMLib driver: [ OK ]
Scanning the system for Oracle ASMLib disks: [ OK ]
[root@node2 ~]# /etc/init.d/oracleasm scandisks
Scanning the system for Oracle ASMLib disks: [ OK ]
[root@node2 ~]# /etc/init.d/oracleasm listdisks
VOL1
VOL2
Default group to own the driver interface []: dba
Start Oracle ASM library driver on boot (y/n) [n]: y
Scan for Oracle ASM disks on boot (y/n) [y]: y
Writing Oracle ASM library driver configuration: done
Initializing the Oracle ASMLib driver: [ OK ]
Scanning the system for Oracle ASMLib disks: [ OK ]
[root@node2 ~]# /etc/init.d/oracleasm scandisks
Scanning the system for Oracle ASMLib disks: [ OK ]
[root@node2 ~]# /etc/init.d/oracleasm listdisks
VOL1
VOL2
第四:安装CRS
A 检查缺少什么包
[oracle@node1 cluvfy]$ ./runcluvfy.sh stage -pre crsinst -n node1,node2 -verbose
A 检查缺少什么包
[oracle@node1 cluvfy]$ ./runcluvfy.sh stage -pre crsinst -n node1,node2 -verbose
B 如果在第二个节点上运行root.sh碰到了如下错误:
/opt/ora10g/product/10.2.0/crs_1/jdk/jre//bin/java: error while loading shared libraries: libpthread.so.0:
cannot open shared object file: No such file or directory
需要做以下的操作
===============================
a 修改 vipca 文件
[root@node2 opt]# vi /opt/ora10g/product/10.2.0/crs_1/bin/vipca
找到如下内容:
Remove this workaround when the bug 3937317 is fixed
arch=`uname -m`
if [ "$arch" = "i686" -o "$arch" = "ia64" ]
then
LD_ASSUME_KERNEL=2.4.19
export LD_ASSUME_KERNEL
fi
#End workaround
在 fi 后新添加一行:
unset LD_ASSUME_KERNEL
/opt/ora10g/product/10.2.0/crs_1/jdk/jre//bin/java: error while loading shared libraries: libpthread.so.0:
cannot open shared object file: No such file or directory
需要做以下的操作
===============================
a 修改 vipca 文件
[root@node2 opt]# vi /opt/ora10g/product/10.2.0/crs_1/bin/vipca
找到如下内容:
Remove this workaround when the bug 3937317 is fixed
arch=`uname -m`
if [ "$arch" = "i686" -o "$arch" = "ia64" ]
then
LD_ASSUME_KERNEL=2.4.19
export LD_ASSUME_KERNEL
fi
#End workaround
在 fi 后新添加一行:
unset LD_ASSUME_KERNEL
b 修改 srvctl 文件
[root@node2 opt]# vi /opt/ora10g/product/10.2.0/crs_1/bin/srvctl
找到如下内容:
LD_ASSUME_KERNEL=2.4.19
export LD_ASSUME_KERNEL
同样在其后新增加一行:
unset LD_ASSUME_KERNEL
[root@node2 opt]# vi /opt/ora10g/product/10.2.0/crs_1/bin/srvctl
找到如下内容:
LD_ASSUME_KERNEL=2.4.19
export LD_ASSUME_KERNEL
同样在其后新增加一行:
unset LD_ASSUME_KERNEL
最后在node2 重新执行 root.sh
C 如果在二个节点上运行root.sh碰到了如下错误:
OUI-25031 some of the configuration assistants failed
需要在在第二个节点上运行
以ROOT身份执行[root@node2 bin]# pwd
/opt/ora10g/product/10.2.0/crs_1/bin
[root@node2 bin]# ./vipca 配置完成后 就可以确认了
D 通过以下命令查看CRS安装是否成功
[root@node2 ~]# /opt/ora10g/product/10.2.0/crs_1/bin/crsctl check crs
CSS appears healthy
CRS appears healthy
EVM appears healthy
[root@node2 bin]# ./olsnodes
node1
node2
[root@node1 bin]# ./crs_stat -t
Name Type Target State Host
------------------------------------------------------------
ora.node1.gsd application ONLINE UNKNOWN node1
ora.node1.ons application ONLINE UNKNOWN node1
ora.node1.vip application ONLINE ONLINE node1
ora.node2.gsd application ONLINE UNKNOWN node2
ora.node2.ons application ONLINE UNKNOWN node2
ora.node2.vip application ONLINE ONLINE node2
出现上面的情况需要在节点NODE1上关闭,启动服务就可以了
[root@node1 bin]# service init.crs stop
Shutting down Oracle Cluster Ready Services (CRS):
Stopping resources.
Successfully stopped CRS resources
Stopping CSSD.
Shutting down CSS daemon.
Shutdown request successfully issued.
Shutdown has begun. The daemons should exit soon.
[root@node1 bin]# service init.crs start
Startup will be queued to init within 90 seconds.
查看结果
[root@node1 bin]# ./crs_stat -t
Name Type Target State Host
------------------------------------------------------------
ora.node1.gsd application ONLINE ONLINE node1
ora.node1.ons application ONLINE ONLINE node1
ora.node1.vip application ONLINE ONLINE node1
ora.node2.gsd application ONLINE UNKNOWN node2
ora.node2.ons application ONLINE UNKNOWN node2
ora.node2.vip application ONLINE ONLINE node2
出现上面的情况需要在节点NODE2上同样关闭,启动服务就可以了
[root@node2 bin]# service init.crs stop
Shutting down Oracle Cluster Ready Services (CRS):
Stopping resources.
Successfully stopped CRS resources
Stopping CSSD.
Shutting down CSS daemon.
Shutdown request successfully issued.
Shutdown has begun. The daemons should exit soon.
[root@node2 bin]# service init.crs start
Startup will be queued to init within 90 seconds.
查看结果
[root@node2 bin]# ./crs_stat -t
Name Type Target State Host
------------------------------------------------------------
ora.node1.gsd application ONLINE ONLINE node1
ora.node1.ons application ONLINE ONLINE node1
ora.node1.vip application ONLINE ONLINE node1
ora.node2.gsd application ONLINE ONLINE node2
ora.node2.ons application ONLINE ONLINE node2
ora.node2.vip application ONLINE ONLINE node2
OUI-25031 some of the configuration assistants failed
需要在在第二个节点上运行
以ROOT身份执行[root@node2 bin]# pwd
/opt/ora10g/product/10.2.0/crs_1/bin
[root@node2 bin]# ./vipca 配置完成后 就可以确认了
D 通过以下命令查看CRS安装是否成功
[root@node2 ~]# /opt/ora10g/product/10.2.0/crs_1/bin/crsctl check crs
CSS appears healthy
CRS appears healthy
EVM appears healthy
[root@node2 bin]# ./olsnodes
node1
node2
[root@node1 bin]# ./crs_stat -t
Name Type Target State Host
------------------------------------------------------------
ora.node1.gsd application ONLINE UNKNOWN node1
ora.node1.ons application ONLINE UNKNOWN node1
ora.node1.vip application ONLINE ONLINE node1
ora.node2.gsd application ONLINE UNKNOWN node2
ora.node2.ons application ONLINE UNKNOWN node2
ora.node2.vip application ONLINE ONLINE node2
出现上面的情况需要在节点NODE1上关闭,启动服务就可以了
[root@node1 bin]# service init.crs stop
Shutting down Oracle Cluster Ready Services (CRS):
Stopping resources.
Successfully stopped CRS resources
Stopping CSSD.
Shutting down CSS daemon.
Shutdown request successfully issued.
Shutdown has begun. The daemons should exit soon.
[root@node1 bin]# service init.crs start
Startup will be queued to init within 90 seconds.
查看结果
[root@node1 bin]# ./crs_stat -t
Name Type Target State Host
------------------------------------------------------------
ora.node1.gsd application ONLINE ONLINE node1
ora.node1.ons application ONLINE ONLINE node1
ora.node1.vip application ONLINE ONLINE node1
ora.node2.gsd application ONLINE UNKNOWN node2
ora.node2.ons application ONLINE UNKNOWN node2
ora.node2.vip application ONLINE ONLINE node2
出现上面的情况需要在节点NODE2上同样关闭,启动服务就可以了
[root@node2 bin]# service init.crs stop
Shutting down Oracle Cluster Ready Services (CRS):
Stopping resources.
Successfully stopped CRS resources
Stopping CSSD.
Shutting down CSS daemon.
Shutdown request successfully issued.
Shutdown has begun. The daemons should exit soon.
[root@node2 bin]# service init.crs start
Startup will be queued to init within 90 seconds.
查看结果
[root@node2 bin]# ./crs_stat -t
Name Type Target State Host
------------------------------------------------------------
ora.node1.gsd application ONLINE ONLINE node1
ora.node1.ons application ONLINE ONLINE node1
ora.node1.vip application ONLINE ONLINE node1
ora.node2.gsd application ONLINE ONLINE node2
ora.node2.ons application ONLINE ONLINE node2
ora.node2.vip application ONLINE ONLINE node2
第五 安装ORACLE 10G 数据库软件
第六 配置监听
确认监听配置成功
[oracle@node1 database]$ crs_stat -t -v
Name Type R/RA F/FT Target State Host
----------------------------------------------------------------------
ora....E1.lsnr application 0/5 0/0 ONLINE ONLINE node1
ora.node1.gsd application 0/5 0/0 ONLINE ONLINE node1
ora.node1.ons application 0/3 0/0 ONLINE ONLINE node1
ora.node1.vip application 0/0 0/0 ONLINE ONLINE node1
ora....E2.lsnr application 0/5 0/0 ONLINE ONLINE node2
ora.node2.gsd application 0/5 0/0 ONLINE ONLINE node2
ora.node2.ons application 0/3 0/0 ONLINE ONLINE node2
ora.node2.vip application 0/0 0/0 ONLINE ONLINE node2
[oracle@node1 database]$
第六 配置监听
确认监听配置成功
[oracle@node1 database]$ crs_stat -t -v
Name Type R/RA F/FT Target State Host
----------------------------------------------------------------------
ora....E1.lsnr application 0/5 0/0 ONLINE ONLINE node1
ora.node1.gsd application 0/5 0/0 ONLINE ONLINE node1
ora.node1.ons application 0/3 0/0 ONLINE ONLINE node1
ora.node1.vip application 0/0 0/0 ONLINE ONLINE node1
ora....E2.lsnr application 0/5 0/0 ONLINE ONLINE node2
ora.node2.gsd application 0/5 0/0 ONLINE ONLINE node2
ora.node2.ons application 0/3 0/0 ONLINE ONLINE node2
ora.node2.vip application 0/0 0/0 ONLINE ONLINE node2
[oracle@node1 database]$
第七 创建ASM
第八 创建数据库
验证
[oracle@node1 ~]$ crs_stat -t -v
Name Type R/RA F/FT Target State Host
----------------------------------------------------------------------
ora....SM1.asm application 0/5 0/0 ONLINE OFFLINE
ora....E1.lsnr application 0/5 0/0 ONLINE OFFLINE
ora.node1.gsd application 0/5 0/0 ONLINE ONLINE node1
ora.node1.ons application 0/3 0/0 ONLINE ONLINE node1
ora.node1.vip application 0/0 0/0 ONLINE ONLINE node2
ora....SM2.asm application 0/5 0/0 ONLINE OFFLINE
ora....E2.lsnr application 0/5 0/0 ONLINE OFFLINE
ora.node2.gsd application 0/5 0/0 ONLINE ONLINE node2
ora.node2.ons application 0/3 0/0 ONLINE ONLINE node2
ora.node2.vip application 0/0 0/0 ONLINE ONLINE node1
ora.racdb.db application 0/1 0/1 OFFLINE OFFLINE
ora....b1.inst application 0/5 0/0 ONLINE OFFLINE
ora....b2.inst application 0/5 0/0 ONLINE OFFLINE
用命令一次启动和关闭相关进程
[root@rac2 bin]# ./crs_stop -all
[root@rac2 bin]# ./crs_start -all
第八 创建数据库
验证
[oracle@node1 ~]$ crs_stat -t -v
Name Type R/RA F/FT Target State Host
----------------------------------------------------------------------
ora....SM1.asm application 0/5 0/0 ONLINE OFFLINE
ora....E1.lsnr application 0/5 0/0 ONLINE OFFLINE
ora.node1.gsd application 0/5 0/0 ONLINE ONLINE node1
ora.node1.ons application 0/3 0/0 ONLINE ONLINE node1
ora.node1.vip application 0/0 0/0 ONLINE ONLINE node2
ora....SM2.asm application 0/5 0/0 ONLINE OFFLINE
ora....E2.lsnr application 0/5 0/0 ONLINE OFFLINE
ora.node2.gsd application 0/5 0/0 ONLINE ONLINE node2
ora.node2.ons application 0/3 0/0 ONLINE ONLINE node2
ora.node2.vip application 0/0 0/0 ONLINE ONLINE node1
ora.racdb.db application 0/1 0/1 OFFLINE OFFLINE
ora....b1.inst application 0/5 0/0 ONLINE OFFLINE
ora....b2.inst application 0/5 0/0 ONLINE OFFLINE
用命令一次启动和关闭相关进程
[root@rac2 bin]# ./crs_stop -all
[root@rac2 bin]# ./crs_start -all
第九 验证
把以下的内容COPY 到 C:\Windows\System32\drivers\etc\hosts 文件中
192.168.189.138 node1
192.168.189.139 node2
把以下的内容COPY 到 C:\Windows\System32\drivers\etc\hosts 文件中
192.168.189.138 node1
192.168.189.139 node2
192.168.100.138 node1-vip
192.168.100.139 node2-vip
192.168.100.139 node2-vip
把以下内容添加到客户端的tnsnames.ora中
RACDB =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = node1-vip)(PORT = 1521))
(ADDRESS = (PROTOCOL = TCP)(HOST = node2-vip)(PORT = 1521))
(LOAD_BALANCE = yes)
(CONNECT_DATA =
(SERVER = DEDICATED)
(SERVICE_NAME = racdb)
(FAILOVER_MODE=
(TYPE=session)
(METHOD=basic)
(RETRIES=180)
(DELAY=5)
)
)
)
RACDB =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = node1-vip)(PORT = 1521))
(ADDRESS = (PROTOCOL = TCP)(HOST = node2-vip)(PORT = 1521))
(LOAD_BALANCE = yes)
(CONNECT_DATA =
(SERVER = DEDICATED)
(SERVICE_NAME = racdb)
(FAILOVER_MODE=
(TYPE=session)
(METHOD=basic)
(RETRIES=180)
(DELAY=5)
)
)
)
A 体验 failover
a 连接到RAC
sqlplus system/123456@RACDB
b 确认用户连接到的当前的实例
select instance_name from v$instance;
c 关闭连接到的当前实例
shutdown abort;
d 等待一会后 再查看连接的实例
select instance_name from v$instance;
B 体验 loadBalance
a test.sh
#!/bin/sh
# Load-balance test: opens $2 sequential connections (one per second) to the
# TNS alias given in $1, running test.sql each time, so the per-instance
# session distribution can be checked afterwards in gv$session.
# Usage: ./test.sh <tns_alias> <iterations>
count=0
# Original had "while [$count -lt $2]" — the missing spaces around [ and ]
# make the test a syntax error at runtime.
while [ "$count" -lt "$2" ]
do
  # Original had count='expr $count +1': single quotes assign the literal
  # string (counter never increments -> infinite loop), and expr also
  # requires a space after '+'. POSIX arithmetic expansion fixes both.
  count=$((count + 1))
  sqlplus -s username/password@"$1" @test.sql
  sleep 1
done
a 连接到RAC
sqlplus system/123456@RACDB
b 确认用户连接到的当前的实例
select instance_name from v$instance;
c 关闭连接到的当前实例
shutdown abort;
d 等待一会后 再查看连接的实例
select instance_name from v$instance;
B 体验 loadBalance
a test.sh
#!/bin/sh
# Load-balance test: opens $2 sequential connections (one per second) to the
# TNS alias given in $1, running test.sql each time, so the per-instance
# session distribution can be checked afterwards in gv$session.
# Usage: ./test.sh <tns_alias> <iterations>
count=0
# Original had "while [$count -lt $2]" — the missing spaces around [ and ]
# make the test a syntax error at runtime.
while [ "$count" -lt "$2" ]
do
  # Original had count='expr $count +1': single quotes assign the literal
  # string (counter never increments -> infinite loop), and expr also
  # requires a space after '+'. POSIX arithmetic expansion fixes both.
  count=$((count + 1))
  sqlplus -s username/password@"$1" @test.sql
  sleep 1
done
b 查看实例名称
select instance_name from v$instance;
c 执行脚本
./test.sh racdb 1000
d 脚本执行完后,查看每个实例建立的连接数量
select inst_id,count(*) from gv$session group by inst_id;
select instance_name from v$instance;
c 执行脚本
./test.sh racdb 1000
d 脚本执行完后,查看每个实例建立的连接数量
select inst_id,count(*) from gv$session group by inst_id;
来自 “ ITPUB博客 ” ,链接:http://blog.itpub.net/20976446/viewspace-696365/,如需转载,请注明出处,否则将追究法律责任。
转载于:http://blog.itpub.net/20976446/viewspace-696365/