1. IP address plan
192.168.1.215        rac1
192.168.1.217        rac2
192.168.1.219        rac3
10.0.0.215           rac1-priv
10.0.0.217           rac2-priv
192.168.1.216        rac1-vip
192.168.1.218        rac2-vip
192.168.1.220        iscsid
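
These addresses need to resolve identically on every node; a sketch of the matching /etc/hosts entries (keep the default loopback line as shipped):
vi /etc/hosts
192.168.1.215        rac1
192.168.1.217        rac2
192.168.1.219        rac3
10.0.0.215           rac1-priv
10.0.0.217           rac2-priv
192.168.1.216        rac1-vip
192.168.1.218        rac2-vip
192.168.1.220        iscsid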


2. Configure the iSCSI target server
Install:
yum install scsi-target-utils*

Start the service:
/etc/init.d/tgtd start

Enable it at boot:
chkconfig tgtd on

Check the listening port:
netstat -anlpt | grep 3260

vi /etc/tgt/targets.conf
<target iqn.2012-12.com.oracle.blues:luns1>
       backing-store /dev/sda5
       backing-store /dev/sda6
       backing-store /dev/sda7
       backing-store /dev/sda8
       backing-store /dev/sda9
       initiator-address 192.168.1.0/24
</target>

vi /etc/rc.local
tgtadm --lld iscsi --op bind --mode target --tid 1 -I ALL

Rescan from the initiator side (log out of any existing sessions, rediscover the targets, and restart the initiator):
iscsiadm -m session -u
iscsiadm -m discovery -t sendtargets -p 192.168.1.220
service iscsi restart

On the target server, restart tgtd and confirm the exported LUNs:
service tgtd restart
tgtadm --lld iscsi --mode target  --op show

3. Import the disks on the RAC nodes (iSCSI initiator side; the iscsi-initiator-utils package must be installed)
vi /etc/udev/rules.d/55-openiscsi.rules
KERNEL=="sd*",BUS=="scsi",PROGRAM="/etc/udev/scripts/iscsidev.sh %b",SYMLINK+="iscsi/%c"

vi /etc/udev/scripts/iscsidev.sh

#!/bin/bash
BUS=${1}
HOST=${BUS%%:*}
[ -e /sys/class/iscsi_host ] || exit 1
file="/sys/class/iscsi_host/host${HOST}/device/session*/iscsi_session*/targetname"
target_name=$(cat ${file})
if [ -z "${target_name}" ] ; then
      exit 1
fi
echo "${target_name##*:}"

chmod +x /etc/udev/scripts/iscsidev.sh

service iscsi restart
iscsiadm -m discovery -t sendtargets -p 192.168.1.220 -l
service iscsi restart
fdisk -l
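
If the udev rule took effect, each imported LUN also appears under /dev/iscsi with a name taken from the target IQN suffix; a quick check:
ls -l /dev/iscsi/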


4. Create the user, groups, and environment variables (on rac1, rac2, and rac3)

groupadd oinstall
groupadd dba
useradd -g oinstall -G dba oracle
passwd  oracle


mkdir -p /u01/app/oracle/
chown -R oracle:oinstall /u01
chmod -R 775 /u01
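
The Clusterware home used later (/u01/app/crs_1, matching CRS_HOME below and the root.sh path in step 13) can be pre-created the same way; a sketch:
mkdir -p /u01/app/crs_1
chown -R oracle:oinstall /u01/app/crs_1
chmod -R 775 /u01/app/crs_1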


Append the following to the oracle user's environment file (e.g. ~/.bash_profile); set ORACLE_SID per node (dbrac1 on rac1, dbrac2 on rac2, and so on):

export PATH
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/10.2.0/db_1
export CRS_HOME=/u01/app/crs_1
export PATH=$ORACLE_HOME/bin:$PATH
export ORACLE_OWNER=oracle
export ORACLE_SID=dbrac1
export ORACLE_TERM=vt100
export THREADS_FLAG=native
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:$LD_LIBRARY_PATH
export PATH=$ORACLE_HOME/bin:$PATH
export SQLPATH=/home/oracle
export EDITOR=vi
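
A quick sanity check after logging back in as oracle (values should match the node, e.g. ORACLE_SID=dbrac2 on rac2):
su - oracle
echo $ORACLE_HOME $CRS_HOME $ORACLE_SID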


5. Set up user equivalence (as the oracle user)
On every node:
mkdir ~/.ssh
chmod 700 ~/.ssh
ssh-keygen -t rsa
ssh-keygen -t dsa

On rac1, collect the local keys and push them to rac2:
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
scp ~/.ssh/authorized_keys rac2:~/.ssh/authorized_keys

On rac2, append its own keys and copy the combined file back:
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
scp ~/.ssh/authorized_keys rac1:~/.ssh/authorized_keys
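
User equivalence is normally verified from both nodes before running the installer, so the first-connection prompts are already answered; a quick check, run as oracle on rac1 and then on rac2:
ssh rac1 date
ssh rac2 date
ssh rac1-priv date
ssh rac2-priv date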

6. Synchronize time (NTP)
On the node acting as the local NTP server:
vi /etc/ntp.conf
server  127.127.1.0     # local clock
fudge   127.127.1.0 stratum 10
driftfile /var/lib/ntp/drift
broadcastdelay 0.008

service ntpd restart

On the other nodes, point ntpd at the NTP server:
vi /etc/ntp.conf
server  192.168.1.210
#fudge  127.127.1.0 stratum 10
driftfile /var/lib/ntp/drift
broadcastdelay 0.008

service ntpd restart
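
Whether the clients are actually syncing can be checked with ntpq; the server configured in ntp.conf should appear in the listing:
ntpq -p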

7. Add the following kernel parameters to /etc/sysctl.conf
vi /etc/sysctl.conf
net.core.rmem_default=262144
net.core.wmem_default=262144
net.core.rmem_max=262144
net.core.wmem_max=262144
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
fs.file-max = 65536
net.ipv4.ip_local_port_range = 1024 65000
Apply them immediately:
sysctl -p

Raise the oracle user's shell limits:
vi /etc/security/limits.conf
oracle           soft    nofile          65536
oracle           hard    nofile          65536
oracle           soft    nproc           65536
oracle           hard    nproc           65536
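
The 10gR2 prerequisites also normally make these limits take effect at login through PAM; a minimal sketch (assuming pam_limits is present, as it is on EL5):
vi /etc/pam.d/login
session    required     pam_limits.so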

8. Configure the hangcheck-timer module
Locate the module:
find /lib/modules -name "hangcheck-timer.ko"

Load the module automatically at boot:
vi /etc/rc.d/rc.local
modprobe hangcheck-timer

Set the hangcheck-timer parameters:
vi /etc/modprobe.conf
options hangcheck-timer hangcheck_tick=30 hangcheck_margin=30

Confirm the module loaded successfully:
grep Hangcheck /var/log/messages | tail -2

9. Partition the shared storage disks (run on one node only)
fdisk /dev/sdb
Command (m for help): n
Command action
  e   extended
  p   primary partition (1-4)
p
Partition number (1-4): 1
First cylinder (1-1017, default 1):
Using default value 1
Last cylinder or +size or +sizeM or +sizeK (1-1017, default 1017):
Using default value 1017

Command (m for help): w
The partition table has been altered!

Calling ioctl() to re-read partition table.
Syncing disks.

partprobe
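
The same partitioning is repeated for the remaining shared disks (sdc through sdj). On the other nodes the new partition tables only need to be re-read and verified; a quick check:
partprobe
fdisk -l | grep '^/dev/sd'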

10. Configure raw devices (for the OCR and voting disks)
Partitions bound to raw1-raw5 (OCR and voting disks):
/dev/sdf1
/dev/sdg1
/dev/sdh1
/dev/sdi1
/dev/sdj1

Partitions reserved for ASM:
/dev/sdb1
/dev/sdc1
/dev/sdd1
/dev/sde1


vi /etc/udev/rules.d/60-raw.rules
ACTION=="add", KERNEL=="sdf1", RUN+="/bin/raw /dev/raw/raw1 %N"
ACTION=="add", KERNEL=="sdg1", RUN+="/bin/raw /dev/raw/raw2 %N"
ACTION=="add", KERNEL=="sdh1", RUN+="/bin/raw /dev/raw/raw3 %N"
ACTION=="add", KERNEL=="sdi1", RUN+="/bin/raw /dev/raw/raw4 %N"
ACTION=="add", KERNEL=="sdj1", RUN+="/bin/raw /dev/raw/raw5 %N"
KERNEL=="raw[1-5]", OWNER="oracle", GROUP="oinstall", MODE="640"


Reload the udev rules:
start_udev

Check the result:
ll /dev/raw/raw*
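
The kernel's view of the bindings can also be queried directly (optional check):
raw -qa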

11. Install and configure ASMLib
rpm -ivh oracleasm-support-2.1.7-1.el5.x86_64.rpm
rpm -ivh oracleasm-2.6.18-308.el5-2.0.5-1.el5.x86_64.rpm
rpm -ivh oracleasmlib-2.0.4-1.el5.x86_64.rpm


/etc/init.d/oracleasm configure
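
The configure step is interactive; typical answers look roughly like this (exact prompt wording varies by oracleasm version):
Default user to own the driver interface []: oracle
Default group to own the driver interface []: oinstall
Start Oracle ASM library driver on boot (y/n) [n]: y
Scan for Oracle ASM disks on boot (y/n) [y]: y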

/etc/init.d/oracleasm createdisk VOL1 /dev/sdb1
/etc/init.d/oracleasm createdisk VOL2 /dev/sdc1
/etc/init.d/oracleasm createdisk VOL3 /dev/sdd1

(To remove the ASM disks again if needed:)
/etc/init.d/oracleasm deletedisk  VOL1
/etc/init.d/oracleasm deletedisk  VOL2
/etc/init.d/oracleasm deletedisk  VOL3


Scan for and list the ASM disks:
/etc/init.d/oracleasm scandisks
/etc/init.d/oracleasm listdisks
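
On the other nodes the volumes are not created again; a scan is enough to pick them up, and the listing should show the volumes created above:
/etc/init.d/oracleasm scandisks
/etc/init.d/oracleasm listdisks
VOL1
VOL2
VOL3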

12. Unpack the software
gzip -d 10201_clusterware_linux_x86_64.cpio.gz

cpio -idvm <10201_clusterware_linux_x86_64.cpio
cpio -idvm <10201_database_linux_x86_64.cpio
unzip p6810189_10204_Linux-x86-64.zip

chown -R oracle:oinstall clusterware/ database/ Disk1/

yum -y install glibc-devel*
yum  -y install gcc*
yum  -y install libXp*
yum  -y install compat-db-4*
yum  -y install libaio-0*
yum  -y install compat-libstdc++-33-3*
yum  -y install compat-gcc-34-3*
yum  -y install libgomp-*
yum  -y install glibc-*

chkconfig sendmail off


Reboot the systems.

13. Install Clusterware
Watch the system log during installation:
tail -f /var/log/messages

Run the pre-installation cluster verification:
./runcluvfy.sh stage -pre crsinst -n node1,node2 -verbose

[oracle@rac1 cluvfy]$ ./runcluvfy.sh stage -pre crsinst -n rac1,rac2 -verbose

Performing pre-checks for cluster services setup

Checking node reachability...

Check: Node reachability from node "rac1"
  Destination Node                      Reachable?
  ------------------------------------  ------------------------
  rac2                                  yes
  rac1                                  yes
Result: Node reachability check passed from node "rac1".


Checking user equivalence...

Check: User equivalence for user "oracle"
  Node Name                             Comment
  ------------------------------------  ------------------------
  rac2                                  passed
  rac1                                  passed
Result: User equivalence check passed for user "oracle".

Checking administrative privileges...

Check: Existence of user "oracle"
  Node Name     User Exists               Comment
  ------------  ------------------------  ------------------------
  rac2          yes                       passed
  rac1          yes                       passed
Result: User existence check passed for "oracle".

Check: Existence of group "oinstall"
  Node Name     Status                    Group ID
  ------------  ------------------------  ------------------------
  rac2          exists                    500
  rac1          exists                    500
Result: Group existence check passed for "oinstall".

Check: Membership of user "oracle" in group "oinstall" [as Primary]
  Node Name         User Exists   Group Exists  User in Group  Primary       Comment
  ----------------  ------------  ------------  -------------  ------------  ------------
  rac2              yes           yes           yes            yes           passed
  rac1              yes           yes           yes            yes           passed
Result: Membership check for user "oracle" in group "oinstall" [as Primary] passed.

Administrative privileges check passed.

Checking node connectivity...


Interface information for node "rac2"
  Interface Name                  IP Address                      Subnet
  ------------------------------  ------------------------------  ----------------
  eth0                            192.168.1.217                   192.168.1.0
  eth1                            10.0.0.217                      10.0.0.0


Interface information for node "rac1"
  Interface Name                  IP Address                      Subnet
  ------------------------------  ------------------------------  ----------------
  eth0                            192.168.1.215                   192.168.1.0
  eth1                            10.0.0.215                      10.0.0.0


Check: Node connectivity of subnet "192.168.1.0"
  Source                          Destination                     Connected?
  ------------------------------  ------------------------------  ----------------
  rac2:eth0                       rac1:eth0                       yes
Result: Node connectivity check passed for subnet "192.168.1.0" with node(s) rac2,rac1.

Check: Node connectivity of subnet "10.0.0.0"
  Source                          Destination                     Connected?
  ------------------------------  ------------------------------  ----------------
  rac2:eth1                       rac1:eth1                       yes
Result: Node connectivity check passed for subnet "10.0.0.0" with node(s) rac2,rac1.

Suitable interfaces for the private interconnect on subnet "192.168.1.0":
rac2 eth0:192.168.1.217
rac1 eth0:192.168.1.215

Suitable interfaces for the private interconnect on subnet "10.0.0.0":
rac2 eth1:10.0.0.217
rac1 eth1:10.0.0.215

ERROR:
Could not find a suitable set of interfaces for VIPs.

Result: Node connectivity check failed.


Checking system requirements for 'crs'...
No checks registered for this product.

Pre-check for cluster services setup was unsuccessful on all the nodes.

(The VIP error is expected when the public addresses are in a private, non-routable range, as they are here; it can be ignored, and the VIPs are configured later with vipca.)

[oracle@rac1 clusterware]$ ./runInstaller
********************************************************************************

Please run the script rootpre.sh as root on all machines/nodes. The script can be found at the toplevel of the CD or stage-area. Once you have run the script, please type Y to proceed

Answer 'y' if root has run 'rootpre.sh' so you can proceed with Oracle Clusterware installation.
Answer 'n' to abort installation and then ask root to run 'rootpre.sh'.

********************************************************************************

Has 'rootpre.sh' been run by root? [y/n] (n)
y
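
Before answering y, rootpre.sh has to have been run as root on every node; a sketch, assuming it sits at the top of the unpacked clusterware directory as the message states:
# as root, on rac1 and rac2
cd clusterware
sh ./rootpre.sh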

Starting Oracle Universal Installer...

Checking installer requirements...

Checking operating system version: must be redhat-3, SuSE-9, redhat-4, UnitedLinux-1.0, asianux-1 or asianux-2
                                     Passed


All installer requirements met.

Preparing to launch Oracle Universal Installer from /tmp/OraInstall2013-01-27_02-51-44PM. Please wait ...[oracle@rac1 clusterware]$ Oracle Universal Installer, Version 10.2.0.1.0 Production
Copyright (C) 1999, 2005, Oracle. All rights reserved.
[oracle@rac1 clusterware]$ export LANG=en
[oracle@rac1 clusterware]$ ./runInstaller

Installation log:
[root@rac1 ~]# tail -f /u01/app/oracle/oraInventory/logs/installActions2013-01-27_02-53-22PM.log


Run the root scripts (as root on rac1):
[root@rac1 ~]# /u01/app/oracle/oraInventory/orainstRoot.sh
Changing permissions of /u01/app/oracle/oraInventory to 770.
Changing groupname of /u01/app/oracle/oraInventory to oinstall.
The execution of the script is complete

[root@rac1 ~]# /u01/app/crs_1/root.sh
WARNING: directory '/u01/app' is not owned by root
WARNING: directory '/u01' is not owned by root
Checking to see if Oracle CRS stack is already configured
/etc/oracle does not exist. Creating it now.

Setting the permissions on OCR backup directory
Setting up NS directories
Oracle Cluster Registry configuration upgraded successfully
WARNING: directory '/u01/app' is not owned by root
WARNING: directory '/u01' is not owned by root
Successfully accumulated necessary OCR keys.
Using ports: CSS=49895 CRS=49896 EVMC=49898 and EVMR=49897.
node <nodenumber>: <nodename> <private interconnect name> <hostname>
node 1: rac1 rac1-priv rac1
node 2: rac2 rac2-priv rac2
Creating OCR keys for user 'root', privgrp 'root'..
Operation successful.
Now formatting voting device: /dev/raw/raw3
Now formatting voting device: /dev/raw/raw4
Now formatting voting device: /dev/raw/raw5
Format of 3 voting devices complete.
Startup will be queued to init within 90 seconds.
Adding daemons to inittab
Expecting the CRS daemons to be up within 600 seconds.
CSS is active on these nodes.
rac1
CSS is inactive on these nodes.
rac2
Local node checking complete.
Run root.sh on remaining nodes to start CRS daemons.


[root@rac1 ~]# tail -f /var/log/messages
Jan 27 14:42:49 rac1 smartd[4033]: Device: /dev/sdg, IE (SMART) not enabled, skip device Try 'smartctl -s on /dev/sdg' to turn on SMART features
Jan 27 14:42:49 rac1 smartd[4033]: Device: /dev/sdh, opened
Jan 27 14:42:49 rac1 smartd[4033]: Device: /dev/sdh, IE (SMART) not enabled, skip device Try 'smartctl -s on /dev/sdh' to turn on SMART features
Jan 27 14:42:49 rac1 smartd[4033]: Device: /dev/sdi, opened
Jan 27 14:42:49 rac1 smartd[4033]: Device: /dev/sdi, IE (SMART) not enabled, skip device Try 'smartctl -s on /dev/sdi' to turn on SMART features
Jan 27 14:42:49 rac1 smartd[4033]: Device: /dev/sdj, opened
Jan 27 14:42:49 rac1 smartd[4033]: Device: /dev/sdj, IE (SMART) not enabled, skip device Try 'smartctl -s on /dev/sdj' to turn on SMART features
Jan 27 14:42:49 rac1 smartd[4033]: Monitoring 0 ATA and 0 SCSI devices
Jan 27 14:42:49 rac1 smartd[4035]: smartd has fork()ed into background mode. New PID=4035.
Jan 27 14:42:50 rac1 avahi-daemon[3996]: Server startup complete. Host name is rac1.local. Local service cookie is 2952063156.
Jan 27 15:02:50 rac1 root: Oracle Cluster Ready Services starting by user request.
Jan 27 15:02:50 rac1 root: Cluster Ready Services completed waiting on dependencies.
Jan 27 15:03:50 rac1 init: Re-reading inittab
Jan 27 15:04:00 rac1 init: Re-reading inittab
Jan 27 15:04:01 rac1 logger: Cluster Ready Services completed waiting on dependencies.
Jan 27 15:04:01 rac1 last message repeated 2 times
Jan 27 15:05:01 rac1 logger: Running CRSD with TZ =


Before running root.sh on rac2, work around the known 10.2.0.1 vipca/srvctl failure on newer kernels: both scripts set LD_ASSUME_KERNEL, so add "unset LD_ASSUME_KERNEL" right after the lines that export it.
[root@rac2 ~]# cd /u01/app/crs_1/bin/

[root@rac2 bin]# vi +123 vipca
unset LD_ASSUME_KERNEL

[root@rac2 bin]# vi + srvctl
unset LD_ASSUME_KERNEL

[root@rac2 ~]# /u01/app/oracle/oraInventory/orainstRoot.sh
Changing permissions of /u01/app/oracle/oraInventory to 770.
Changing groupname of /u01/app/oracle/oraInventory to oinstall.
The execution of the script is complete

[root@rac2 bin]# /u01/app/crs_1/root.sh
WARNING: directory '/u01/app' is not owned by root
WARNING: directory '/u01' is not owned by root
Checking to see if Oracle CRS stack is already configured
/etc/oracle does not exist. Creating it now.

Setting the permissions on OCR backup directory
Setting up NS directories
Oracle Cluster Registry configuration upgraded successfully
WARNING: directory '/u01/app' is not owned by root
WARNING: directory '/u01' is not owned by root
clscfg: EXISTING configuration version 3 detected.
clscfg: version 3 is 10G Release 2.
Successfully accumulated necessary OCR keys.
Using ports: CSS=49895 CRS=49896 EVMC=49898 and EVMR=49897.
node <nodenumber>: <nodename> <private interconnect name> <hostname>
node 1: rac1 rac1-priv rac1
node 2: rac2 rac2-priv rac2
clscfg: Arguments check out successfully.

NO KEYS WERE WRITTEN. Supply -force parameter to override.
-force is destructive and will destroy any previous cluster
configuration.
Oracle Cluster Registry for cluster has already been initialized
Startup will be queued to init within 90 seconds.
Adding daemons to inittab
Expecting the CRS daemons to be up within 600 seconds.
CSS is active on these nodes.
rac1
rac2
CSS is active on all nodes.
Waiting for the Oracle CRSD and EVMD to start
Oracle CRS stack installed and running under init(1M)
Running vipca(silent) for configuring nodeapps
Error 0(Native: listNetInterfaces:[3])
 [Error 0(Native: listNetInterfaces:[3])]


[root@rac2 ~]# tail -f /var/log/messages
Jan 27 14:42:57 rac2 smartd[4034]: Device: /dev/sdg, IE (SMART) not enabled, skip device Try 'smartctl -s on /dev/sdg' to turn on SMART features
Jan 27 14:42:57 rac2 smartd[4034]: Device: /dev/sdh, opened
Jan 27 14:42:57 rac2 smartd[4034]: Device: /dev/sdh, IE (SMART) not enabled, skip device Try 'smartctl -s on /dev/sdh' to turn on SMART features
Jan 27 14:42:57 rac2 smartd[4034]: Device: /dev/sdi, opened
Jan 27 14:42:57 rac2 smartd[4034]: Device: /dev/sdi, IE (SMART) not enabled, skip device Try 'smartctl -s on /dev/sdi' to turn on SMART features
Jan 27 14:42:57 rac2 smartd[4034]: Device: /dev/sdj, opened
Jan 27 14:42:57 rac2 smartd[4034]: Device: /dev/sdj, IE (SMART) not enabled, skip device Try 'smartctl -s on /dev/sdj' to turn on SMART features
Jan 27 14:42:57 rac2 smartd[4034]: Monitoring 0 ATA and 0 SCSI devices
Jan 27 14:42:57 rac2 smartd[4036]: smartd has fork()ed into background mode. New PID=4036.
Jan 27 14:42:57 rac2 avahi-daemon[3997]: Server startup complete. Host name is rac2.local. Local service cookie is 1078722335.
Jan 27 15:10:29 rac2 root: Oracle Cluster Ready Services starting by user request.
Jan 27 15:10:29 rac2 root: Cluster Ready Services completed waiting on dependencies.
Jan 27 15:11:29 rac2 init: Re-reading inittab
Jan 27 15:11:39 rac2 init: Re-reading inittab
Jan 27 15:11:39 rac2 logger: Cluster Ready Services completed waiting on dependencies.
Jan 27 15:11:40 rac2 last message repeated 2 times
Jan 27 15:12:39 rac2 logger: Running CRSD with TZ =

The root.sh run on rac2 ended with:
Error 0(Native: listNetInterfaces:[3])
 [Error 0(Native: listNetInterfaces:[3])]
This means the public and cluster_interconnect interfaces have not been registered in the OCR yet; register them with oifcfg and then run vipca manually:
[root@rac2 bin]# cd /u01/app/crs_1/bin
[root@rac2 bin]# ./oifcfg iflist
eth0  192.168.1.0
eth1  10.0.0.0
[root@rac2 bin]# ./oifcfg setif -global eth0/192.168.1.0:public
[root@rac2 bin]# ./oifcfg setif -global eth1/10.0.0.0:cluster_interconnect
[root@rac2 bin]#  ./oifcfg getif
eth0  192.168.1.0  global  public
eth1  10.0.0.0  global  cluster_interconnect

Then run vipca as root (an X display is required) to create the nodeapps (VIP, GSD, ONS):
[root@rac2 bin]# ./vipca

Check the cluster resource status:
./crs_stat -t -v

[root@rac2 bin]# ./crs_stat -t -v
Name           Type           R/RA   F/FT   Target    State     Host        
----------------------------------------------------------------------
ora.rac1.gsd   application    0/5    0/0    ONLINE    ONLINE    rac1        
ora.rac1.ons   application    0/3    0/0    ONLINE    ONLINE    rac1        
ora.rac1.vip   application    0/0    0/0    ONLINE    ONLINE    rac1        
ora.rac2.gsd   application    0/5    0/0    ONLINE    ONLINE    rac2        
ora.rac2.ons   application    0/3    0/0    ONLINE    ONLINE    rac2        
ora.rac2.vip   application    0/0    0/0    ONLINE    ONLINE    rac2

Reboot both nodes and check that the cluster stack comes up on its own:
reboot

Check the cluster logs on both nodes:
[root@rac1 ~]# tail -f /var/log/messages
Jan 27 15:22:13 rac1 smartd[4002]: Device: /dev/sdh, opened
Jan 27 15:22:13 rac1 smartd[4002]: Device: /dev/sdh, IE (SMART) not enabled, skip device Try 'smartctl -s on /dev/sdh' to turn on SMART features
Jan 27 15:22:13 rac1 smartd[4002]: Device: /dev/sdi, opened
Jan 27 15:22:13 rac1 smartd[4002]: Device: /dev/sdi, IE (SMART) not enabled, skip device Try 'smartctl -s on /dev/sdi' to turn on SMART features
Jan 27 15:22:13 rac1 smartd[4002]: Device: /dev/sdj, opened
Jan 27 15:22:13 rac1 smartd[4002]: Device: /dev/sdj, IE (SMART) not enabled, skip device Try 'smartctl -s on /dev/sdj' to turn on SMART features
Jan 27 15:22:13 rac1 smartd[4002]: Monitoring 0 ATA and 0 SCSI devices
Jan 27 15:22:13 rac1 smartd[4004]: smartd has fork()ed into background mode. New PID=4004.
Jan 27 15:22:13 rac1 avahi-daemon[3965]: Server startup complete. Host name is rac1.local. Local service cookie is 2709386848.
Jan 27 15:22:21 rac1 logger: Cluster Ready Services completed waiting on dependencies.
Jan 27 15:22:21 rac1 last message repeated 2 times
Jan 27 15:23:21 rac1 logger: Running CRSD with TZ =
Jan 27 15:23:35 rac1 avahi-daemon[3965]: Registering new address record for 192.168.1.216 on eth0.
Jan 27 15:23:35 rac1 avahi-daemon[3965]: Withdrawing address record for 192.168.1.216 on eth0.
Jan 27 15:23:35 rac1 avahi-daemon[3965]: Registering new address record for 192.168.1.218 on eth0.
Jan 27 15:23:59 rac1 avahi-daemon[3965]: Withdrawing address record for 192.168.1.218 on eth0.
Jan 27 15:24:38 rac1 avahi-daemon[3965]: Registering new address record for 192.168.1.216 on eth0.

[root@rac2 ~]# tail -f /var/log/messages
Jan 27 15:22:34 rac2 smartd[4035]: Device: /dev/sdh, opened
Jan 27 15:22:34 rac2 smartd[4035]: Device: /dev/sdh, IE (SMART) not enabled, skip device Try 'smartctl -s on /dev/sdh' to turn on SMART features
Jan 27 15:22:34 rac2 smartd[4035]: Device: /dev/sdi, opened
Jan 27 15:22:34 rac2 smartd[4035]: Device: /dev/sdi, IE (SMART) not enabled, skip device Try 'smartctl -s on /dev/sdi' to turn on SMART features
Jan 27 15:22:34 rac2 smartd[4035]: Device: /dev/sdj, opened
Jan 27 15:22:34 rac2 smartd[4035]: Device: /dev/sdj, IE (SMART) not enabled, skip device Try 'smartctl -s on /dev/sdj' to turn on SMART features
Jan 27 15:22:34 rac2 smartd[4035]: Monitoring 0 ATA and 0 SCSI devices
Jan 27 15:22:34 rac2 smartd[4037]: smartd has fork()ed into background mode. New PID=4037.
Jan 27 15:22:35 rac2 avahi-daemon[3998]: Server startup complete. Host name is rac2.local. Local service cookie is 532088566.
Jan 27 15:22:47 rac2 logger: Cluster Ready Services completed waiting on dependencies.
Jan 27 15:22:47 rac2 last message repeated 2 times
Jan 27 15:23:47 rac2 logger: Running CRSD with TZ =
Jan 27 15:24:02 rac2 avahi-daemon[3998]: Registering new address record for 192.168.1.218 on eth0.

Check the cluster resource status:
[root@rac1 bin]# ./crs_stat -t -v
Name           Type           R/RA   F/FT   Target    State     Host        
----------------------------------------------------------------------
ora.rac1.gsd   application    0/5    0/0    ONLINE    ONLINE    rac1        
ora.rac1.ons   application    0/3    0/0    ONLINE    ONLINE    rac1        
ora.rac1.vip   application    0/0    0/0    ONLINE    ONLINE    rac1        
ora.rac2.gsd   application    0/5    0/0    ONLINE    ONLINE    rac2        
ora.rac2.ons   application    0/3    0/0    ONLINE    ONLINE    rac2        
ora.rac2.vip   application    0/0    0/0    ONLINE    ONLINE    rac2    


14. Install the Oracle Database software
[oracle@rac1 database]$ export LANG=en
[oracle@rac1 database]$ ./runInstaller
Starting Oracle Universal Installer...

Checking installer requirements...

Checking operating system version: must be redhat-3, SuSE-9, redhat-4, UnitedLinux-1.0, asianux-1 or asianux-2
                                     Passed


All installer requirements met.

Preparing to launch Oracle Universal Installer from /tmp/OraInstall2013-01-27_03-28-21PM. Please wait ...[oracle@rac1 database]$ Oracle Universal Installer, Version 10.2.0.1.0 Production
Copyright (C) 1999, 2005, Oracle. All rights reserved.

Watch the installation log:
[root@rac1 ~]# tail -f /u01/app/oracle/oraInventory/logs/installActions2013-01-27_03-29-59PM.log

Run the script on rac1:
[root@rac1 ~]# /u01/app/oracle/product/10.2.0/db_1/root.sh
Running Oracle10 root.sh script...

The following environment variables are set as:
   ORACLE_OWNER= oracle
   ORACLE_HOME=  /u01/app/oracle/product/10.2.0/db_1

Enter the full pathname of the local bin directory: [/usr/local/bin]:
  Copying dbhome to /usr/local/bin ...
  Copying oraenv to /usr/local/bin ...
  Copying coraenv to /usr/local/bin ...


Creating /etc/oratab file...
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root.sh script.
Now product-specific root actions will be performed.

Run the script on rac2:
[root@rac2 ~]# /u01/app/oracle/product/10.2.0/db_1/root.sh
Running Oracle10 root.sh script...

The following environment variables are set as:
   ORACLE_OWNER= oracle
   ORACLE_HOME=  /u01/app/oracle/product/10.2.0/db_1

Enter the full pathname of the local bin directory: [/usr/local/bin]:  
  Copying dbhome to /usr/local/bin ...
  Copying oraenv to /usr/local/bin ...
  Copying coraenv to /usr/local/bin ...


Creating /etc/oratab file...
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root.sh script.
Now product-specific root actions will be performed.


15. Upgrade the Oracle Database software (apply the 10.2.0.4 patch set from Disk1)
[oracle@rac1 Disk1]$ ./runInstaller
Starting Oracle Universal Installer...

Checking installer requirements...

Checking operating system version: must be redhat-3, SuSE-9, SuSE-10, redhat-4, redhat-5, UnitedLinux-1.0, asianux-1, asianux-2 or asianux-3
                                     Passed


All installer requirements met.

Preparing to launch Oracle Universal Installer from /tmp/OraInstall2013-01-27_03-41-56PM. Please wait ...[oracle@rac1 Disk1]$ Oracle Universal Installer, Version 10.2.0.4.0 Production
Copyright (C) 1999, 2008, Oracle. All rights reserved.

Watch the installation log:
tail -f /u01/app/oracle/oraInventory/logs/installActions2013-01-27_03-41-56PM.log


Run the root script on each node:
[root@rac1 ~]# /u01/app/oracle/product/10.2.0/db_1/root.sh
Running Oracle10 root.sh script...

The following environment variables are set as:
   ORACLE_OWNER= oracle
   ORACLE_HOME=  /u01/app/oracle/product/10.2.0/db_1

Enter the full pathname of the local bin directory: [/usr/local/bin]:
The file "dbhome" already exists in /usr/local/bin.  Overwrite it? (y/n)
[n]: y
  Copying dbhome to /usr/local/bin ...
The file "oraenv" already exists in /usr/local/bin.  Overwrite it? (y/n)
[n]: y
  Copying oraenv to /usr/local/bin ...
The file "coraenv" already exists in /usr/local/bin.  Overwrite it? (y/n)
[n]: y
  Copying coraenv to /usr/local/bin ...

Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root.sh script.
Now product-specific root actions will be performed.


[root@rac2 ~]# /u01/app/oracle/product/10.2.0/db_1/root.sh
Running Oracle10 root.sh script...

The following environment variables are set as:
   ORACLE_OWNER= oracle
   ORACLE_HOME=  /u01/app/oracle/product/10.2.0/db_1

Enter the full pathname of the local bin directory: [/usr/local/bin]:
The file "dbhome" already exists in /usr/local/bin.  Overwrite it? (y/n)
[n]: y
  Copying dbhome to /usr/local/bin ...
The file "oraenv" already exists in /usr/local/bin.  Overwrite it? (y/n)
[n]: y
  Copying oraenv to /usr/local/bin ...
The file "coraenv" already exists in /usr/local/bin.  Overwrite it? (y/n)
[n]: y
  Copying coraenv to /usr/local/bin ...

Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root.sh script.
Now product-specific root actions will be performed.

16. Configure the listeners with netca
[oracle@rac1 Disk1]$ netca

[oracle@rac1 Disk1]$ cd /u01/app/crs_1/bin/
[oracle@rac1 bin]$ ./crs_stat -t -v
Name           Type           R/RA   F/FT   Target    State     Host        
----------------------------------------------------------------------
ora....C1.lsnr application    0/5    0/0    ONLINE    ONLINE    rac1        
ora.rac1.gsd   application    0/5    0/0    ONLINE    ONLINE    rac1        
ora.rac1.ons   application    0/3    0/0    ONLINE    ONLINE    rac1        
ora.rac1.vip   application    0/0    0/0    ONLINE    ONLINE    rac1        
ora....C2.lsnr application    0/5    0/0    ONLINE    ONLINE    rac2        
ora.rac2.gsd   application    0/5    0/0    ONLINE    ONLINE    rac2        
ora.rac2.ons   application    0/3    0/0    ONLINE    ONLINE    rac2        
ora.rac2.vip   application    0/0    0/0    ONLINE    ONLINE    rac2  

17. Create the RAC database with dbca
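
dbca is run as the oracle user on one node and, for RAC, creates the database across both instances (the ASM disk group is built from the VOL1-VOL3 disks created earlier); a minimal sketch:
[oracle@rac1 ~]$ export LANG=en
[oracle@rac1 ~]$ dbca
Afterwards, ./crs_stat -t should additionally show the ora.<dbname>.db, instance, and ASM resources.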