CentOS 6: Installing Oracle 11g RAC on Two Nodes (Part 1)

Virtual machine setup: each of the two nodes gets dual NICs, and nine shared disks are added.

Three voting disks (2 GB each), three data disks (20 GB each), and three recovery disks (15 GB each).

All nine disks must be attached to both virtual machines in shared mode.
Edit each VM's .vmx file and add the following: disable disk locking, enable UUID generation, and set each disk's bus sharing to virtual. Note that scsi0:7 is skipped because VMware reserves SCSI ID 7 for the controller itself.

disk.locking=false
disk.EnableUUID="TRUE"
scsi0:1.sharedBus = "virtual"
scsi0:2.sharedBus = "virtual"
scsi0:3.sharedBus = "virtual"
scsi0:4.sharedBus = "virtual"
scsi0:5.sharedBus = "virtual"
scsi0:6.sharedBus = "virtual"
scsi0:8.sharedBus = "virtual"
scsi0:9.sharedBus = "virtual"
scsi0:10.sharedBus = "virtual"

Delete the NIC MAC bindings from the 70-persistent-net rules file; they are regenerated automatically at boot.
[root@rac1 ~ 11:22 9]# vim /etc/udev/rules.d/70-persistent-net.rules
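Alternatively, the whole file can simply be removed; udev recreates it from the current MAC addresses on the next boot. A minimal sketch:

rm -f /etc/udev/rules.d/70-persistent-net.rules
reboot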

After cloning the virtual machine, the NIC MAC addresses must be regenerated.
Copy the interface config file and adjust both:
[root@rac1 /etc/sysconfig/network-scripts 11:20 3]# cp -p ifcfg-eth0 ifcfg-eth1
[root@rac1 /etc/sysconfig/network-scripts 11:24 10]# cat ifcfg-eth0

DEVICE=eth0
TYPE=Ethernet
ONBOOT=yes
NM_CONTROLLED=yes
BOOTPROTO=none
IPADDR=192.168.31.31
PREFIX=24
GATEWAY=192.168.31.1
DNS1=223.5.5.5
DOMAIN=rac.com
DEFROUTE=yes
IPV4_FAILURE_FATAL=yes
IPV6INIT=no
NAME="System eth0"

[root@rac1 /etc/sysconfig/network-scripts 11:24 11]# cat ifcfg-eth1

DEVICE=eth1
TYPE=Ethernet
ONBOOT=yes
NM_CONTROLLED=yes
BOOTPROTO=none
IPADDR=10.10.10.31
PREFIX=24
DNS1=223.5.5.5
DOMAIN=rac.com
DEFROUTE=no
IPV4_FAILURE_FATAL=yes
IPV6INIT=no
NAME="System eth1"

[root@rac1 ~ 13:21 1]# lsblk

NAME                        MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda                           8:0    0   36G  0 disk 
├─sda1                        8:1    0  1.1G  0 part /boot
├─sda2                        8:2    0  5.9G  0 part [SWAP]
└─sda3                        8:3    0 29.1G  0 part 
  └─vg_host-LogVol00 (dm-0) 253:0    0 29.1G  0 lvm  /
sdb                           8:16   0    2G  0 disk 
sdc                           8:32   0    2G  0 disk 
sdd                           8:48   0    2G  0 disk 
sde                           8:64   0   20G  0 disk 
sdf                           8:80   0   20G  0 disk 
sdh                           8:112  0   15G  0 disk 
sdi                           8:128  0   15G  0 disk 
sdg                           8:96   0   20G  0 disk 
sdj                           8:144  0   15G  0 disk

Write a short shell script that generates the 99- udev rules file, one rule per shared disk, keyed on each raw device's SCSI UUID:

#!/bin/bash
# For each shared disk sdb..sdj, capture its SCSI UUID and emit a udev rule
# that names it /dev/asm-disk* and assigns it to grid:asmadmin with mode 0660.
for i in b c d e f g h i j;
do
    echo "KERNEL==\"sd*\", BUS==\"scsi\", PROGRAM==\"/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/\$name\", RESULT==\"`/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/sd$i`\", NAME=\"asm-disk$i\", OWNER=\"grid\", GROUP=\"asmadmin\", MODE=\"0660\"" >> /etc/udev/rules.d/99-oracle-asmdevices.rules
done

[root@rac1 /etc/udev/rules.d 13:25 6]# cat 99-oracle-asmdevices.rules

KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="36000c298a2b2d3794309e171897c26ad", NAME="asm-diskb", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="36000c294d5654581d0e4423163311fc7", NAME="asm-diskc", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="36000c29b002fde9503d35b5f29f3ff65", NAME="asm-diskd", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="36000c297e4b5edee0a8ce2cab38e3c7a", NAME="asm-diske", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="36000c29069d43173b9fad5b80d138605", NAME="asm-diskf", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="36000c29b99539ce226192e2eb071ba60", NAME="asm-diskg", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="36000c2909c1113d7628f9cc1536485b1", NAME="asm-diskh", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="36000c29977e08fad615bcc2c67351cb7", NAME="asm-diski", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="36000c2944e293dd33db9d9fa9cc70715", NAME="asm-diskj", OWNER="grid", GROUP="asmadmin", MODE="0660"
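The rules take effect only after udev reloads them; either of the following works on CentOS 6. Note that the grid user and asmadmin group are created later in this guide, so until then udev falls back to root:root ownership (as the listing below shows); re-trigger udev after creating the users so the devices become grid:asmadmin.

start_udev
# or equivalently:
udevadm control --reload-rules && udevadm trigger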

[root@rac1 ~ 13:27 2]# ll /dev/asm*

brw-rw---- 1 root root 8,  16 2022-08-06 13:27:05 /dev/asm-diskb
brw-rw---- 1 root root 8,  32 2022-08-06 13:27:05 /dev/asm-diskc
brw-rw---- 1 root root 8,  48 2022-08-06 13:27:05 /dev/asm-diskd
brw-rw---- 1 root root 8,  64 2022-08-06 13:27:05 /dev/asm-diske
brw-rw---- 1 root root 8,  80 2022-08-06 13:27:05 /dev/asm-diskf
brw-rw---- 1 root root 8,  96 2022-08-06 13:27:06 /dev/asm-diskg
brw-rw---- 1 root root 8, 112 2022-08-06 13:27:06 /dev/asm-diskh
brw-rw---- 1 root root 8, 128 2022-08-06 13:27:05 /dev/asm-diski
brw-rw---- 1 root root 8, 144 2022-08-06 13:27:05 /dev/asm-diskj

Configure NTP to serve the local clock.

[root@rac1 ~ 13:30 6]# yum -y install ntp
Comment out the following lines in /etc/ntp.conf:

#restrict default kod nomodify notrap nopeer noquery
#restrict -6 default kod nomodify notrap nopeer noquery
#restrict -6 ::1
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst

Then add to /etc/ntp.conf:

restrict default nomodify
server 127.127.1.0
fudge 127.127.1.0 stratum 1

Make sure ntpd runs with the -x (slew) option, which Oracle Clusterware's time synchronization checks require:
[root@host ~ 18:09 7]# vim /etc/sysconfig/ntpd

OPTIONS="-x -u ntp:ntp -p /var/run/ntpd.pid -g"

[root@host ~ 16:40 20]# service ntpd start && service ntpd status && chkconfig ntpd on && chkconfig --list | grep ntpd
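To confirm ntpd is syncing to the local clock, query the peer list (a quick check, not part of the original transcript); LOCAL(0) should appear as the sync peer at stratum 1:

ntpq -p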

Configure DNS (BIND).

[root@rac1 ~ 16:50 32]# yum -y install bind*
[root@rac1 ~ 16:50 33]# vim /etc/named.conf

listen-on port 53 { any; };
//listen-on-v6 port 53 { ::1; };
allow-query     { any; };
dnssec-enable no;
dnssec-validation no;

[root@rac1 ~ 16:54 35]# vim /etc/named.rfc1912.zones
Add the following zone definitions:

zone "rac.com" IN {
        type master;
        file "named.rac.localhost";
        allow-update { none; };
};
zone "10.10.10.in-addr.arpa" IN {
        type master;
        file "named.rac10.loopback";
        allow-update { none; };
};
zone "31.168.192.in-addr.arpa" IN {
        type master;
        file "named.rac31.loopback";
        allow-update { none; };
};

[root@rac1 ~ 16:56 48]# cd /var/named
[root@rac1 /var/named 16:59 50]# cp -p named.localhost named.rac.localhost
[root@rac1 /var/named 16:59 51]# cp -p named.loopback named.rac31.loopback && cp -p named.loopback named.rac10.loopback

[root@host /var/named 17:56 158]# ll
total 44
drwxr-x--- 7 root  named 4096 2022-08-03 08:17:10 chroot
drwxrwx--- 2 named named 4096 2022-08-06 17:16:15 data
drwxrwx--- 2 named named 4096 2022-08-06 17:16:43 dynamic
-rw-r----- 1 root  named 3289 2017-04-11 23:01:00 named.ca
-rw-r----- 1 root  named  152 2009-12-15 20:27:51 named.empty
-rw-r----- 1 root  named  152 2007-06-21 18:09:43 named.localhost
-rw-r----- 1 root  named  168 2009-12-15 20:27:43 named.loopback
-rw-r----- 1 root  named  196 2022-08-06 17:55:25 named.rac10.loopback
-rw-r----- 1 root  named  287 2022-08-06 17:55:54 named.rac31.loopback
-rw-r----- 1 root  named  326 2022-08-06 17:45:41 named.rac.localhost
drwxrwx--- 2 named named 4096 2020-11-07 20:00:41 slaves

[root@rac1 /var/named]# vim named.rac.localhost
Edit as follows:

$TTL 1D
@       IN SOA  @ rac.com. (
                                        0       ; serial
                                        1D      ; refresh
                                        1H      ; retry
                                        1W      ; expire
                                        3H )    ; minimum
        NS      @
        A       127.0.0.1
rac1    A       192.168.31.31
rac2    A       192.168.31.32
rac1-priv       A       10.10.10.31
rac2-priv       A       10.10.10.32
rac1-vip        A       192.168.31.33
rac2-vip        A       192.168.31.34
rac-scan        A       192.168.31.30
rac-scan        A       192.168.31.35

[root@rac1 /var/named 17:37 104]# vim named.rac10.loopback
Edit as follows:

$TTL 1D
@       IN SOA  @ rac.com. (
                                        0       ; serial
                                        1D      ; refresh
                                        1H      ; retry
                                        1W      ; expire
                                        3H )    ; minimum
        NS      rac.com.
        PTR     rac.com.
31      PTR     rac1-priv.rac.com.
32      PTR     rac2-priv.rac.com.

[root@rac1 /var/named 17:37 104]# vim named.rac31.loopback
Edit as follows:

$TTL 1D
@       IN SOA  @ rac.com. (
                                        0       ; serial
                                        1D      ; refresh
                                        1H      ; retry
                                        1W      ; expire
                                        3H )    ; minimum
        NS      rac.com.
        PTR     rac.com.
31      PTR     rac1.rac.com.
32      PTR     rac2.rac.com.
33      PTR     rac1-vip.rac.com.
34      PTR     rac2-vip.rac.com.
30      PTR     rac-scan.rac.com.
35      PTR     rac-scan.rac.com.
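Each zone file can also be validated on its own with named-checkzone before restarting named (an optional sketch using the zone names defined above):

named-checkzone rac.com /var/named/named.rac.localhost
named-checkzone 10.10.10.in-addr.arpa /var/named/named.rac10.loopback
named-checkzone 31.168.192.in-addr.arpa /var/named/named.rac31.loopback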

Check the configuration and load all zones:
[root@rac1 /var/named 17:39 106]# named-checkconf -z /etc/named.conf

zone localhost.localdomain/IN: loaded serial 0
zone localhost/IN: loaded serial 0
zone 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa/IN: loaded serial 0
zone 1.0.0.127.in-addr.arpa/IN: loaded serial 0
zone 0.in-addr.arpa/IN: loaded serial 0
zone rac.com/IN: loaded serial 0
zone 31.168.192.in-addr.arpa/IN: loaded serial 0
zone 10.10.10.in-addr.arpa/IN: loaded serial 0

[root@rac1 /var/named 17:40 109]# service named restart && chkconfig named on

[root@rac1 ~ 18:04 161]# vim /etc/resolv.conf

search rac.com
nameserver 192.168.31.31
nameserver 223.5.5.5
[root@rac2 ~ 18:04 161]# vim /etc/resolv.conf

search rac.com
nameserver 192.168.31.32
nameserver 223.5.5.5
[root@rac1 /var/named 17:43 112]# nslookup rac1 && nslookup rac1.rac.com && nslookup 192.168.31.31

Server:         192.168.31.31
Address:        192.168.31.31#53

Name:   rac1.rac.com
Address: 192.168.31.31

Server:         192.168.31.31
Address:        192.168.31.31#53

Name:   rac1.rac.com
Address: 192.168.31.31

Server:         192.168.31.31
Address:        192.168.31.31#53

31.31.168.192.in-addr.arpa      name = rac1.rac.com.

[root@rac1 /var/named 17:43 113]# nslookup rac1 && nslookup rac1.rac.com && nslookup 192.168.31.32
[root@rac1 /var/named 17:43 114]# nslookup rac1 && nslookup rac1.rac.com && nslookup 10.10.10.31
[root@rac1 /var/named 17:43 115]# nslookup rac1 && nslookup rac1.rac.com && nslookup 10.10.10.32
[root@rac1 /var/named 17:43 112]# nslookup rac2 && nslookup rac2.rac.com && nslookup 10.10.10.32

Server:         192.168.31.31
Address:        192.168.31.31#53

Name:   rac2.rac.com
Address: 192.168.31.32

Server:         192.168.31.31
Address:        192.168.31.31#53

Name:   rac2.rac.com
Address: 192.168.31.32

Server:         192.168.31.31
Address:        192.168.31.31#53

32.10.10.10.in-addr.arpa        name = rac2-priv.rac.com.

[root@rac1 /var/named 17:43 113]# nslookup rac2 && nslookup rac2.rac.com && nslookup 10.10.10.31
[root@rac1 /var/named 17:43 114]# nslookup rac2 && nslookup rac2.rac.com && nslookup 192.168.31.31
[root@rac1 /var/named 17:43 115]# nslookup rac2 && nslookup rac2.rac.com && nslookup 192.168.31.32

On both nodes: disable unneeded services, turn off the firewall, and set up /etc/hosts.

Remove OpenJDK:
[root@host ~] yum -y remove java java-1.*-openjdk*
Disable services:
[root@host ~] service postfix stop && chkconfig postfix off && service NetworkManager stop && chkconfig NetworkManager off
[root@host ~] service iptables stop && service ip6tables stop && chkconfig iptables off && chkconfig ip6tables off

Set the hostname:
[root@host ~] vim /etc/sysconfig/network

Permanently disable SELinux by setting SELINUX=disabled in the config file:
[root@host ~] vim /etc/selinux/config

Turn off SELinux enforcement immediately (no reboot required):
[root@host ~] setenforce 0

Configure the hosts file:
[root@host ~] vim /etc/hosts

192.168.31.31   rac1    rac1.rac.com
192.168.31.32   rac2    rac2.rac.com
10.10.10.31  rac1-priv  rac1-priv.rac.com
10.10.10.32  rac2-priv  rac2-priv.rac.com
192.168.31.33   rac1-vip        rac1-vip.rac.com
192.168.31.34   rac2-vip        rac2-vip.rac.com
192.168.31.30   rac-scan        rac-scan.rac.com
192.168.31.35   rac-scan        rac-scan.rac.com
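A quick loop to confirm the reachable entries respond (an illustrative check; the VIP and SCAN addresses will not answer until Grid Infrastructure is running):

for h in rac1 rac2 rac1-priv rac2-priv; do ping -c1 $h; done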

Configure kernel parameters and user limits on both nodes.

[root@host ~ 18:13 169]# vim /etc/sysctl.conf

net.ipv4.ip_local_port_range = 9000 65500
fs.file-max = 6815744
kernel.shmall = 2097152
kernel.shmmax = 2534807552
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
net.core.rmem_default = 262144
net.core.wmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_max = 1048576
fs.aio-max-nr = 1048576
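Reload the kernel parameters so they take effect without a reboot:

sysctl -p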
[root@host ~ 18:14 170]# vim /etc/security/limits.conf

grid soft nproc 2047
grid hard nproc 16384
grid soft nofile 1024
grid hard nofile 65536
grid soft stack 10240
grid hard stack 32768
oracle soft nproc 2047
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536
oracle soft stack 10240
oracle hard stack 32768
[root@host ~ 18:15 172]# vim /etc/pam.d/login

session    required     /lib64/security/pam_limits.so
session    required     pam_limits.so
[root@host ~ 18:16 173]# vim /etc/profile

if [ $USER = "oracle" ] || [ $USER = "grid" ]; then
  if [ $SHELL = "/bin/ksh" ]; then
    ulimit -p 16384
    ulimit -n 65536
  else
    ulimit -u 16384 -n 65536
  fi  
  umask 022 
fi
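The limits can be spot-checked from a fresh login shell for each user (a quick verification sketch; run after the users are created below):

su - grid -c 'ulimit -n -u -s'
su - oracle -c 'ulimit -n -u -s'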

Configure users, groups, directories, and environments on both nodes.

Users and groups:

[root@host ~] groupadd -g 200 oinstall
[root@host ~] groupadd -g 201 dba
[root@host ~] groupadd -g 202 oper
[root@host ~] groupadd -g 203 asmadmin
[root@host ~] groupadd -g 204 asmoper
[root@host ~] groupadd -g 205 asmdba
[root@host ~] useradd -u 200 -g oinstall -G dba,asmdba,oper,asmadmin oracle
[root@host ~] useradd -u 201 -g oinstall -G asmadmin,asmdba,asmoper,dba grid

[root@host ~] groupadd -g 200 oinstall && groupadd -g 201 dba && groupadd -g 202 oper && groupadd -g 203 asmadmin && groupadd -g 204 asmoper && groupadd -g 205 asmdba
[root@host ~] useradd -u 200 -g oinstall -G dba,asmdba,oper,asmadmin oracle && useradd -u 201 -g oinstall -G asmadmin,asmdba,asmoper,dba grid
[root@host ~] echo "1234" | passwd --stdin grid && echo "1234" | passwd --stdin oracle
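UIDs, GIDs, and group memberships must match across the two nodes; a quick check:

id grid
id oracle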

Installation directories:

[root@host ~ 18:18 177]# mkdir -p /u01/app/11.2.0/grid && mkdir -p /u01/app/grid  && mkdir -p /u01/app/oracle && mkdir -p /u01/software
[root@host ~ 18:19 178]# chmod -R 755 /u01 && chown -R grid:oinstall /u01 && chown -R oracle:oinstall /u01/app/oracle

Install dependencies:

[root@host ~ 18:19 179]# yum install -y gcc compat* libstdc++-devel elfutils-libelf-devel compat-libstdc* gcc-c++ libaio-devel pdksh unixODBC net-tools vim unzip

pdksh is not in the CentOS 6 repositories; download pdksh-5.2.14-37.el5.x86_64.rpm separately and install it (done below).

grid user environment variables (the rlwrap aliases assume rlwrap is installed, e.g. from EPEL):

[grid@rac1 ~] vim /home/grid/.bash_profile

umask 022
export EDITOR=vim
export LANG="en_US.UTF-8"
export ORACLE_HOSTNAME=rac1
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/11.2.0/grid
export ORACLE_SID=+ASM1
export NLS_LANG=AMERICAN_AMERICA.AL32UTF8
export NLS_DATE_FORMAT="yyyy-mm-dd HH24:MI:SS"
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:${LD_LIBRARY_PATH}
export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib
export PATH=.:${PATH}:$ORACLE_HOME/bin:/usr/bin:/bin:/usr/local/bin
alias asmcmd="rlwrap asmcmd -p"
alias sqlplus="rlwrap sqlplus"
[grid@rac2 ~] vim /home/grid/.bash_profile

umask 022
export EDITOR=vim
export LANG="en_US.UTF-8"
export ORACLE_HOSTNAME=rac2
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/11.2.0/grid
export ORACLE_SID=+ASM2
export NLS_LANG=AMERICAN_AMERICA.AL32UTF8
export NLS_DATE_FORMAT="yyyy-mm-dd HH24:MI:SS"
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:${LD_LIBRARY_PATH}
export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib
export PATH=.:${PATH}:$ORACLE_HOME/bin:/usr/bin:/bin:/usr/local/bin
alias asmcmd="rlwrap asmcmd -p"
alias sqlplus="rlwrap sqlplus"

oracle user environment variables:

[oracle@rac1 ~] vim /home/oracle/.bash_profile

umask 022 
export EDITOR=vim
export LANG="en_US.UTF-8"
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_1
export PATH=.:${PATH}:$ORACLE_HOME/bin:/bin:/usr/bin:/usr/sbin:/usr/local/bin
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:${LD_LIBRARY_PATH}
export NLS_LANG=AMERICAN_AMERICA.AL32UTF8
export NLS_DATE_FORMAT="yyyy-mm-dd HH24:MI:SS"
export ORACLE_HOSTNAME=rac1
export ORACLE_SID=orcl1
export ORACLE_UNQNAME=dbrac
alias sqlplus="rlwrap sqlplus"
alias rman="rlwrap rman"
[oracle@rac2 ~] vim /home/oracle/.bash_profile

umask 022 
export EDITOR=vim
export LANG="en_US.UTF-8"
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_1
export PATH=.:${PATH}:$ORACLE_HOME/bin:/bin:/usr/bin:/usr/sbin:/usr/local/bin
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:${LD_LIBRARY_PATH}
export NLS_LANG=AMERICAN_AMERICA.AL32UTF8
export NLS_DATE_FORMAT="yyyy-mm-dd HH24:MI:SS"
export ORACLE_HOSTNAME=rac2
export ORACLE_SID=orcl2
export ORACLE_UNQNAME=dbrac
alias sqlplus="rlwrap sqlplus"
alias rman="rlwrap rman"
[root@rac1 ~] source /home/grid/.bash_profile && source /home/oracle/.bash_profile
[root@rac2 ~] source /home/grid/.bash_profile && source /home/oracle/.bash_profile

Configure SSH user equivalence for the grid and oracle users on both nodes.

User equivalence lets the installer on one node copy files to the other over SSH (a shared disk would also work).
Configure sshd to allow both public-key and password authentication:

[root@rac1 ~ 18:38 2]# vim /etc/ssh/sshd_config

PubkeyAuthentication yes
AuthorizedKeysFile      .ssh/authorized_keys
PasswordAuthentication yes

[root@rac1 ~ 18:38 3]# service sshd restart

As the grid user: generate keys on both nodes, gather all public keys on rac1, then copy the result to rac2.

[grid@rac1 ~ 18:40 1]$ ssh-keygen -t rsa
[grid@rac1 ~ 18:40 2]$ ssh-keygen -t dsa
[grid@rac2 ~ 18:40 1]$ ssh-keygen -t rsa
[grid@rac2 ~ 18:40 2]$ ssh-keygen -t dsa

[grid@rac1 ~ 18:40 3]$ cat .ssh/id_rsa.pub >>.ssh/authorized_keys
[grid@rac1 ~ 18:41 4]$ cat .ssh/id_dsa.pub >>.ssh/authorized_keys
[grid@rac1 ~ 18:42 5]$ ssh rac2 cat .ssh/id_rsa.pub >>.ssh/authorized_keys && ssh rac2 cat .ssh/id_dsa.pub >>.ssh/authorized_keys
[grid@rac1 ~ 18:42 7]$ scp .ssh/authorized_keys rac2:~/.ssh
[grid@rac1 ~ 18:42 8]$  ssh rac1 date && ssh rac2 date
[grid@rac1 ~ 18:42 10]$ ssh rac1-priv date && ssh rac2-priv date
[grid@rac2 ~ 18:42 7]$  ssh rac1 date && ssh rac2 date
[grid@rac2 ~ 18:42 10]$ ssh rac1-priv date && ssh rac2-priv date

Repeat as the oracle user, again copying to rac2:

[oracle@rac1 ~ 18:48 1]$ ssh-keygen -t rsa
[oracle@rac1 ~ 18:48 2]$ ssh-keygen -t dsa
[oracle@rac2 ~ 18:48 1]$ ssh-keygen -t rsa
[oracle@rac2 ~ 18:48 2]$ ssh-keygen -t dsa

[oracle@rac1 ~ 18:49 3]$ cat .ssh/id_rsa.pub >>.ssh/authorized_keys
[oracle@rac1 ~ 18:50 4]$ cat .ssh/id_dsa.pub >>.ssh/authorized_keys
[oracle@rac1 ~ 18:50 5]$ ssh rac2 cat .ssh/id_rsa.pub >>.ssh/authorized_keys && ssh rac2 cat .ssh/id_dsa.pub >>.ssh/authorized_keys
[oracle@rac1 ~ 18:50 7]$ scp .ssh/authorized_keys rac2:~/.ssh
[oracle@rac1 ~ 18:50 8]$ ssh rac1 date && ssh rac2 date
[oracle@rac1 ~ 18:52 9]$ ssh rac1-priv date && ssh rac2-priv date
[oracle@rac2 ~ 18:50 9]$ ssh rac1 date && ssh rac2 date
[oracle@rac2 ~ 18:52 10]$ ssh rac1-priv date && ssh rac2-priv date

A problem you will hit during installation: a missing library, fixed with a symlink.
The installer fails with libcap.so.1: cannot open shared object file: No such file or directory. Create the link on both nodes:

[root@rac1 ~ 18:57 4]# cd /lib64 && ln -s libcap.so.2.16  libcap.so.1
[root@rac1 ~ 18:57 4]# ll /lib64/libcap.so*
[root@rac2 ~ 18:57 4]# cd /lib64 && ln -s libcap.so.2.16  libcap.so.1
[root@rac2 ~ 18:57 4]# ll /lib64/libcap.so*

Upload the files, then unzip and install:

scp 112040_Linux-x86-64_3of7.zip pdksh-5.2.14-37.el5.x86_64.rpm root@192.168.31.31:/u01/software
scp 112040_Linux-x86-64_3of7.zip pdksh-5.2.14-37.el5.x86_64.rpm root@192.168.31.32:/u01/software


[root@rac1 /u01/software 19:10 21]# unzip 112040_Linux-x86-64_3of7.zip
[root@rac1 /u01/software 19:09 22]# rpm -ivh pdksh-5.2.14-37.el5.x86_64.rpm
[root@rac1 /u01/software 19:09 23]# rpm -ivh grid/rpm/cvuqdisk-1.0.9-1.rpm
[root@rac1 /u01/software 19:10 24]# rm -fr 112040_Linux-x86-64_3of7.zip pdksh-5.2.14-37.el5.x86_64.rpm

[root@rac2 /u01/software 19:10 21]# unzip 112040_Linux-x86-64_3of7.zip
[root@rac2 /u01/software 19:09 22]# rpm -ivh pdksh-5.2.14-37.el5.x86_64.rpm
[root@rac2 /u01/software 19:09 23]# rpm -ivh grid/rpm/cvuqdisk-1.0.9-1.rpm
[root@rac2 /u01/software 19:10 24]# rm -fr 112040_Linux-x86-64_3of7.zip pdksh-5.2.14-37.el5.x86_64.rpm

Pre-installation environment check; run as the grid user on either rac1 or rac2:

[grid@rac1 ~ 19:14 16]$ cd /u01/software/grid/
[grid@rac1 /u01/software/grid 19:14 17]$ ls
install  readme.html  response  rpm  runcluvfy.sh  runInstaller  sshsetup  stage  welcome.html
[grid@rac1 /u01/software/grid 19:14 18]$ ./runcluvfy.sh stage -pre crsinst -n rac1,rac2 -fixup
Performing pre-checks for cluster services setup 

Checking node reachability...
Node reachability check passed from node "rac1"


Checking user equivalence...
User equivalence check passed for user "grid"

Checking node connectivity...

Checking hosts config file...

Verification of the hosts config file successful

Node connectivity passed for subnet "192.168.31.0" with node(s) rac2,rac1
TCP connectivity check passed for subnet "192.168.31.0"

Node connectivity passed for subnet "10.10.10.0" with node(s) rac2,rac1
TCP connectivity check passed for subnet "10.10.10.0"


Interfaces found on subnet "192.168.31.0" that are likely candidates for VIP are:
rac2 eth0:192.168.31.32
rac1 eth0:192.168.31.31

Interfaces found on subnet "10.10.10.0" that are likely candidates for a private interconnect are:
rac2 eth1:10.10.10.32
rac1 eth1:10.10.10.31
Checking subnet mask consistency...
Subnet mask consistency check passed for subnet "192.168.31.0".
Subnet mask consistency check passed for subnet "10.10.10.0".
Subnet mask consistency check passed.

Node connectivity check passed

Checking multicast communication...

Checking subnet "192.168.31.0" for multicast communication with multicast group "230.0.1.0"...
Check of subnet "192.168.31.0" for multicast communication with multicast group "230.0.1.0" passed.

Checking subnet "10.10.10.0" for multicast communication with multicast group "230.0.1.0"...
Check of subnet "10.10.10.0" for multicast communication with multicast group "230.0.1.0" passed.

Check of multicast communication passed.

Checking ASMLib configuration.
Check for ASMLib configuration passed.
Total memory check passed
Available memory check passed
Swap space check passed
Free disk space check passed for "rac2:/tmp"
Free disk space check passed for "rac1:/tmp"
Check for multiple users with UID value 201 passed 
User existence check passed for "grid"
Group existence check passed for "oinstall"
Group existence check passed for "dba"
Membership check for user "grid" in group "oinstall" [as Primary] passed
Membership check for user "grid" in group "dba" passed
Run level check passed
Hard limits check passed for "maximum open file descriptors"
Soft limits check passed for "maximum open file descriptors"
Hard limits check passed for "maximum user processes"
Soft limits check passed for "maximum user processes"
System architecture check passed
Kernel version check passed
Kernel parameter check passed for "semmsl"
Kernel parameter check passed for "semmns"
Kernel parameter check passed for "semopm"
Kernel parameter check passed for "semmni"
Kernel parameter check passed for "shmmax"
Kernel parameter check passed for "shmmni"
Kernel parameter check passed for "shmall"
Kernel parameter check passed for "file-max"
Kernel parameter check passed for "ip_local_port_range"
Kernel parameter check passed for "rmem_default"
Kernel parameter check passed for "rmem_max"
Kernel parameter check passed for "wmem_default"
Kernel parameter check passed for "wmem_max"
Kernel parameter check passed for "aio-max-nr"
Package existence check passed for "make"
Package existence check passed for "binutils"
Package existence check passed for "gcc(x86_64)"
Package existence check passed for "libaio(x86_64)"
Package existence check passed for "glibc(x86_64)"
Package existence check passed for "compat-libstdc++-33(x86_64)"
Package existence check passed for "elfutils-libelf(x86_64)"
Package existence check passed for "elfutils-libelf-devel"
Package existence check passed for "glibc-common"
Package existence check passed for "glibc-devel(x86_64)"
Package existence check passed for "glibc-headers"
Package existence check passed for "gcc-c++(x86_64)"
Package existence check passed for "libaio-devel(x86_64)"
Package existence check passed for "libgcc(x86_64)"
Package existence check passed for "libstdc++(x86_64)"
Package existence check passed for "libstdc++-devel(x86_64)"
Package existence check passed for "sysstat"
Package existence check passed for "pdksh"
Package existence check passed for "expat(x86_64)"
Check for multiple users with UID value 0 passed 
Current group ID check passed

Starting check for consistency of primary group of root user

Check for consistency of root user's primary group passed

Starting Clock synchronization checks using Network Time Protocol(NTP)...

NTP Configuration file check started...
NTP Configuration file check passed

Checking daemon liveness...
Liveness check passed for "ntpd"
Check for NTP daemon or service alive passed on all nodes

NTP daemon slewing option check passed

NTP daemon's boot time configuration check for slewing option passed

NTP common Time Server Check started...
Check of common NTP Time Server passed

Clock time offset check from NTP Time Server started...
Clock time offset check passed

Clock synchronization check using Network Time Protocol(NTP) passed

Core file name pattern consistency check passed.

User "grid" is not part of "root" group. Check passed
Default user file creation mask check passed
Checking consistency of file "/etc/resolv.conf" across nodes

File "/etc/resolv.conf" does not have both domain and search entries defined
domain entry in file "/etc/resolv.conf" is consistent across nodes
search entry in file "/etc/resolv.conf" is consistent across nodes
All nodes have one search entry defined in file "/etc/resolv.conf"
The DNS response time for an unreachable node is within acceptable limit on all nodes

File "/etc/resolv.conf" is consistent across nodes

Time zone consistency check passed

Pre-check for cluster services setup was successful.

The same check with verbose output:

[grid@rac1 /u01/software/grid 19:15 19]$ ./runcluvfy.sh stage -pre crsinst -n rac1,rac2 -verbose -fixup
Performing pre-checks for cluster services setup 

Checking node reachability...

Check: Node reachability from node "rac1"
  Destination Node                      Reachable?              
  ------------------------------------  ------------------------
  rac2                                  yes                     
  rac1                                  yes                     
Result: Node reachability check passed from node "rac1"


Checking user equivalence...

Check: User equivalence for user "grid"
  Node Name                             Status                  
  ------------------------------------  ------------------------
  rac2                                  passed                  
  rac1                                  passed                  
Result: User equivalence check passed for user "grid"

Checking node connectivity...

Checking hosts config file...
  Node Name                             Status                  
  ------------------------------------  ------------------------
  rac2                                  passed                  
  rac1                                  passed                  

Verification of the hosts config file successful


Interface information for node "rac2"
 Name   IP Address      Subnet          Gateway         Def. Gateway    HW Address        MTU   
 ------ --------------- --------------- --------------- --------------- ----------------- ------
 eth0   192.168.31.32   192.168.31.0    0.0.0.0         192.168.31.1    00:50:56:29:3D:8C 1500  
 eth1   10.10.10.32     10.10.10.0      0.0.0.0         192.168.31.1    00:50:56:20:21:4F 1500  


Interface information for node "rac1"
 Name   IP Address      Subnet          Gateway         Def. Gateway    HW Address        MTU   
 ------ --------------- --------------- --------------- --------------- ----------------- ------
 eth0   192.168.31.31   192.168.31.0    0.0.0.0         192.168.31.1    00:50:56:3D:32:B9 1500  
 eth1   10.10.10.31     10.10.10.0      0.0.0.0         192.168.31.1    00:50:56:28:16:69 1500  


Check: Node connectivity of subnet "192.168.31.0"
  Source                          Destination                     Connected?      
  ------------------------------  ------------------------------  ----------------
  rac2[192.168.31.32]             rac1[192.168.31.31]             yes 
  Result: Node connectivity passed for subnet "192.168.31.0" with node(s) rac2,rac1


Check: TCP connectivity of subnet "192.168.31.0"
  Source                          Destination                     Connected?      
  ------------------------------  ------------------------------  ----------------
  rac1:192.168.31.31              rac2:192.168.31.32              passed          
Result: TCP connectivity check passed for subnet "192.168.31.0"


Check: Node connectivity of subnet "10.10.10.0"
  Source                          Destination                     Connected?      
  ------------------------------  ------------------------------  ----------------
  rac2[10.10.10.32]               rac1[10.10.10.31]               yes             
Result: Node connectivity passed for subnet "10.10.10.0" with node(s) rac2,rac1


Check: TCP connectivity of subnet "10.10.10.0"
  Source                          Destination                     Connected?      
  ------------------------------  ------------------------------  ----------------
  rac1:10.10.10.31                rac2:10.10.10.32                passed          
Result: TCP connectivity check passed for subnet "10.10.10.0"


Interfaces found on subnet "192.168.31.0" that are likely candidates for VIP are:
rac2 eth0:192.168.31.32
rac1 eth0:192.168.31.31

Interfaces found on subnet "10.10.10.0" that are likely candidates for a private interconnect are:
rac2 eth1:10.10.10.32
rac1 eth1:10.10.10.31
Checking subnet mask consistency...
Subnet mask consistency check passed for subnet "192.168.31.0".
Subnet mask consistency check passed for subnet "10.10.10.0".
Subnet mask consistency check passed.

Result: Node connectivity check passed

Checking multicast communication...

Checking subnet "192.168.31.0" for multicast communication with multicast group "230.0.1.0"...
Check of subnet "192.168.31.0" for multicast communication with multicast group "230.0.1.0" passed.

Checking subnet "10.10.10.0" for multicast communication with multicast group "230.0.1.0"...
Check of subnet "10.10.10.0" for multicast communication with multicast group "230.0.1.0" passed.

Check of multicast communication passed.

Checking ASMLib configuration.
  Node Name                             Status                  
  ------------------------------------  ------------------------
  rac2                                  passed                  
  rac1                                  passed                  
Result: Check for ASMLib configuration passed.

Check: Total memory 
  Node Name     Available                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          4.7214GB (4950796.0KB)    1.5GB (1572864.0KB)       passed    
  rac1          4.7214GB (4950796.0KB)    1.5GB (1572864.0KB)       passed    
Result: Total memory check passed

Check: Available memory 
  Node Name     Available                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          4.5348GB (4755048.0KB)    50MB (51200.0KB)          passed    
  rac1          4.2446GB (4450740.0KB)    50MB (51200.0KB)          passed    
Result: Available memory check passed

Check: Swap space 
  Node Name     Available                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          5.8594GB (6143996.0KB)    4.7214GB (4950796.0KB)    passed    
  rac1          5.8594GB (6143996.0KB)    4.7214GB (4950796.0KB)    passed    
Result: Swap space check passed

Check: Free disk space for "rac2:/tmp" 
  Path              Node Name     Mount point   Available     Required      Status      
  ----------------  ------------  ------------  ------------  ------------  ------------
  /tmp              rac2          /             23.6729GB     1GB           passed      
Result: Free disk space check passed for "rac2:/tmp"

Check: Free disk space for "rac1:/tmp" 
  Path              Node Name     Mount point   Available     Required      Status      
  ----------------  ------------  ------------  ------------  ------------  ------------
  /tmp              rac1          /             22.4624GB     1GB           passed      
Result: Free disk space check passed for "rac1:/tmp"

Check: User existence for "grid" 
  Node Name     Status                    Comment                 
  ------------  ------------------------  ------------------------
  rac2          passed                    exists(201)             
  rac1          passed                    exists(201)             

Checking for multiple users with UID value 201
Result: Check for multiple users with UID value 201 passed 
Result: User existence check passed for "grid"

Check: Group existence for "oinstall" 
  Node Name     Status                    Comment                 
  ------------  ------------------------  ------------------------
  rac2          passed                    exists                  
  rac1          passed                    exists                  
Result: Group existence check passed for "oinstall"

Check: Group existence for "dba" 
  Node Name     Status                    Comment                 
  ------------  ------------------------  ------------------------
  rac2          passed                    exists                  
  rac1          passed                    exists                  
Result: Group existence check passed for "dba"

Check: Membership of user "grid" in group "oinstall" [as Primary]
  Node Name         User Exists   Group Exists  User in Group  Primary       Status      
  ----------------  ------------  ------------  ------------  ------------  ------------
  rac2              yes           yes           yes           yes           passed      
  rac1              yes           yes           yes           yes           passed      
Result: Membership check for user "grid" in group "oinstall" [as Primary] passed

Check: Membership of user "grid" in group "dba" 
  Node Name         User Exists   Group Exists  User in Group  Status          
  ----------------  ------------  ------------  ------------  ----------------
  rac2              yes           yes           yes           passed          
  rac1              yes           yes           yes           passed          
Result: Membership check for user "grid" in group "dba" passed

Check: Run level 
  Node Name     run level                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          3                         3,5                       passed    
  rac1          3                         3,5                       passed    
Result: Run level check passed

Check: Hard limits for "maximum open file descriptors" 
  Node Name         Type          Available     Required      Status          
  ----------------  ------------  ------------  ------------  ----------------
  rac2              hard          65536         65536         passed          
  rac1              hard          65536         65536         passed          
Result: Hard limits check passed for "maximum open file descriptors"

Check: Soft limits for "maximum open file descriptors" 
  Node Name         Type          Available     Required      Status          
  ----------------  ------------  ------------  ------------  ----------------
  rac2              soft          1024          1024          passed          
  rac1              soft          1024          1024          passed          
Result: Soft limits check passed for "maximum open file descriptors"

Check: Hard limits for "maximum user processes" 
  Node Name         Type          Available     Required      Status          
  ----------------  ------------  ------------  ------------  ----------------
  rac2              hard          16384         16384         passed          
  rac1              hard          16384         16384         passed          
Result: Hard limits check passed for "maximum user processes"
Check: Soft limits for "maximum user processes" 
  Node Name         Type          Available     Required      Status          
  ----------------  ------------  ------------  ------------  ----------------
  rac2              soft          2047          2047          passed          
  rac1              soft          2047          2047          passed          
Result: Soft limits check passed for "maximum user processes"

Check: System architecture 
  Node Name     Available                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          x86_64                    x86_64                    passed    
  rac1          x86_64                    x86_64                    passed    
Result: System architecture check passed

Check: Kernel version 
  Node Name     Available                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          2.6.32-754.35.1.el6.x86_64  2.6.9                     passed    
  rac1          2.6.32-754.35.1.el6.x86_64  2.6.9                     passed    
Result: Kernel version check passed

Check: Kernel parameter for "semmsl" 
  Node Name         Current       Configured    Required      Status        Comment     
  ----------------  ------------  ------------  ------------  ------------  ------------
  rac2              250           250           250           passed          
  rac1              250           250           250           passed          
Result: Kernel parameter check passed for "semmsl"

Check: Kernel parameter for "semmns" 
  Node Name         Current       Configured    Required      Status        Comment     
  ----------------  ------------  ------------  ------------  ------------  ------------
  rac2              32000         32000         32000         passed          
  rac1              32000         32000         32000         passed          
Result: Kernel parameter check passed for "semmns"

Check: Kernel parameter for "semopm" 
  Node Name         Current       Configured    Required      Status        Comment     
  ----------------  ------------  ------------  ------------  ------------  ------------
  rac2              100           100           100           passed          
  rac1              100           100           100           passed          
Result: Kernel parameter check passed for "semopm"

Check: Kernel parameter for "semmni" 
  Node Name         Current       Configured    Required      Status        Comment     
  ----------------  ------------  ------------  ------------  ------------  ------------
  rac2              128           128           128           passed          
  rac1              128           128           128           passed          
Result: Kernel parameter check passed for "semmni"

Check: Kernel parameter for "shmmax" 
  Node Name         Current       Configured    Required      Status        Comment     
  ----------------  ------------  ------------  ------------  ------------  ------------
  rac2              2684354560    2684354560    2534807552    passed          
  rac1              2684354560    2684354560    2534807552    passed          
Result: Kernel parameter check passed for "shmmax"

Check: Kernel parameter for "shmmni" 
  Node Name         Current       Configured    Required      Status        Comment     
  ----------------  ------------  ------------  ------------  ------------  ------------
  rac2              4096          4096          4096          passed          
  rac1              4096          4096          4096          passed          
Result: Kernel parameter check passed for "shmmni"

Check: Kernel parameter for "shmall" 
  Node Name         Current       Configured    Required      Status        Comment     
  ----------------  ------------  ------------  ------------  ------------  ------------
  rac2              10523004      10523004      2097152       passed          
  rac1              10523004      10523004      2097152       passed          
Result: Kernel parameter check passed for "shmall"

Check: Kernel parameter for "file-max" 
  Node Name         Current       Configured    Required      Status        Comment     
  ----------------  ------------  ------------  ------------  ------------  ------------
  rac2              6815744       6815744       6815744       passed          
  rac1              6815744       6815744       6815744       passed          
Result: Kernel parameter check passed for "file-max"

Check: Kernel parameter for "ip_local_port_range" 
  Node Name         Current       Configured    Required      Status        Comment     
  ----------------  ------------  ------------  ------------  ------------  ------------
  rac2              between 9000.0 & 65500.0  between 9000.0 & 65500.0  between 9000.0 & 65500.0  passed          
  rac1              between 9000.0 & 65500.0  between 9000.0 & 65500.0  between 9000.0 & 65500.0  passed          
Result: Kernel parameter check passed for "ip_local_port_range"

Check: Kernel parameter for "rmem_default" 
  Node Name         Current       Configured    Required      Status        Comment     
  ----------------  ------------  ------------  ------------  ------------  ------------
  rac2              262144        262144        262144        passed          
  rac1              262144        262144        262144        passed          
Result: Kernel parameter check passed for "rmem_default"

Check: Kernel parameter for "rmem_max" 
  Node Name         Current       Configured    Required      Status        Comment     
  ----------------  ------------  ------------  ------------  ------------  ------------
  rac2              4194304       4194304       4194304       passed          
  rac1              4194304       4194304       4194304       passed          
Result: Kernel parameter check passed for "rmem_max"

Check: Kernel parameter for "wmem_default" 
  Node Name         Current       Configured    Required      Status        Comment     
  ----------------  ------------  ------------  ------------  ------------  ------------
  rac2              262144        262144        262144        passed          
  rac1              262144        262144        262144        passed          
Result: Kernel parameter check passed for "wmem_default"

Check: Kernel parameter for "wmem_max" 
  Node Name         Current       Configured    Required      Status        Comment     
  ----------------  ------------  ------------  ------------  ------------  ------------
  rac2              1048576       1048576       1048576       passed          
  rac1              1048576       1048576       1048576       passed          
Result: Kernel parameter check passed for "wmem_max"

Check: Kernel parameter for "aio-max-nr" 
  Node Name         Current       Configured    Required      Status        Comment     
  ----------------  ------------  ------------  ------------  ------------  ------------
  rac2              1048576       1048576       1048576       passed          
  rac1              1048576       1048576       1048576       passed          
Result: Kernel parameter check passed for "aio-max-nr"

Check: Package existence for "make" 
  Node Name     Available                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          make-3.81-23.el6          make-3.80                 passed    
  rac1          make-3.81-23.el6          make-3.80                 passed    
Result: Package existence check passed for "make"

Check: Package existence for "binutils" 
  Node Name     Available                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          binutils-2.20.51.0.2-5.48.el6_10.1  binutils-2.15.92.0.2      passed    
  rac1          binutils-2.20.51.0.2-5.48.el6_10.1  binutils-2.15.92.0.2      passed    
Result: Package existence check passed for "binutils"

Check: Package existence for "gcc(x86_64)" 
  Node Name     Available                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          gcc(x86_64)-4.4.7-23.el6  gcc(x86_64)-3.4.6         passed    
  rac1          gcc(x86_64)-4.4.7-23.el6  gcc(x86_64)-3.4.6         passed    
Result: Package existence check passed for "gcc(x86_64)"

Check: Package existence for "libaio(x86_64)" 
  Node Name     Available                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          libaio(x86_64)-0.3.107-10.el6  libaio(x86_64)-0.3.105    passed    
  rac1          libaio(x86_64)-0.3.107-10.el6  libaio(x86_64)-0.3.105    passed    
Result: Package existence check passed for "libaio(x86_64)"

Check: Package existence for "glibc(x86_64)" 
  Node Name     Available                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          glibc(x86_64)-2.12-1.212.el6_10.3  glibc(x86_64)-2.3.4-2.41  passed    
  rac1          glibc(x86_64)-2.12-1.212.el6_10.3  glibc(x86_64)-2.3.4-2.41  passed    
Result: Package existence check passed for "glibc(x86_64)"

Check: Package existence for "compat-libstdc++-33(x86_64)" 
  Node Name     Available                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          compat-libstdc++-33(x86_64)-3.2.3-69.el6  compat-libstdc++-33(x86_64)-3.2.3  passed    
  rac1          compat-libstdc++-33(x86_64)-3.2.3-69.el6  compat-libstdc++-33(x86_64)-3.2.3  passed    
Result: Package existence check passed for "compat-libstdc++-33(x86_64)"

Check: Package existence for "elfutils-libelf(x86_64)" 
  Node Name     Available                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          elfutils-libelf(x86_64)-0.164-2.el6  elfutils-libelf(x86_64)-0.97  passed    
  rac1          elfutils-libelf(x86_64)-0.164-2.el6  elfutils-libelf(x86_64)-0.97  passed    
Result: Package existence check passed for "elfutils-libelf(x86_64)"

Check: Package existence for "elfutils-libelf-devel" 
  Node Name     Available                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          elfutils-libelf-devel-0.164-2.el6  elfutils-libelf-devel-0.97  passed    
  rac1          elfutils-libelf-devel-0.164-2.el6  elfutils-libelf-devel-0.97  passed    
Result: Package existence check passed for "elfutils-libelf-devel"

Check: Package existence for "glibc-common" 
  Node Name     Available                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          glibc-common-2.12-1.212.el6_10.3  glibc-common-2.3.4        passed    
  rac1          glibc-common-2.12-1.212.el6_10.3  glibc-common-2.3.4        passed    
Result: Package existence check passed for "glibc-common"

Check: Package existence for "glibc-devel(x86_64)" 
  Node Name     Available                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          glibc-devel(x86_64)-2.12-1.212.el6_10.3  glibc-devel(x86_64)-2.3.4  passed    
  rac1          glibc-devel(x86_64)-2.12-1.212.el6_10.3  glibc-devel(x86_64)-2.3.4  passed    
Result: Package existence check passed for "glibc-devel(x86_64)"

Check: Package existence for "glibc-headers" 
  Node Name     Available                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          glibc-headers-2.12-1.212.el6_10.3  glibc-headers-2.3.4       passed    
  rac1          glibc-headers-2.12-1.212.el6_10.3  glibc-headers-2.3.4       passed    
Result: Package existence check passed for "glibc-headers"

Check: Package existence for "gcc-c++(x86_64)" 
  Node Name     Available                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          gcc-c++(x86_64)-4.4.7-23.el6  gcc-c++(x86_64)-3.4.6     passed    
  rac1          gcc-c++(x86_64)-4.4.7-23.el6  gcc-c++(x86_64)-3.4.6     passed    
Result: Package existence check passed for "gcc-c++(x86_64)"
Check: Package existence for "libaio-devel(x86_64)" 
  Node Name     Available                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          libaio-devel(x86_64)-0.3.107-10.el6  libaio-devel(x86_64)-0.3.105  passed    
  rac1          libaio-devel(x86_64)-0.3.107-10.el6  libaio-devel(x86_64)-0.3.105  passed    
Result: Package existence check passed for "libaio-devel(x86_64)"

Check: Package existence for "libgcc(x86_64)" 
  Node Name     Available                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          libgcc(x86_64)-4.4.7-23.el6  libgcc(x86_64)-3.4.6      passed    
  rac1          libgcc(x86_64)-4.4.7-23.el6  libgcc(x86_64)-3.4.6      passed    
Result: Package existence check passed for "libgcc(x86_64)"

Check: Package existence for "libstdc++(x86_64)" 
  Node Name     Available                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          libstdc++(x86_64)-4.4.7-23.el6  libstdc++(x86_64)-3.4.6   passed    
  rac1          libstdc++(x86_64)-4.4.7-23.el6  libstdc++(x86_64)-3.4.6   passed    
Result: Package existence check passed for "libstdc++(x86_64)"

Check: Package existence for "libstdc++-devel(x86_64)" 
  Node Name     Available                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          libstdc++-devel(x86_64)-4.4.7-23.el6  libstdc++-devel(x86_64)-3.4.6  passed    
  rac1          libstdc++-devel(x86_64)-4.4.7-23.el6  libstdc++-devel(x86_64)-3.4.6  passed    
Result: Package existence check passed for "libstdc++-devel(x86_64)"

Check: Package existence for "sysstat" 
  Node Name     Available                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          sysstat-9.0.4-33.el6_9.1  sysstat-5.0.5             passed    
  rac1          sysstat-9.0.4-33.el6_9.1  sysstat-5.0.5             passed    
Result: Package existence check passed for "sysstat"

Check: Package existence for "pdksh" 
  Node Name     Available                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          pdksh-5.2.14-37.el5       pdksh-5.2.14              passed    
  rac1          pdksh-5.2.14-37.el5       pdksh-5.2.14              passed    
Result: Package existence check passed for "pdksh"

Check: Package existence for "expat(x86_64)" 
  Node Name     Available                 Required                  Status    
  ------------  ------------------------  ------------------------  ----------
  rac2          expat(x86_64)-2.0.1-13.el6_8  expat(x86_64)-1.95.7      passed    
  rac1          expat(x86_64)-2.0.1-13.el6_8  expat(x86_64)-1.95.7      passed    
Result: Package existence check passed for "expat(x86_64)"

Checking for multiple users with UID value 0
Result: Check for multiple users with UID value 0 passed 
Check: Current group ID 
Result: Current group ID check passed

Starting check for consistency of primary group of root user
  Node Name                             Status                  
  ------------------------------------  ------------------------
  rac2                                  passed                  
  rac1                                  passed                  

Check for consistency of root user's primary group passed

Starting Clock synchronization checks using Network Time Protocol(NTP)...

NTP Configuration file check started...
The NTP configuration file "/etc/ntp.conf" is available on all nodes
NTP Configuration file check passed

Checking daemon liveness...

Check: Liveness for "ntpd"
  Node Name                             Running?                
  ------------------------------------  ------------------------
  rac2                                  yes                     
  rac1                                  yes                     
Result: Liveness check passed for "ntpd"
Check for NTP daemon or service alive passed on all nodes

Checking NTP daemon command line for slewing option "-x"
Check: NTP daemon command line
  Node Name                             Slewing Option Set?     
  ------------------------------------  ------------------------
  rac2                                  yes                     
  rac1                                  yes                     
Result: 
NTP daemon slewing option check passed

Checking NTP daemon's boot time configuration, in file "/etc/sysconfig/ntpd", for slewing option "-x"

Check: NTP daemon's boot time configuration
  Node Name                             Slewing Option Set?     
  ------------------------------------  ------------------------
  rac2                                  yes                     
  rac1                                  yes                     
Result: 
NTP daemon's boot time configuration check for slewing option passed

Checking whether NTP daemon or service is using UDP port 123 on all nodes

Check for NTP daemon or service using UDP port 123
  Node Name                             Port Open?  
  ------------------------------------  ------------------------
  rac2                                  yes                     
  rac1                                  yes                     

NTP common Time Server Check started...
NTP Time Server ".LOCL." is common to all nodes on which the NTP daemon is running
Check of common NTP Time Server passed

Clock time offset check from NTP Time Server started...
Checking on nodes "[rac2, rac1]"... 
Check: Clock time offset from NTP Time Server

Time Server: .LOCL. 
Time Offset Limit: 1000.0 msecs
  Node Name     Time Offset               Status                  
  ------------  ------------------------  ------------------------
  rac2          0.0                       passed                  
  rac1          0.0                       passed                  
Time Server ".LOCL." has time offsets that are within permissible limits for nodes "[rac2, rac1]". 
Clock time offset check passed

Result: Clock synchronization check using Network Time Protocol(NTP) passed

Checking Core file name pattern consistency...
Core file name pattern consistency check passed.

Checking to make sure user "grid" is not in "root" group
  Node Name     Status                    Comment                 
  ------------  ------------------------  ------------------------
  rac2          passed                    does not exist          
  rac1          passed                    does not exist          
Result: User "grid" is not part of "root" group. Check passed

Check default user file creation mask
  Node Name     Available                 Required                  Comment   
  ------------  ------------------------  ------------------------  ----------
  rac2          0022                      0022                      passed    
  rac1          0022                      0022                      passed    
Result: Default user file creation mask check passed
Checking consistency of file "/etc/resolv.conf" across nodes

Checking the file "/etc/resolv.conf" to make sure only one of domain and search entries is defined
File "/etc/resolv.conf" does not have both domain and search entries defined
Checking if domain entry in file "/etc/resolv.conf" is consistent across the nodes...
domain entry in file "/etc/resolv.conf" is consistent across nodes
Checking if search entry in file "/etc/resolv.conf" is consistent across the nodes...
search entry in file "/etc/resolv.conf" is consistent across nodes
Checking file "/etc/resolv.conf" to make sure that only one search entry is defined
All nodes have one search entry defined in file "/etc/resolv.conf"
Checking all nodes to make sure that search entry is "dns.rac.com" as found on node "rac2"
All nodes of the cluster have same value for 'search'
Checking DNS response time for an unreachable node
  Node Name                             Status                  
  ------------------------------------  ------------------------
  rac2                                  passed                  
  rac1                                  passed                  
The DNS response time for an unreachable node is within acceptable limit on all nodes

File "/etc/resolv.conf" is consistent across nodes

Check: Time zone consistency 
Result: Time zone consistency check passed

Pre-check for cluster services setup was successful. 

Install the cluster (Grid Infrastructure) software. The environment is limited, so the installation is done remotely over VNC.

The environment variables are described above and can be changed as needed.
Grid cluster instances:

  • rac1: export ORACLE_SID=+ASM1
  • rac2: export ORACLE_SID=+ASM2

Oracle database instances:

  • Database name: export ORACLE_UNQNAME=dbrac
  • rac1 instance: orcl1
  • rac2 instance: orcl2
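
These settings belong in the login profiles of the grid and oracle users. A minimal sketch for rac1 (file locations are an assumption; the rest of the environment, ORACLE_HOME and so on, was configured in the preparation steps; on rac2 substitute +ASM2 and orcl2):

# appended to /home/grid/.bash_profile on rac1 (+ASM2 on rac2)
export ORACLE_SID=+ASM1

# appended to /home/oracle/.bash_profile on rac1 (orcl2 on rac2)
export ORACLE_UNQNAME=dbrac
export ORACLE_SID=orcl1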
[root@rac1 ~ 19:35 1]# xhost +
access control disabled, clients can connect from any host
[root@rac1 ~ 19:38 2]# su - grid
[grid@rac1 ~ 19:38 1]$ cd /u01/software/grid/
[grid@rac1 /u01/software/grid 19:38 2]$ ls
install  readme.html  response  rpm  runcluvfy.sh  runInstaller  sshsetup  stage  welcome.html
[grid@rac1 /u01/software/grid 19:38 3]$ ./runInstaller

(Installer screenshots omitted.) Accept the defaults, clicking Next through the wizard, and ignore the remaining prerequisite warnings to start the installation.
On both the rac1 and rac2 nodes, run the following script as the root user: /u01/app/oraInventory/orainstRoot.sh

[root@rac1 ~ 20:10 43]# /u01/app/oraInventory/orainstRoot.sh
Changing permissions of /u01/app/oraInventory.
Adding read,write permissions for group.
Removing read,write,execute permissions for world.

Changing groupname of /u01/app/oraInventory to oinstall.
The execution of the script is complete.

[root@rac2 ~ 20:10 31]# /u01/app/oraInventory/orainstRoot.sh
Changing permissions of /u01/app/oraInventory.
Adding read,write permissions for group.
Removing read,write,execute permissions for world.

Changing groupname of /u01/app/oraInventory to oinstall.
The execution of the script is complete.

Next, on both the rac1 and rac2 nodes, run the following script as the root user: /u01/app/11.2.0/grid/root.sh

[root@rac1 ~ 20:10 44]# /u01/app/11.2.0/grid/root.sh
Performing root user operation for Oracle 11g 

The following environment variables are set as:
    ORACLE_OWNER= grid
    ORACLE_HOME=  /u01/app/11.2.0/grid

Enter the full pathname of the local bin directory: [/usr/local/bin]: 
   Copying dbhome to /usr/local/bin ...
   Copying oraenv to /usr/local/bin ...
   Copying coraenv to /usr/local/bin ...


Creating /etc/oratab file...
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Using configuration parameter file: /u01/app/11.2.0/grid/crs/install/crsconfig_params
Creating trace directory
User ignored Prerequisites during installation
Installing Trace File Analyzer
OLR initialization - successful
  root wallet
  root wallet cert
  root cert export
  peer wallet
  profile reader wallet
  pa wallet
  peer wallet keys
  pa wallet keys
  peer cert request
  pa cert request
  peer cert
  pa cert
  peer root cert TP
  profile reader root cert TP
  pa root cert TP
  peer pa cert TP
  pa peer cert TP
  profile reader pa cert TP
  profile reader peer cert TP
  peer user cert
  pa user cert
Adding Clusterware entries to upstart
CRS-2672: Attempting to start 'ora.mdnsd' on 'rac1'
CRS-2676: Start of 'ora.mdnsd' on 'rac1' succeeded
CRS-2672: Attempting to start 'ora.gpnpd' on 'rac1'
CRS-2676: Start of 'ora.gpnpd' on 'rac1' succeeded
CRS-2672: Attempting to start 'ora.cssdmonitor' on 'rac1'
CRS-2672: Attempting to start 'ora.gipcd' on 'rac1'
CRS-2676: Start of 'ora.cssdmonitor' on 'rac1' succeeded
CRS-2676: Start of 'ora.gipcd' on 'rac1' succeeded
CRS-2672: Attempting to start 'ora.cssd' on 'rac1'
CRS-2672: Attempting to start 'ora.diskmon' on 'rac1'
CRS-2676: Start of 'ora.diskmon' on 'rac1' succeeded
CRS-2676: Start of 'ora.cssd' on 'rac1' succeeded

ASM created and started successfully.

Disk Group VOT created successfully.

clscfg: -install mode specified
Successfully accumulated necessary OCR keys.
Creating OCR keys for user 'root', privgrp 'root'..
Operation successful.
CRS-4256: Updating the profile
Successful addition of voting disk 5ab02e2fb9a54f8dbf121cc53e770728.
Successful addition of voting disk 60642d492a8d4f7dbfd493418de1fc44.
Successful addition of voting disk 112961190b7e4f57bf18f20e6bee744f.
Successfully replaced voting disk group with +VOT.
CRS-4256: Updating the profile
CRS-4266: Voting file(s) successfully replaced
##  STATE    File Universal Id                File Name Disk group
--  -----    -----------------                --------- ---------
 1. ONLINE   5ab02e2fb9a54f8dbf121cc53e770728 (/dev/asm-diskb) [VOT]
 2. ONLINE   60642d492a8d4f7dbfd493418de1fc44 (/dev/asm-diskc) [VOT]
 3. ONLINE   112961190b7e4f57bf18f20e6bee744f (/dev/asm-diskd) [VOT]
Located 3 voting disk(s).
CRS-2672: Attempting to start 'ora.asm' on 'rac1'
CRS-2676: Start of 'ora.asm' on 'rac1' succeeded
CRS-2672: Attempting to start 'ora.VOT.dg' on 'rac1'
CRS-2676: Start of 'ora.VOT.dg' on 'rac1' succeeded
Configure Oracle Grid Infrastructure for a Cluster ... succeeded

After node 1 has finished the second script (root.sh), reboot node 2.
If node 2 is not rebooted, the services it installs will conflict with those already running on node 1 and the installation will fail. After the reboot, run root.sh on node 2.
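
Before touching node 2, it is worth confirming that the stack on node 1 is fully online. A quick check (crsctl lives in the grid home):

# on rac1, as root
/u01/app/11.2.0/grid/bin/crsctl check crs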

[root@rac2 ~ 20:21 1]# /u01/app/11.2.0/grid/root.sh
Performing root user operation for Oracle 11g 

The following environment variables are set as:
    ORACLE_OWNER= grid
    ORACLE_HOME=  /u01/app/11.2.0/grid

Enter the full pathname of the local bin directory: [/usr/local/bin]: 
   Copying dbhome to /usr/local/bin ...
   Copying oraenv to /usr/local/bin ...
   Copying coraenv to /usr/local/bin ...


Creating /etc/oratab file...
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Using configuration parameter file: /u01/app/11.2.0/grid/crs/install/crsconfig_params
Creating trace directory
User ignored Prerequisites during installation
Installing Trace File Analyzer
OLR initialization - successful
Adding Clusterware entries to upstart
CRS-4402: The CSS daemon was started in exclusive mode but found an active CSS daemon on node rac1, number 1, and is terminating
An active cluster was found during exclusive startup, restarting to join the cluster
Configure Oracle Grid Infrastructure for a Cluster ... succeeded

Back in the GUI, click OK to continue. If errors occur here, install asmlib (the packages below) before the installation. Done!

kmod-oracleasm-2.0.8-16.1.el6_10.x86_64.rpm
oracleasm-support-2.1.8-1.el6.x86_64.rpm
oracleasmlib-2.0.12-1.el6.x86_64.rpm

[root@rac1 ~ 09:48 46]# vim /etc/yum.conf

Set keepcache=1 so that downloaded packages are kept locally under cachedir=/var/cache/yum/$basearch/$releasever.
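
A one-line sketch of that edit (assumption: the stock CentOS 6 /etc/yum.conf, which ships with keepcache=0):

sed -i 's/^keepcache=0/keepcache=1/' /etc/yum.conf
grep -E '^(cachedir|keepcache)' /etc/yum.conf    # verify both settings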

[root@rac1 ~ 09:48 46]# yum -y install  kmod-oracleasm

https://oss.oracle.com/ol6/SRPMS-updates/
oracleasm-support-2.1.8-1.el6.x86_64.rpm could not be downloaded from there; I paid to get a copy and would rather not re-upload it.
With oracleasm everything went smoothly: the pre-check reported no errors and neither did the installation!

https://www.oracle.com/linux/downloads/linux-asmlib-v6-downloads.html
oracleasmlib-2.0.12-1.el6.x86_64.rpm
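
With the three RPMs on hand, a minimal install-and-initialize sketch (run as root on both nodes; the grid/asmadmin ownership matches the udev rules created earlier):

rpm -ivh kmod-oracleasm-2.0.8-16.1.el6_10.x86_64.rpm \
        oracleasm-support-2.1.8-1.el6.x86_64.rpm \
        oracleasmlib-2.0.12-1.el6.x86_64.rpm
oracleasm configure -i    # interactive: owner grid, group asmadmin, start on boot
oracleasm init            # load the kernel module and mount /dev/oracleasm
oracleasm scandisks       # detect disks already stamped with 'oracleasm createdisk'
oracleasm listdisks       # list the stamped disks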

Log file location (screenshot omitted)

Use asmca to create the disk groups: a data group and an fra group (names can be customized); the Oracle database installation requires them.
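
asmca drives this through the GUI; the same result can be had from sqlplus as SYSASM. A sketch (assumptions: run as grid on rac1 with the +ASM1 environment loaded, the ASM disk discovery string covers /dev/asm-disk*, the three 20G disks back DATA and the three 15G disks back RECOV, matching the udev names set up earlier):

sqlplus / as sysasm <<'EOF'
CREATE DISKGROUP data NORMAL REDUNDANCY
  DISK '/dev/asm-diske', '/dev/asm-diskf', '/dev/asm-diskg';
CREATE DISKGROUP recov NORMAL REDUNDANCY
  DISK '/dev/asm-diskh', '/dev/asm-diski', '/dev/asm-diskj';
EOF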

(Screenshots omitted.)
Check the status on node rac1:

20:16:49 [SYS@+ASM1/AS SYSDBA]>colu name for a20
20:17:49 [SYS@+ASM1/AS SYSDBA]>desc v$asm_diskgroup;
20:20:36 [SYS@+ASM1/AS SYSDBA]>select group_number,name,STATE from v$asm_diskgroup;

GROUP_NUMBER|NAME                |STATE
------------|--------------------|---------------------------------
           1|DATA                |MOUNTED
           2|RECOV               |MOUNTED
           3|VOT                 |MOUNTED

On node rac2, log in with sqlplus and mount the remaining disk groups:

sqlplus sys as sysasm
20:19:24 [SYS@+ASM2/AS SYSDBA]>col name for a20
20:19:35 [SYS@+ASM2/AS SYSDBA]>select group_number,name,state from v$asm_diskgroup;

GROUP_NUMBER|NAME                |STATE
------------|--------------------|---------------------------------
           3|VOT                 |MOUNTED
           0|RECOV               |DISMOUNTED
           0|DATA                |DISMOUNTED
20:20:15 [SYS@+ASM2/AS SYSASM]>alter diskgroup data mount;

Diskgroup altered.

20:20:19 [SYS@+ASM2/AS SYSASM]>alter diskgroup recov mount;

Diskgroup altered.

20:20:29 [SYS@+ASM2/AS SYSASM]>select group_number,name,state from v$asm_diskgroup;

GROUP_NUMBER|NAME                |STATE
------------|--------------------|---------------------------------
           3|VOT                 |MOUNTED
           2|RECOV               |MOUNTED
           1|DATA                |MOUNTED

Check the disk group status:

[# for dg in vot data recov; do srvctl status diskgroup -g $dg; done ]
Disk Group vot is running on rac2,rac1
Disk Group data is running on rac2,rac1
Disk Group recov is running on rac2,rac1
[# for n in rac1 rac2; do ssh grid@$n '. .bash_profile; asmcmd -p lsct'; done ]
DB_Name  Status     Software_Version  Compatible_version  Instance_Name  Disk_Group
+ASM     CONNECTED        11.2.0.4.0          11.2.0.4.0  +ASM1          VOT       
+ASM     CONNECTED        11.2.0.4.0          11.2.0.4.0  +ASM1          DATA      
DB_Name  Status     Software_Version  Compatible_version  Instance_Name  Disk_Group
+ASM     CONNECTED        11.2.0.4.0          11.2.0.4.0  +ASM2          VOT       
+ASM     CONNECTED        11.2.0.4.0          11.2.0.4.0  +ASM2          DATA
[# asmcmd -p lsdg && ssh rac2 ". ~/.bash_profile;asmcmd -p lsdg" ]
State    Type    Rebal  Sector  Block       AU  Total_MB  Free_MB  Req_mir_free_MB  Usable_file_MB  Offline_disks  Voting_files  Name
MOUNTED  NORMAL  N         512   4096  1048576     61440    55886            20480           17703              0             N  DATA/
MOUNTED  NORMAL  N         512   4096  1048576     46080    45099            15360           14869              0             N  RECOV/
MOUNTED  NORMAL  N         512   4096  1048576      6144     5218             2048            1585              0             Y  VOT/
State    Type    Rebal  Sector  Block       AU  Total_MB  Free_MB  Req_mir_free_MB  Usable_file_MB  Offline_disks  Voting_files  Name
MOUNTED  NORMAL  N         512   4096  1048576     61440    55886            20480           17703              0             N  DATA/
MOUNTED  NORMAL  N         512   4096  1048576     46080    45099            15360           14869              0             N  RECOV/
MOUNTED  NORMAL  N         512   4096  1048576      6144     5218             2048            1585              0             Y  VOT/    

Disk types:

[root@rac1 ~ 23:00 48]# lsblk -f
NAME                        FSTYPE      LABEL UUID                                   MOUNTPOINT
sda                                                                                  
├─sda1                      ext4              0ddf8925-9746-4c86-9af9-399ac057098e   /boot
├─sda2                      swap              4dd0f561-432a-40ec-b822-5e7bbab3043c   [SWAP]
└─sda3                      LVM2_member       rG861i-8v1K-BLCR-op6F-riRe-ONWn-EVMVHT 
  └─vg_host-LogVol00 (dm-0) ext4              b86f35ba-2151-4d07-abac-c9c50229db3d   /
sdb                         oracleasm                                                
sdc                         oracleasm                                                
sdd                         oracleasm                                                
sde                         oracleasm                                                
sdf                         oracleasm                                                
sdh                         oracleasm                                                
sdi                         oracleasm                                                
sdg                         oracleasm                                                
sdj                         oracleasm
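
The lsblk output shows each shared disk stamped by ASMLib. To map a device back to its label, oracleasm can be queried directly (a quick sketch):

oracleasm listdisks          # labels known to ASMLib on this node
oracleasm querydisk /dev/sdb # report whether the device is stamped, and its label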

Check the cluster status

Configure the CRS environment for the root user on both rac1 and rac2:

[root@rac1 ~ 17:08 8]# vim .bash_profile
CRS_HOME="/u01/app/11.2.0/grid"
PATH=$PATH:$HOME/bin:$CRS_HOME/bin
export JAVA_HOME=/usr/local/jdk
export JRE_HOME=$JAVA_HOME/jre
export CLASSPATH=.:$JAVA_HOME/jre/lib:$JAVA_HOME/lib/tools.jar
if ! echo $PATH | grep -E -q "(^|:)$JAVA_HOME/bin(:|$)";then export PATH=$JAVA_HOME/bin:$PATH;fi
[root@rac1 ~ 17:08 9]# . .bash_profile
[root@rac2 ~ 17:08 8]# vim .bash_profile
CRS_HOME="/u01/app/11.2.0/grid"
PATH=$PATH:$HOME/bin:$CRS_HOME/bin
export JAVA_HOME=/usr/local/jdk
export JRE_HOME=$JAVA_HOME/jre
export CLASSPATH=.:$JAVA_HOME/jre/lib:$JAVA_HOME/lib/tools.jar
if ! echo $PATH | grep -E -q "(^|:)$JAVA_HOME/bin(:|$)";then export PATH=$JAVA_HOME/bin:$PATH;fi
[root@rac2 ~ 17:08 9]# . .bash_profile

To make it easy to check status from a single script, set up SSH trust from root to the grid and oracle users (su works as well):

[root@rac1 ~ 17:36 8]# ssh-keygen -t rsa
[root@rac1 ~ 17:36 8]# ssh-copy-id -i ~/.ssh/id_rsa.pub grid@rac1 && ssh-copy-id -i ~/.ssh/id_rsa.pub grid@rac2
[root@rac1 ~ 17:36 8]# ssh-copy-id -i ~/.ssh/id_rsa.pub oracle@rac1 && ssh-copy-id -i ~/.ssh/id_rsa.pub oracle@rac2
[root@rac2 ~ 17:36 8]# ssh-keygen -t rsa
[root@rac2 ~ 17:36 8]# ssh-copy-id -i ~/.ssh/id_rsa.pub grid@rac1 && ssh-copy-id -i ~/.ssh/id_rsa.pub grid@rac2
[root@rac2 ~ 17:36 8]# ssh-copy-id -i ~/.ssh/id_rsa.pub oracle@rac1 && ssh-copy-id -i ~/.ssh/id_rsa.pub oracle@rac2
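
With the trust in place, a small helper (hypothetical name check_rac.sh) can pull the health of both nodes in one pass:

#!/bin/bash
# check_rac.sh - query CRS health and ASM disk groups on every node
for node in rac1 rac2; do
    echo "==== $node ===="
    ssh grid@"$node" '. ~/.bash_profile; crsctl check crs; asmcmd lsdg'
done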
[# check local crs and check cluster crs ]
[# crsctl check crs ]
CRS-4638: Oracle High Availability Services is online
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
[# crsctl check cluster -all ]
**************************************************************
rac1:
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
rac2:
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
[# crs_stat -t -v && crsctl status resource -t ]
Name           Type           R/RA   F/FT   Target    State     Host        
----------------------------------------------------------------------
ora.DATA.dg    ora....up.type 0/5    0/     ONLINE    ONLINE    rac1        
ora....ER.lsnr ora....er.type 0/5    0/     ONLINE    ONLINE    rac1        
ora....N1.lsnr ora....er.type 0/5    0/0    ONLINE    ONLINE    rac2        
ora....N2.lsnr ora....er.type 0/5    0/0    ONLINE    ONLINE    rac1        
ora.RECOV.dg   ora....up.type 0/5    0/     ONLINE    ONLINE    rac1        
ora.VOT.dg     ora....up.type 0/5    0/     ONLINE    ONLINE    rac1        
ora.asm        ora.asm.type   0/5    0/     ONLINE    ONLINE    rac1        
ora.cvu        ora.cvu.type   0/5    0/0    ONLINE    ONLINE    rac1        
ora.gsd        ora.gsd.type   0/5    0/     OFFLINE   OFFLINE               
ora....network ora....rk.type 0/5    0/     ONLINE    ONLINE    rac1        
ora.oc4j       ora.oc4j.type  0/1    0/2    ONLINE    ONLINE    rac1        
ora.ons        ora.ons.type   0/3    0/     ONLINE    ONLINE    rac1        
ora....SM1.asm application    0/5    0/0    ONLINE    ONLINE    rac1        
ora....C1.lsnr application    0/5    0/0    ONLINE    ONLINE    rac1        
ora.rac1.gsd   application    0/5    0/0    OFFLINE   OFFLINE               
ora.rac1.ons   application    0/3    0/0    ONLINE    ONLINE    rac1        
ora.rac1.vip   ora....t1.type 0/0    0/0    ONLINE    ONLINE    rac1        
ora....SM2.asm application    0/5    0/0    ONLINE    ONLINE    rac2        
ora....C2.lsnr application    0/5    0/0    ONLINE    ONLINE    rac2        
ora.rac2.gsd   application    0/5    0/0    OFFLINE   OFFLINE               
ora.rac2.ons   application    0/3    0/0    ONLINE    ONLINE    rac2        
ora.rac2.vip   ora....t1.type 0/0    0/0    ONLINE    ONLINE    rac2        
ora.scan1.vip  ora....ip.type 0/0    0/0    ONLINE    ONLINE    rac2        
ora.scan2.vip  ora....ip.type 0/0    0/0    ONLINE    ONLINE    rac1        
[# olsnodes -n -i -s -t ]
rac1    1       rac1-vip        Active  Unpinned
rac2    2       rac2-vip        Active  Unpinned
[# srvctl status asm -a ]
ASM is running on rac2,rac1
ASM is enabled.
[# srvctl config scan ]
SCAN name: rac-scan, Network: 1/192.168.31.0/255.255.255.0/eth0
SCAN VIP name: scan1, IP: /rac-scan/192.168.31.30
SCAN VIP name: scan2, IP: /rac-scan/192.168.31.35
[# srvctl config nodeapps -a -g -s ]
Network exists: 1/192.168.31.0/255.255.255.0/eth0, type static
VIP exists: /rac1-vip/192.168.31.33/192.168.31.0/255.255.255.0/eth0, hosting node rac1
VIP exists: /rac2-vip/192.168.31.34/192.168.31.0/255.255.255.0/eth0, hosting node rac2
GSD exists
ONS exists: Local port 6100, remote port 6200, EM port 2016


[root@rac1 ~ 20:46 49]# crsctl status resource -t
--------------------------------------------------------------------------------
NAME           TARGET  STATE        SERVER                   STATE_DETAILS       
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.DATA.dg
               ONLINE  ONLINE       rac1                                         
               ONLINE  OFFLINE      rac2                                         
ora.LISTENER.lsnr
               ONLINE  ONLINE       rac1                                         
               ONLINE  ONLINE       rac2                                         
ora.RECOV.dg
               ONLINE  ONLINE       rac1                                         
               ONLINE  OFFLINE      rac2                                         
ora.VOT.dg
               ONLINE  ONLINE       rac1                                         
               ONLINE  ONLINE       rac2                                         
ora.asm
               ONLINE  ONLINE       rac1                     Started             
               ONLINE  ONLINE       rac2                     Started             
ora.gsd
               OFFLINE OFFLINE      rac1                                         
               OFFLINE OFFLINE      rac2                                         
ora.net1.network
               ONLINE  ONLINE       rac1                                         
               ONLINE  ONLINE       rac2                                         
ora.ons
               ONLINE  ONLINE       rac1                                         
               ONLINE  ONLINE       rac2                                         
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.LISTENER_SCAN1.lsnr
      1        ONLINE  ONLINE       rac2                                         
ora.LISTENER_SCAN2.lsnr
      1        ONLINE  ONLINE       rac1                                         
ora.cvu
      1        ONLINE  ONLINE       rac1                                         
ora.oc4j
      1        ONLINE  ONLINE       rac1                                         
ora.rac1.vip
      1        ONLINE  ONLINE       rac1                                         
ora.rac2.vip
      1        ONLINE  ONLINE       rac2                                         
ora.scan1.vip
      1        ONLINE  ONLINE       rac2                                         
ora.scan2.vip
      1        ONLINE  ONLINE       rac1 