实验环境
操作系统:Centos6.8
数据库:Oracle&grid 11.2.0.4
共享存储:Openfiler
操作思路:
一.系统配置:
1.关闭防火墙和selinux
2.配置好主机IP
地址和主机名
3.关闭NTP
服务(使用oracle
的NTP
服务,必须禁用系统的NTP
服务)
4.配置dns
服务(1
节点是dns server
)
5.建立安装用户,配置相应目录,给予权限,配置环境变量
5.1 grid
用户
5.2 oracle
用户
6.配置系统参数(2
个节点都做)
限制参数
内核参数
7.安装软件包(DNS
已经配置好yum
)(2
个节点都做)
8. 使用udev
配置共享磁盘
9.配置互信
配置grid
用户的互信 可以在安装界面做
配置oracle
用户的互信 可以在安装界面做
10.上传安装介质,安装和指定lib
包
在主节点上上传oracle
和grid
安装包。
二.安装集群软件
11.安装grid
12.配置asm
盘
三.安装数据库
13.安装oracle
IP
规划
主机名 IP地址:(eth0为公网ip,eth1为私网ip)
up1.node.com : 192.168.1.130/24(eth0) 172.16.1.131/24(eth1)
up2.node.com : 192.168.1.140/24(eth0) 172.16.1.141/24(eth1)
#public
192.168.1.130 up1.node.com up1
192.168.1.140 up2.node.com up2
#virtual
192.168.1.82 up1vip.node.com up1vip
192.168.1.92 up2vip.node.com up2vip
#private
172.16.1.131 up1priv.node.com up1priv
172.16.1.141 up2priv.node.com up2priv
#scanip
192.168.1.71 scanip.node.com scanip
操作:
一.系统配置:
1.关闭防火墙和selinux
2
个节点都做的:
chkconfig NetworkManager off
chkconfig iptables off
/etc/init.d/iptables stop
chkconfig --list | grep iptables
vim /etc/selinux/config
SELINUX=disabled
SELINUXTYPE=targeted
2.配置好主机IP
地址和主机名
节点1
:
[root@up1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE=eth0
HWADDR=08:00:27:81:fc:b2
ONBOOT=yes
IPADDR=192.168.1.130
BOOTPROTO=none
NETMASK=255.255.255.0
TYPE=Ethernet
[root@up1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth1
DEVICE=eth1 ifcfg-eth1是ifcfg-eth0拷贝过来的,这个地方要注意修改名字
HWADDR=08:00:27:67:a8:42
NM_CONTROLLED=yes
ONBOOT=yes
IPADDR=172.16.1.131
BOOTPROTO=none
NETMASK=255.255.255.0
TYPE=Ethernet
IPV6INIT=no
USERCTL=no
[root@up1 ~]# cat /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=up1.node.com
节点2
:
[root@up2 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE=eth0
HWADDR=08:00:27:1e:e7:fa
NM_CONTROLLED=yes
ONBOOT=yes
IPADDR=192.168.1.140
BOOTPROTO=none
NETMASK=255.255.255.0
TYPE=Ethernet
IPV6INIT=no
USERCTL=no
[root@up2 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth1
DEVICE=eth1
HWADDR=08:00:27:3a:fa:76
NM_CONTROLLED=yes
ONBOOT=yes
IPADDR=172.16.1.141
BOOTPROTO=none
NETMASK=255.255.255.0
TYPE=Ethernet
IPV6INIT=no
USERCTL=no
[root@up2 ~]# cat /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=up2.node.com
3.关闭NTP
服务(使用oracle
的NTP
服务,必须禁用系统的NTP
服务)
2
个节点都做的:
service ntpd stop
chkconfig ntpd off
/etc/init.d/ntpd status
两个节点都执行
mv /etc/ntp.conf /etc/ntp.conf.bak
4.配置dns
服务(1
节点是dns server
)
[root@up1 ~]# hostname
up1.node.com
[root@up2 ~]# hostname
up2.node.com
–让虚拟机挂载光盘,为了安装所需软件包(yum
)
配置DNS
:
4.1.每台节点都需要安装dns
包
mkdir /source
mount /dev/cdrom /source/
vim /etc/yum.repos.d/x.repo
[ok]
name=ok
baseurl=file:///source/
gpgcheck=0
enabled=1
yum clean all
yum list
两个节点都需要做
yum install bind* -y
4.2.主节点配置:
[root@up1 ~]# cd /var/named/chroot/etc/
[root@up1 etc]# pwd
/var/named/chroot/etc
[root@up1 etc]# cat named.conf
options {
directory "/dba";
};
zone "node.com" in {
type master;
file "node.com.zone";
};
zone "1.168.192.in-addr.arpa" in {
type master;
file "192.168.1.zone";
};
zone "0.0.127.in-addr.arpa" in {
type master;
file "127.0.0.zone";
};
[root@up1 etc]# cd /var/named/chroot/
[root@up1 chroot]# mkdir dba
[root@up1 chroot]# cd dba
[root@up1 dba]# ls
127.0.0.zone 192.168.1.zone node.com.zone
[root@up1 dba]# vim node.com.zone
$TTL 86400
@ IN SOA up1.node.com. root.node.com. (
2017040302 ; serial (d. adams)
3H ; refresh
15M ; retry
1W ; expiry
1D ) ; minimum
IN NS up1.node.com.
IN NS up2.node.com.
up1 IN A 192.168.1.130
up2 IN A 192.168.1.140
up1vip IN A 192.168.1.82
up2vip IN A 192.168.1.92
scanip IN A 192.168.1.71
scanip IN A 192.168.1.72
scanip IN A 192.168.1.73
[root@up1 dba]# vim 192.168.1.zone
$TTL 86400
@ IN SOA up1.node.com. root.node.com. (
2017040302 ; serial (d. adams)
3H ; refresh
15M ; retry
1W ; expiry
1D ) ; minimum
IN NS up1.node.com.
IN NS up2.node.com.
130 IN PTR up1.node.com.
140 IN PTR up2.node.com.
82 IN PTR up1vip.node.com.
92 IN PTR up2vip.node.com.
71 IN PTR scanip.node.com.
72 IN PTR scanip.node.com.
73 IN PTR scanip.node.com.
[root@up1 dba]# vim 127.0.0.zone
$TTL 86400
@ IN SOA up1.node.com. root.node.com. (
2017040302 ; serial (d. adams)
3H ; refresh
15M ; retry
1W ; expiry
1D ) ; minimum
IN NS up1.node.com.
IN NS up2.node.com.
1 IN PTR localhost.node.com.
[root@up1 dba]# pwd
/var/named/chroot/dba
[root@up1 dba]# chown root.named *
[root@up1 dba]# chmod 775 *
[root@up1 dba]# ls -l
total 12
-rwxrwx--- 1 root named 529 Apr 3 23:42 127.0.0.zone
-rwxrwx--- 1 root named 794 Apr 3 23:41 192.168.1.zone
-rwxrwx--- 1 root named 790 Apr 3 23:41 node.com.zone
[root@up1 dba]# vim /etc/resolv.conf
search node.com
nameserver 192.168.1.130
domain node.com
4.3.从节点配置:
[root@up2 etc]# pwd
/var/named/chroot/etc
[root@up2 etc]# vim named.conf
options {
directory "/dba1";
};
zone "node.com" in {
type slave;
file "node.com.zone";
masters {192.168.1.130;};
};
zone "1.168.192.in-addr.arpa" in {
type slave;
file "192.168.1.zone";
masters{192.168.1.130;};
};
zone "0.0.127.in-addr.arpa" in {
type slave;
file "127.0.0.zone";
masters{192.168.1.130;};
};
[root@up2 chroot]# pwd
/var/named/chroot
[root@up2 chroot]# mkdir dba1
[root@up2 chroot]# chown root:named dba1
[root@up2 chroot]# chmod 775 dba1
[root@up2 chroot]# cd dba1
[root@up2 dba1]# vim /etc/resolv.conf
search node.com
nameserver 192.168.1.130
domain node.com
4.4.主从2个节点按顺序都启动dns
服务
主:
/etc/init.d/named restart
chkconfig named on
从:
/etc/init.d/named restart
chkconfig named on
在启动DNS
服务时,出现Generating /etc/rndc.key
并且卡住导致服务启动不了。
/etc/rndc.key
是一个密钥文件
rndc: Remove Name Domain Controller
远程域名服务器的控制器
先退出以后发现本机并没有/etc/rndc.key
文件
解决:rndc-confgen -r /dev/urandom -a
使用命令生成这个密钥文件
重新启动即可
4.5.主从2
个节点按顺序测试:
主:
ping up1
ping up2
ping up1.node.com
ping up2.node.com
从:
ping up1
ping up2
ping up1.node.com
ping up2.node.com
[root@up2 ~]# nslookup up1vip.node.com
Server: 192.168.1.130
Address: 192.168.1.130#53
Name: up1vip.node.com
Address: 192.168.1.82
[root@up2 ~]# nslookup up2vip.node.com
Server: 192.168.1.130
Address: 192.168.1.130#53
Name: up2vip.node.com
Address: 192.168.1.92
[root@up2 ~]# nslookup up1vip
Server: 192.168.1.130
Address: 192.168.1.130#53
Name: up1vip.node.com
Address: 192.168.1.82
[root@up2 ~]# nslookup up2vip
Server: 192.168.1.130
Address: 192.168.1.130#53
Name: up2vip.node.com
Address: 192.168.1.92
[root@up2 ~]# nslookup scanip
Server: 192.168.1.130
Address: 192.168.1.130#53
Name: scanip.node.com
Address: 192.168.1.73
Name: scanip.node.com
Address: 192.168.1.71
Name: scanip.node.com
Address: 192.168.1.72
5.建立安装用户,配置相应目录,给予权限,配置环境变量
5.1 所有节点grid
用户
groupadd -g 1000 oinstall
groupadd -g 1200 asmadmin
groupadd -g 1201 asmdba
groupadd -g 1202 asmoper
useradd -u 1100 -g oinstall -G asmadmin,asmdba,asmoper -d /home/grid -s /bin/bash -c "grid Infrastructure Owner" grid
echo "grid" | passwd --stdin grid
1
节点:
[root@up1 src]# su - grid
[grid@up1 ~]$ vim .bash_profile
# .bash_profile
# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
# User specific environment and startup programs
PATH=$PATH:$HOME/bin
export PATH
export TMP=/tmp
export TMPDIR=$TMP
export ORACLE_SID=+ASM1
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/11.2.0/grid
export ORACLE_TERM=xterm
export NLS_DATE_FORMAT='yyyy-mm-dd hh24:mi:ss'
export TNS_ADMIN=$ORACLE_HOME/network/admin
export PATH=/usr/sbin:$PATH
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib
alias sqlplus='/usr/local/bin/rlwrap sqlplus'
2
节点:
[root@up2 src]# su - grid
[grid@up2 ~]$ vim .bash_profile
# .bash_profile
# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
# User specific environment and startup programs
PATH=$PATH:$HOME/bin
export PATH
export TMP=/tmp
export TMPDIR=$TMP
export ORACLE_SID=+ASM2
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/11.2.0/grid
export ORACLE_TERM=xterm
export NLS_DATE_FORMAT='yyyy-mm-dd hh24:mi:ss'
export TNS_ADMIN=$ORACLE_HOME/network/admin
export PATH=/usr/sbin:$PATH
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib
alias sqlplus='/usr/local/bin/rlwrap sqlplus'
5.2 oracle
用户
所有节点root
执行:
groupadd -g 1300 dba
groupadd -g 1301 oper
useradd -u 1101 -g oinstall -G dba,oper,asmdba -d /home/oracle -s /bin/bash -c "Oracle Software Owner" oracle
echo "oracle" | passwd --stdin oracle
1
节点:
su - oracle
[oracle@up1 ~]$ vim .bash_profile
# .bash_profile
# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
# User specific environment and startup programs
PATH=$PATH:$HOME/bin
export PATH
export TMP=/tmp
export TMPDIR=$TMP
export ORACLE_SID=racdb1
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_1
export ORACLE_UNQNAME=racdb
export TNS_ADMIN=$ORACLE_HOME/network/admin
export ORACLE_TERM=xterm
export PATH=/usr/sbin:$PATH
export PATH=$ORACLE_HOME/bin:/u01/app/11.2.0/grid/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib
export NLS_DATE_FORMAT='yyyy-mm-dd hh24:mi:ss'
alias sqlplus='/usr/local/bin/rlwrap sqlplus'
alias rman='/usr/local/bin/rlwrap rman'
umask 022
2
节点:
su - oracle
[oracle@up2 ~]$ vim .bash_profile
# .bash_profile
# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
# User specific environment and startup programs
PATH=$PATH:$HOME/bin
export PATH
export TMP=/tmp
export TMPDIR=$TMP
export ORACLE_SID=racdb2
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_1
export ORACLE_UNQNAME=racdb
export TNS_ADMIN=$ORACLE_HOME/network/admin
export ORACLE_TERM=xterm
export PATH=/usr/sbin:$PATH
export PATH=$ORACLE_HOME/bin:/u01/app/11.2.0/grid/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib
export NLS_DATE_FORMAT='yyyy-mm-dd hh24:mi:ss'
alias sqlplus='/usr/local/bin/rlwrap sqlplus'
alias rman='/usr/local/bin/rlwrap rman'
umask 022
2
个节点都做root
执行:
mkdir -p /u01/app/grid
mkdir -p /u01/app/11.2.0/grid
mkdir -p /u01/app/oracle
mkdir -p /u01/app/oracle/product/11.2.0/db_1
chown -R oracle:oinstall /u01/
chown -R grid:oinstall /u01/app/grid
chown -R grid:oinstall /u01/app/11.2.0
chown -R oracle:oinstall /u01/app/oracle
chmod -R 775 /u01
6.配置系统参数(2
个节点都做)
限制参数
echo "oracle soft nproc 2047" >>/etc/security/limits.conf
echo "oracle hard nproc 16384" >>/etc/security/limits.conf
echo "oracle soft nofile 1024" >>/etc/security/limits.conf
echo "oracle hard nofile 65536" >>/etc/security/limits.conf
echo "grid soft nproc 2047" >>/etc/security/limits.conf
echo "grid hard nproc 16384" >>/etc/security/limits.conf
echo "grid soft nofile 1024" >>/etc/security/limits.conf
echo "grid hard nofile 65536" >>/etc/security/limits.conf
# 注意:x86_64 系统上不存在 /lib/security/pam_limits.so(在 /lib64/security 下),只写模块名即可由 PAM 自行定位
echo "session required pam_limits.so" >>/etc/pam.d/login
内核参数
echo "fs.aio-max-nr = 1048576" >> /etc/sysctl.conf
echo "fs.file-max = 6815744" >> /etc/sysctl.conf
echo "kernel.shmall = 2097152" >> /etc/sysctl.conf
echo "kernel.shmmax = 1054472192" >> /etc/sysctl.conf
echo "kernel.shmmni = 4096" >> /etc/sysctl.conf
echo "kernel.sem = 250 32000 100 128" >> /etc/sysctl.conf
echo "net.ipv4.ip_local_port_range = 9000 65500" >> /etc/sysctl.conf
echo "net.core.rmem_default = 262144" >> /etc/sysctl.conf
echo "net.core.rmem_max = 4194304" >> /etc/sysctl.conf
echo "net.core.wmem_default = 262144" >> /etc/sysctl.conf
echo "net.core.wmem_max = 1048576" >> /etc/sysctl.conf
echo "net.ipv4.tcp_wmem = 262144 262144 262144" >> /etc/sysctl.conf
echo "net.ipv4.tcp_rmem = 4194304 4194304 4194304" >> /etc/sysctl.conf
sysctl -p
7.安装软件包(DNS
已经配置好yum
)(2
个节点都做)
yum install -y readline* binutils compat-libstdc++ compat-libstdc++ elfutils-libelf elfutils-libelf-devel expat gcc gcc-c++ glibc glibc glibc-common glibc-devel glibc-headers libaio libaio libaio-devel libaio-devel libgcc libgcc libstdc++ libstdc++ libstdc++-devel make pdksh sysstat unixODBC unixODBC unixODBC-devel unixODBC-devel binutils libaio-devel libaio elfutils-libelf-devel compat-libstdc++-33 libgcc gcc gcc-c++ glibc sysstat libstdc++ libstdc++-devel unixODBC-devel unixODBC
如果少包再试试下面的(通常是不用):
yum install binutils compat-libcap1 compat-libstdc++-33 gcc gcc-c++ glibc glibc-devel ksh libgcc libstdc++ libstdc++-devel libaio libaio-devel make sysstat unixODBC-devel unixODBC
2
个节点都做重启一次:(让修改的selinux
设置生效,添加共享磁盘,光驱删除盘片)去配置openfiler
########openfiler配置#########
一.安装openfiler
内核版本 2.6 x86
其他
内存 512M
硬盘200G
光驱盘:openfiler-2.3-x86-disc1.iso
启动安装:回车
跳过校验:skip
next
默认:next
手动分区: manually partion with disk druid
yes
分区: /boot 200M
/ 10000M
swap
内存2
倍
yes
配置IP
主机名 192.168.10.251/24
(注意:此网段应和公网ip
网段一样;本例此处用的是192.168.10网段,与前文规划的192.168.1公网网段不一致,实际操作时请保持同一网段)
主机名 openfiler.node.com
contiue
不配置DNS
和gateway
地区选择:上海
root
密码
next
安装完成后重启
root
登陆
根据提示访问:
https://192.168.10.251:446/
用户名:openfiler
密码:password
1.打开 iscsi
服务
Services
–》iSCSI target server
–》Enable
2.配置系统信息,增加一个网络,用于共享存储使用
System
--》 Network Access Configuration
--》
Name
: rac_net
Network/Host :192.168.10.0 注意:此处写的是网段,与公网ip相同的网段。
Netmask : 255.255.255.0
Type : share
Update
3. 做共享存储
3.1 相当于建立PV
建立扩展分区,再建立逻辑分区(3个:4G, 6G, 20G
)
Volumes–》create new physical volumes --》 /dev/sda
–》Create a partition in /dev/sda–》Extended Partition :create
–》logical : Physical Volume : create
如下:
/dev/sda4 Extended Partition (0x5) 4 1432 26108 198218002 1024 bytes Primary Extended partitions exist
/dev/sda5 Linux Physical Volume (0x8e) 5 1432 2000 4570461 4.36 GB Logical Delete
/dev/sda6 Linux Physical Volume (0x8e) 6 2001 2800 6425968 6.13 GB Logical Delete
/dev/sda7 Linux Physical Volume (0x8e) 7 2801 6000 25703968 24.51 GB Logical Delete
做完上步骤再次点击volumes
3.2 相当于建立VG
Volumes
rac_vot
/dev/sda5 4.36 GB
rac_arc
/dev/sda6 6.13 GB
rac_data
/dev/sda7 24.51 GB
3.3 相当于建立LV
Volumes
--》 右面的:Add Volume
Volume Name (no spaces. Valid characters [a-z,A-Z,0-9]): rac_dvot
Required Space (MB): 100%
Filesystem / Volume type: iSCSI
3个新分的磁盘全做
注意:每次分完一个磁盘组后,点击change
切换,然后再add volumes
rac_data
Rac_arc
Rac_vot
Volumes
–》iSCSI Targets
Target Configuration
名字可以随便起
LUN Mapping :map
0 /dev/rac_data/rac_dvot write-thru wcGuP3-i9WH-g4Zq wcGuP3-i9WH-g4Zq blockio
1 /dev/rac_arc/rac_darc write-thru 9GiCTG-Va2d-G3ga 9GiCTG-Va2d-G3ga blockio
2 /dev/rac_vot/rac_dd write-thru hwz5Yq-GwO1-3Tzm hwz5Yq-GwO1-3Tzm blockio
Network ACL : allow : update
Status --》 ISCSI Targets
看到:iqn.2017-06.com.up:racdisk1
这个就OK了
Open sessions for iSCSI target iqn.2017-06.com.up:racdisk1
######openfiler配置完成########
openfiler
配置完成之后,需要重启才能看到新的磁盘
8. 使用udev
配置共享磁盘 (两个节点都需要做)
yum install iscsi* -y
/etc/init.d/iscsid start
iscsiadm -m discovery -t sendtargets -p 192.168.1.150:3260
如果报错
[root@rac1 ~]# iscsiadm -m discovery -t sendtargets -p 172.16.1.55:3260
iscsiadm: No portals found
cat /etc/initiators.deny
# PLEASE DO NOT MODIFY THIS CONFIGURATIONFILE!
# This configuration file was autogenerated
# by Openfiler. Any manual changes will be overwritten
# Generated at: Sun Sep 16 6:16:12 CST 2012
filesystem ALL
# End of Openfiler configuration
注释掉/etc/initiators.deny文件里的内容即可正常连接。这个文件在openfiler服务器上
/etc/init.d/iscsi start
chkconfig iscsi on
chkconfig iscsid on
8.1、在rac1
进行分区,每个磁盘都使用全部空间分配一个主分区:
[root@rac1 ~]# fdisk -l
分区:
fdisk /dev/sdb
n,p,1,回车,回车,w
fdisk /dev/sdc
n,p,1,回车,回车,w
fdisk /dev/sdd
n,p,1,回车,回车,w
[root@up1 ~]# partprobe /dev/sdb
[root@up1 ~]# partprobe /dev/sdc
[root@up1 ~]# partprobe /dev/sdd
查看:
[root@rac1 ~]# fdisk -l
8.2、在rac2
上使用fdisk -l
可以查看到与rac1
一样的分区情况后,再继续。
8.3、对比两个节点的共享磁盘的UID
,确定两边完全一致:
节点1:
[root@up1 ~]# /sbin/scsi_id -g -u /dev/sdb
1ATA_VBOX_HARDDISK_VB6116258c-7210e337
[root@up1 ~]# /sbin/scsi_id -g -u /dev/sdc
1ATA_VBOX_HARDDISK_VB02cf4964-aad254ab
[root@up1 ~]# /sbin/scsi_id -g -u /dev/sdd
1ATA_VBOX_HARDDISK_VBb6362300-4cd51593
节点2:
[root@up2 ~]# /sbin/scsi_id -g -u /dev/sdb
1ATA_VBOX_HARDDISK_VB6116258c-7210e337
[root@up2 ~]# /sbin/scsi_id -g -u /dev/sdc
1ATA_VBOX_HARDDISK_VB02cf4964-aad254ab
[root@up2 ~]# /sbin/scsi_id -g -u /dev/sdd
1ATA_VBOX_HARDDISK_VBb6362300-4cd51593
8.4、udev
绑定磁盘
节点1
要:
cd /tmp
vim a.sh
#!/bin/bash
# Generate udev rules binding the shared iSCSI disks (/dev/sdb, /dev/sdc,
# /dev/sdd) to stable /dev/asm-disk* device names owned by grid:asmadmin.
# Each rule matches a disk by the SCSI serial reported by /sbin/scsi_id, so
# the names survive reboots and device reordering.
# NOTE: must run as root. Rules are APPENDED, so re-running the script will
# duplicate entries — delete the rules file first if you need to regenerate.
rules_file=/etc/udev/rules.d/99-oracle-asmdisk.rules
for i in b c d; do
  # Query the serial of /dev/sd$i once; abort instead of writing a rule with
  # an empty RESULT== match (which would bind every SCSI disk).
  serial=$(/sbin/scsi_id --whitelisted --replace-whitespace --device="/dev/sd$i") || exit 1
  [ -n "$serial" ] || exit 1
  # $name inside the single-quoted format string is left literal for udev.
  printf 'KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="%s", NAME="asm-disk%s", OWNER="grid", GROUP="asmadmin", MODE="0660"\n' \
    "$serial" "$i" >> "$rules_file"
done
chmod +x a.sh
./a.sh
scp /etc/udev/rules.d/99-oracle-asmdisk.rules up2:/etc/udev/rules.d/99-oracle-asmdisk.rules
2
个节点都要:
[root@rac1 /]# cat /etc/udev/rules.d/99-oracle-asmdisk.rules
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="1ATA_VBOX_HARDDISK_VB6116258c-7210e337", NAME="asm-diskb", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="1ATA_VBOX_HARDDISK_VB02cf4964-aad254ab", NAME="asm-diskc", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="1ATA_VBOX_HARDDISK_VBb6362300-4cd51593", NAME="asm-diskd", OWNER="grid", GROUP="asmadmin", MODE="0660"
2
个节点都要执行:
/sbin/start_udev
ll /dev/asm*
brw-rw---- 1 grid asmadmin 8, 16 Apr 16 02:41 /dev/asm-diskb
brw-rw---- 1 grid asmadmin 8, 32 Apr 16 02:41 /dev/asm-diskc
brw-rw---- 1 grid asmadmin 8, 48 Apr 16 02:41 /dev/asm-diskd
2
个节点都要添加自启动udev
:
vi /etc/rc.d/rc.local
加入:
/sbin/start_udev
9.配置互信
配置grid
用户的互信 可以在安装界面做
配置oracle
用户的互信 可以在安装界面做
10.上传安装介质,安装和指定lib
包
在主节点上上传oracle
和grid
安装包。(rlwrap
这个插件包需要两个节点都要解压并安装)
[root@up1 src]# pwd
/usr/local/src
[root@up1 src]# ls
p10404530_112030_Linux-x86-64_1of7.zip
p10404530_112030_Linux-x86-64_2of7.zip
p10404530_112030_Linux-x86-64_3of7.zip
rlwrap-0.32.tar.gz
tar zxvf rlwrap-0.32.tar.gz
unzip p10404530_112030_Linux-x86-64_1of7.zip
unzip p10404530_112030_Linux-x86-64_2of7.zip
unzip p10404530_112030_Linux-x86-64_3of7.zip
[root@up1 src]# ls
database
grid
p10404530_112030_Linux-x86-64_1of7.zip
p10404530_112030_Linux-x86-64_2of7.zip
p10404530_112030_Linux-x86-64_3of7.zip
rlwrap-0.32
rlwrap-0.32.tar.gz
chown -R oracle:oinstall database
chown -R grid:oinstall grid
安装插件包 rlwrap-0.32.tar.gz
cd rlwrap-0.32
./configure
make ; make install
2
个节点都安装cvuqdisk
和包:
[root@up1 rpm]# pwd
/usr/local/src/grid/rpm
[root@up1 rpm]# ls
cvuqdisk-1.0.9-1.rpm
[root@up1 rpm]# rpm -ivh cvuqdisk-1.0.9-1.rpm
[root@up1 rpm]# scp cvuqdisk-1.0.9-1.rpm up2:/usr/local/src/
[root@up1 rpm]# ssh up2
[root@up2 ~]# cd /usr/local/src/
[root@up2 src]# rpm -ivh cvuqdisk-1.0.9-1.rpm
2
个节点检查:/lib64/libcap.so.1
1
节点别忘了退出up2
find / -name libcap*
cd /lib64/
ls -lrt libcap*
ln -s libcap.so.2.16 libcap.so.1
安装grid
2、环境检查
先root
执行xhost +
允许其他用户调用图形桌面
su - grid
cd /usr/local/src/grid
./runcluvfy.sh stage -post hwos -n up1,up2 -verbose >>chk.log
防火墙要关!
3、启动安装
./runInstaller
1.选择更新:选择Skip software updates
,忽略更新,直接安装。
3.选择Advanced Installation
(自定义)
4.选择支持的语言,这里我们添加一个简体中文的语言(语言可选可不选)
5.Cluster Name
(集群名字)随意起即可;SCAN Name
:写scanip
即可,根据DNS
当中scanip
的解析域名也行。(如scanip.node.com
)
问题:如果这个地方过不去怎么办?
将vip
和scanip
的域名解析加入/etc/hosts
文件中即可。(如下图所示)
6.这里需要添加第二个节点
注意事项:
Public Hostname
和Virtual Hostname
必须是以主机名的形式显示,而不是ip
。如果是ip
形式说明DNS
有问题(解决方法在下面)Virtual Hostname
中不能有“-
”,因为系统不识别,所以要将“-
”去掉
3.需要先建立互信(setup
),然后在测试互信(test
)
问题1:
如果不能正确识别主机名,显示ip
地址的话,怎么处理:
2
个节点重启
/etc/init.d/named restart ;/etc/init.d/network restart
cd /tmp/
rm -rf ./*
rm -rf ./.[!.]*
这样重新进入图形界面 就不会报错(能正确识别主机名)
问题2:
互信过不去 处理(有待验证):
防火墙 selinux
关闭
删除grid
用户
重建grid
用户,重新对相应目录给予权限,配置环境变量。之后2
个节点重启
/etc/init.d/named restart
/etc/init.d/network restart
上个界面和再上个界面出问题,极有可能是 dns
配置和 ip
地址没有配置正确。
注:test ssh
7.这里会显示公网和私网的ip
网段,如果两台服务器的eth0
和eth0
或者eth1
和eth1
不在同一网段会报错。
8.选择磁盘管理方式(ASM
自动磁盘管理)。11g
用ASM
管理;10g用共享文件系统和raw
因为ASM bug
多
当status
这个位置不是 candidate
这个状态的时候,意味着这个盘不能被使用
处理:
清除磁盘信息(实际是清除磁盘头部信息)
这个好用
dd if=/dev/zero of=/dev/asm-diskb bs=8192 count=528000
下面2
个没用
dd if=/dev/zero of=/dev/sdb1 bs=8192 count=528000
dd if=/dev/zero of=/dev/sdb bs=8192 count=528000
10.统一设置密码
IPMI
(Intelligent Platform Management Interface
,智能平台管理接口),是标准的工业管理协议。IPMI
是一种规范的标准,其中重要的物理部件就是BMC
(Baseboard Management Controller
),是一种嵌入式管理微控制器,相当于整个平台管理的大脑,通过它IPMI可以监控各个传感器的数据并记录各种事件的日志,因此,IPMI
也称为BMC
。不用它
12.默认即可(根据自己的来)
13.grid
的grid base
和grid home
(默认即可)
这个位置报错,2
个原因:
1环境变量配置错误,没有这个路径
2权限不足
14.恢复目录(默认即可)
这个位置报错,2
个原因:
1环境变量配置错误,没有这个路径
2权限不足
注意有没有 error
、 warning
基本上是不予理会。
检查结果:有几个内核参数检测失败,更改时必须两个节点都更改
常见错误对于包:(node2
可以使用sftp
获取node1
上面的包)
grid/rpm
[root@node1 rpm]# rpm -ivh cvuqdisk-1.0.9-1.rpm
注:第一个elfutils
包如果有提示没装就补装,第二个pdksh
包忽略;Task resolv.conf
检查失败是因为没配置DNS
,忽略。
注:PRVF-9802
,无法验证共享磁盘,只要确定共享磁盘没问题,该问题可忽略
第二个脚本出错的话,很有可能没有安装cvuqdisk-1.0.9-1.rpm
这个包(两个节点都需要安装)
第二个脚本如果出错,重新执行root.sh
之前别忘了删除配置:/u01/app/11.2.0/grid/crs/install/rootcrs.pl -deconfig -force -verbose(RAC 集群节点用 rootcrs.pl;roothas.pl 仅用于单机 Oracle Restart 环境)
注:每个脚本先在rac1
运行,成功后在rac2
运行
脚本2
最容易出错:
orainstRoot.sh
:
这个脚本是创建oraInst.loc
,然后给这个产品目录修改权限,注意组权限是读写,宿主为777
,oraInventory
的组要是oinstall
root.sh:
1 cp grid
用户的$ORACLE_HOME/bin
下的oraenv、dbhome、coraenv
可执行程序到/usr/local/bin
路径下;
2 创建/etc/oratab
文件;
3 创建grid
用户的OCR keys
文件;
4 启动ohasd
守护进程,并且将ohasd
守护进程的启动文件写入到/etc/inittab
文件中,以便于ohasd
守护进程随着操作系统的启动而启动。如果是Oracle Enterprise Linux 6
及以上版本的操作系统的话,则将该配置信息写入到/etc/init/oracle-ohasd.conf
的独立配置文件中。
以下是脚本执行的过程
node1
[root@node1 grid]# /oracle/app/oraInventory/orainstRoot.sh
Changing permissions of /oracle/app/oraInventory.
Adding read,write permissions for group.
Removing read,write,execute permissions for world.
Changing groupname of /oracle/app/oraInventory to oinstall.
The execution of the script is complete.
[root@node1 grid]# id
uid=0(root) gid=0(root) groups=0(root),1(bin),2(daemon),3(sys),4(adm),6(disk),10(wheel)
[root@node1 grid]#
node2
脚本一:/oracle/app/oraInventory/orainstRoot.sh
[root@node2 grid]# /oracle/app/oraInventory/orainstRoot.sh
Changing permissions of /oracle/app/oraInventory.
Adding read,write permissions for group.
Removing read,write,execute permissions for world.
Changing groupname of /oracle/app/oraInventory to oinstall.
The execution of the script is complete.
[root@node2 grid]#
node1
脚本二:/oracle/11.2.0/grid/crs/root.sh
[root@node1 grid]# /oracle/11.2.0/grid/crs/root.sh
Performing root user operation for Oracle 11g
The following environment variables are set as:
ORACLE_OWNER= grid
ORACLE_HOME= /oracle/11.2.0/grid/crs
Enter the full pathname of the local bin directory: [/usr/local/bin]: --回车
Copying dbhome to /usr/local/bin ...
Copying oraenv to /usr/local/bin ...
Copying coraenv to /usr/local/bin ...
Creating /etc/oratab file...
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Using configuration parameter file: /oracle/11.2.0/grid/crs/crs/install/crsconfig_params
Creating trace directory
User ignored Prerequisites during installation
OLR initialization - successful
root wallet
root wallet cert
root cert export
peer wallet
profile reader wallet
pa wallet
peer wallet keys
pa wallet keys
peer cert request
pa cert request
peer cert
pa cert
peer root cert TP
profile reader root cert TP
pa root cert TP
peer pa cert TP
pa peer cert TP
profile reader pa cert TP
profile reader peer cert TP
peer user cert
pa user cert
Adding Clusterware entries to inittab
CRS-2672: Attempting to start 'ora.mdnsd' on 'node1'
CRS-2676: Start of 'ora.mdnsd' on 'node1' succeeded
CRS-2672: Attempting to start 'ora.gpnpd' on 'node1'
CRS-2676: Start of 'ora.gpnpd' on 'node1' succeeded
CRS-2672: Attempting to start 'ora.cssdmonitor' on 'node1'
CRS-2672: Attempting to start 'ora.gipcd' on 'node1'
CRS-2676: Start of 'ora.gipcd' on 'node1' succeeded
CRS-2676: Start of 'ora.cssdmonitor' on 'node1' succeeded
CRS-2672: Attempting to start 'ora.cssd' on 'node1'
CRS-2672: Attempting to start 'ora.diskmon' on 'node1'
CRS-2676: Start of 'ora.diskmon' on 'node1' succeeded
CRS-2676: Start of 'ora.cssd' on 'node1' succeeded
已成功创建并启动 ASM。
已成功创建磁盘组OCR_VOTE。
clscfg: -install mode specified
Successfully accumulated necessary OCR keys.
Creating OCR keys for user 'root', privgrp 'root'..
Operation successful.
CRS-4256: Updating the profile
Successful addition of voting disk 16a36bc2d6d04fb2bf0f1da5fab701a9.
Successfully replaced voting disk group with +OCR_VOTE.
CRS-4256: Updating the profile
CRS-4266: Voting file(s) successfully replaced
-## STATE File Universal Id File Name Disk group
-- ----- ----------------- --------- ---------
1. ONLINE 16a36bc2d6d04fb2bf0f1da5fab701a9 (/dev/raw/raw1) [OCR_VOTE]
Located 1 voting disk(s).
CRS-2672: Attempting to start 'ora.asm' on 'node1'
CRS-2676: Start of 'ora.asm' on 'node1' succeeded
CRS-2672: Attempting to start 'ora.OCR_VOTE.dg' on 'node1'
CRS-2676: Start of 'ora.OCR_VOTE.dg' on 'node1' succeeded
CRS-2672: Attempting to start 'ora.registry.acfs' on 'node1'
CRS-2676: Start of 'ora.registry.acfs' on 'node1' succeeded
Configure Oracle Grid Infrastructure for a Cluster ... succeeded
[root@node1 grid]#
node2
脚本二:/oracle/11.2.0/grid/crs/root.sh
[root@node2 grid]# /oracle/11.2.0/grid/crs/root.sh
Performing root user operation for Oracle 11g
The following environment variables are set as:
ORACLE_OWNER= grid
ORACLE_HOME= /oracle/11.2.0/grid/crs
Enter the full pathname of the local bin directory: [/usr/local/bin]:
Copying dbhome to /usr/local/bin ...
Copying oraenv to /usr/local/bin ...
Copying coraenv to /usr/local/bin ...
Creating /etc/oratab file...
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Using configuration parameter file: /oracle/11.2.0/grid/crs/crs/install/crsconfig_params
Creating trace directory
User ignored Prerequisites during installation
OLR initialization - successful
Adding Clusterware entries to inittab
CRS-4402: The CSS daemon was started in exclusive mode but found an active CSS daemon on node node1, number 1, and is terminating
An active cluster was found during exclusive startup, restarting to join the cluster
Configure Oracle Grid Infrastructure for a Cluster ... succeeded
[root@node2 grid]#
跑完脚本然后点OK
到100%
的时候报错,可以直接点击OK
,然后skip
。查看日志发现几个error
,
INFO: Checking name resolution setup for "dbscan"...
INFO: ERROR:
INFO: PRVG-1101 : SCAN name "dbscan" failed to resolve
INFO: ERROR:
INFO: PRVF-4657 : Name resolution setup check for "dbscan" (IP address: 192.168.16.30) failed
INFO: ERROR:
INFO: PRVF-4664 : Found inconsistent name resolution entries for SCAN name "dbscan"
INFO: Verification of SCAN VIP and Listener setup failed
INFO: Checking OLR integrity...
INFO: Checking OLR config file...
INFO: OLR config file check successful
INFO: Checking OLR file attributes...
INFO: OLR file check successful
INFO: WARNING:
INFO: Checking name resolution setup for "dbscan"...
INFO: ERROR:
INFO: PRVG-1101 : SCAN name "dbscan" failed to resolve
INFO: ERROR:
INFO: PRVF-4657 : Name resolution setup check for "dbscan" (IP address: 192.168.16.30) failed
INFO: ERROR:
INFO: PRVF-4664 : Found inconsistent name resolution entries for SCAN name "dbscan"
INFO: Verification of SCAN VIP and Listener setup failed
INFO: Checking OLR integrity...
INFO: Checking OLR config file...
INFO: OLR config file check successful
INFO: Checking OLR file attributes...
INFO: OLR file check successful
这个错误是scan
解析失败,在os
中ping
一下scanip
和scan name
如果能ping
通的话,那就没问题,直接ok
,然后skip
注:忽略
ASMCA
创建磁盘组DATA
,ARC
[grid@node1 ~]$ asmca
第一章 安装rac db
注:使用oracle
用户安装。
一.启动安装
[oracle@rac1 database]$ ./runInstaller
注:忽略
以下是脚本执行过程
node1
[root@node1 db_1]# /oracle/app/oracle/product/11.2.0/db_1/root.sh
Performing root user operation for Oracle 11g
The following environment variables are set as:
ORACLE_OWNER= oracle
ORACLE_HOME= /oracle/app/oracle/product/11.2.0/db_1
Enter the full pathname of the local bin directory: [/usr/local/bin]: --回车
The contents of "dbhome" have not changed. No need to overwrite.
The contents of "oraenv" have not changed. No need to overwrite.
The contents of "coraenv" have not changed. No need to overwrite.
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Finished product-specific root actions.
[root@node1 db_1]#
node2:
[root@node2 db_1]# /oracle/app/oracle/product/11.2.0/db_1/root.sh
Performing root user operation for Oracle 11g
The following environment variables are set as:
ORACLE_OWNER= oracle
ORACLE_HOME= /oracle/app/oracle/product/11.2.0/db_1
Enter the full pathname of the local bin directory: [/usr/local/bin]:
The contents of "dbhome" have not changed. No need to overwrite.
The contents of "oraenv" have not changed. No need to overwrite.
The contents of "coraenv" have not changed. No need to overwrite.
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Finished product-specific root actions.
[root@node2 db_1]#
跑完后点OK
三.创建RAC db
su - oracle
dbca
注:general purpose
。。。方式建库比较快,生产库建议custom database
下面2
个参数添加的是:oracle
用户环境变量里的ORACLE_UNQNAME
global database name : ORACLE_UNQNAME
sid prefix : ORACLE_UNQNAME
这个地方要点 select all
,选择admin-managed
说明:
Policy-Managed方式介绍 基于策略的管理方式,是以服务器池(Server
Pools)为基础的,简单地说,就是先定义一些服务器池,池中包含一定量的服务器,然后再定义一些策略,根据这些策略Oracle会自动决定让多少数据
库实例运行在池中的几台机器上。数据库实例名后缀、数据库实例个数、所运行的主机,这些都是通过策略决定的,而不是数据库管理员事先定好的。
何种环境适合使用这种新的方式进行管理?
当管理大量的服务器集群,并且在这些集群中运行着多种不同重要程度,不同策略的RAC数据库时,为了简化管理,建议使用Policy-
Managed方式,实际上Oracle也建议只有在超过3台的服务器的时候才使用Policy-Managed来管理整个数据库集群。想象一下使用
Policy-Managed方式可以达到的效果:如果我们有10台服务器组成,根据不同的应用的重要性定义服务器池的关键程度,然后在其中某些机器意外
停机的情况下,仍然可以自动地保持足够多的机器给重要的系统提供数据库服务,而将不关键的系统数据库服务器个数降低到最低限度。
策略管理:DBA指定数据库资源运行在哪个服务器池(排除generic or free)。Oracle
Clusterware负责将数据库资源放在一台服务器。 Policy managed: Database administrators
specify in which server pool (excluding generic or free) the database
resource will run. Oracle Clusterware is responsible for placing the
database resource on a server. 服务器以如下次序被分配入服务器池: Generic server pool
User assigned server pool Free Oralce Clusterware使用服务器池的重要性决定分配服务器次序:
按重要性次序分配服务器给所有服务器池,直到满足服务器池的最小数目要求 按重要性次序分配服务器给服务器池,直到它们满足服务器池的最大数目要求
默认,任何剩下的服务器加入FREE服务器池 策略管理数据库背后的目标是删除到1个特定实例或服务 服务的硬编码
数据库可以和1个服务器池关联(而不是特定的节点集)。服务器池决定被资源(数据库,服务,第三方应用程序)所需的最小和最大服务器数目。
数据库实例将运行在已被分配给服务器池的服务器上。(使用min_size决定数据库必需运行在哪些服务器,以及必需运行在多少服务器上)
既然被分配给服务器池的服务器可以动态地变更,这允许Oracle基于集群可用的服务器总数动态地交付服务。
数据库实例将启动在足够多的服务器上(受制于服务器的可用性)。无需硬编码规定数据库实例运行在哪些服务器上。
数据库的任何实例可以运行在任何节点上。在实例号和节点之间无固定的映射关系。
当服务器被释放/添加/删除时,他们按之前提及的规则被分配到存在的服务器池中。 理论上的例子
例如,如果1个集群,总共有8个节点组成,并且支持3个RAC数据库。每个数据库将定义服务器的最小和最大数目。
假设DB1定义最小4台、最多6台服务器(重要性为10), 假设DB2定义最小2台、最多3台服务器(重要性为7),
假设DB3定义最小2台、最多3台服务器(重要性为5)。
初始8节点将被配置成节点1-4被分配给DB1,节点5-6被分配给DB2,节点7-8被分配给DB3。如果节点3由于某种原因发生故障,系统将分配节点7或8给DB1,因为其比DB3有更高的重要性而且最小需要4台服务器,即使将导致DB3降到最小服务器水平以下。如果节点3被重新激活,将被立即分配给DB3以使数据库恢复到最小所需的服务器数。
如果第9个节点被添加到集群,将被分配给DB1,因为其重要性最高而且未满足最大服务器数。Admin-Managed方式介绍
实际上上面的表述已经明确说明了,Policy-Managed和Admin-Managed方式的差别。让我们再回顾一下,在以往我们创建一个RAC数
据库大概是怎样的方法,我们在dbca的界面中会选择要将数据库实例运行在整个集群中的几台机器上,或者是2台或者是3台,甚或是更多,但是只要在安装的
时候选定几台机器,那么以后如果不做增减节点的操作,就始终会在这几台机器上运行。而且,通常会根据主机名称的排序自动将每台主机上的数据库实例依次命名
为dbname1到dbnameN。这些在管理员安装完毕以后,都不会再自动变化,这就是Admin-Managed方式。
管理员管理:DBA指定数据库资源运行的所有服务器,并且按需手动放置资源。这是之前版本Oracle数据库使用的管理策略。
这里可以将EM
点掉。EM
是11g
新特性,图形化操作,但性能会消耗10%
左右。
统一设置sys
用户和system
用户密码
某同学报错,需要之前在grid
用下 用asmca
创建diskgroup
名字叫: DATA
,在数据库环境下,自动前面有+
号
这里需要开启归档模式
HR
或者SH
用户的模板
注释:这里需要关掉AMM
(自动内存管理,同时管理PGA
和SGA
,11g新特性),但会造成内存抖动。
关掉则会使用10g
的ASMM
(自动共享内存管理)
点下一步之后才会到下面的界面
注意编码
第二章 集群检查
su - grid
[grid@rac1 ~]$ crsctl check crs
CRS-4638: Oracle High Availability Services is online
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
[grid@rac1 ~]$ crsctl stat res -t
--------------------------------------------------------------------------------
NAME TARGET STATE SERVER STATE_DETAILS
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.DATA.dg
ONLINE ONLINE rac1
ONLINE ONLINE rac2
ora.FRA.dg
ONLINE ONLINE rac1
ONLINE ONLINE rac2
ora.LISTENER.lsnr
ONLINE ONLINE rac1
ONLINE ONLINE rac2
ora.OCR.dg
ONLINE ONLINE rac1
ONLINE ONLINE rac2
ora.asm
ONLINE ONLINE rac1 Started
ONLINE ONLINE rac2 Started
ora.gsd
OFFLINE OFFLINE rac1
OFFLINE OFFLINE rac2
ora.net1.network
ONLINE ONLINE rac1
ONLINE ONLINE rac2
ora.ons
ONLINE ONLINE rac1
ONLINE ONLINE rac2
ora.registry.acfs
ONLINE ONLINE rac1
ONLINE ONLINE rac2
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.LISTENER_SCAN1.lsnr
1 ONLINE ONLINE rac1
ora.cvu
1 ONLINE ONLINE rac1
ora.mydb.db
1 ONLINE ONLINE rac1 Open
2 ONLINE ONLINE rac2 Open
ora.oc4j
1 ONLINE ONLINE rac1
ora.rac1.vip
1 ONLINE ONLINE rac1
ora.rac2.vip
1 ONLINE ONLINE rac2
ora.scan1.vip
1 ONLINE ONLINE rac1
[oracle@rac1 database]$ srvctl status nodeapps
VIP rac1-vip is enabled
VIP rac1-vip is running on node: rac1
VIP rac2-vip is enabled
VIP rac2-vip is running on node: rac2
Network is enabled
Network is running on node: rac1
Network is running on node: rac2
GSD is disabled
GSD is not running on node: rac1
GSD is not running on node: rac2
ONS is enabled
ONS daemon is running on node: rac1
ONS daemon is running on node: rac2
srvctl status scan
srvctl config scan
srvctl config listener -n rac1
……
---------------------------
2个节点:
su - oracle
cd $ORACLE_HOME/sqlplus/admin
vim glogin.sql
set sqlprompt '_user"@"_connect_identifier> '
define _editor = 'vim'
一些异常:
chown root:oinstall /var/tmp/.oracle/npohasd
重新执行root.sh
之前别忘了删除配置:/u01/app/11.2.0/grid/crs/install/rootcrs.pl -deconfig -force -verbose(RAC 集群节点用 rootcrs.pl;roothas.pl 仅用于单机 Oracle Restart 环境)
find / -name libcap*
cd /lib64/
ls -lrt libcap*
ln -s libcap.so.2.16 libcap.so.1
重新安装grid
:
清除grid
软件:
rm -rf /u01/app/grid/
rm -rf /u01/app/11.2.0/grid/
rm -rf /u01/app/oraInventory/
清除系统信息:
rm -rf /etc/oracle/*
rm -rf /tmp/*
rm -rf /var/tmp/*
rm -rf /etc/init.d/init.cssd
rm -rf /etc/init.d/init.crs
rm -rf /etc/init.d/init.crsd
rm -rf /etc/init.d/init.evmd
rm -rf /etc/init.d/init.ohasd
rm -rf /etc/rc2.d/K96init.crs
rm -rf /etc/rc2.d/S96init.crs
rm -rf /etc/rc3.d/K96init.crs
rm -rf /etc/rc3.d/S96init.crs
rm -rf /etc/rc5.d/K96init.crs
rm -rf /etc/rc5.d/S96init.crs
rm -rf /etc/oracle/scls_scr
rm -rf /etc/oraInst.loc
rm -rf /etc/oratab
rm -rf /tmp/.oracle
清除磁盘信息
dd if=/dev/zero of=/dev/sdc1 bs=8192 count=128000
dd if=/dev/zero of=/dev/sdd1 bs=8192 count=128000
dd if=/dev/zero of=/dev/asm-diskb bs=8192 count=528000