Building an Oracle RAC cluster database: configuring the system environment step by step
The most important part of building RAC is the preparation: setting up the system environment.
As the saying goes, sharpening the axe does not delay the cutting of firewood, and that fits
RAC preparation perfectly. Repeated test builds show that as long as the environment is
configured correctly up front, the later installation of the GI software, the Oracle software,
and the database creation will not run into errors. Even a small slip during environment
configuration will disrupt the RAC installation, add troubleshooting and rework, and may leave
the GI or Oracle software uninstallable, at which point there is no RAC database to speak of
and the preparatory work is largely wasted.
Virtualization: VM VirtualBox
OS: Red Hat Enterprise Linux 5.5, 32-bit
Node        Public IP        VIP              Private IP
node1       192.168.56.11    192.168.56.31    192.168.100.21
node2       192.168.56.12    192.168.56.32    192.168.100.22
rac_scan    192.168.56.25    (SCAN address)
----Host memory, network, and storage configuration:
--Node 1:
[root@node1 ~]# cat /etc/sysconfig/network
NETWORKING=yes
NETWORKING_IPV6=no
HOSTNAME=node1
[root@node1 ~]#
[root@node1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0
# Intel Corporation 82540EM Gigabit Ethernet Controller
DEVICE=eth0
BOOTPROTO=static
ONBOOT=yes
IPADDR=192.168.56.11
NETMASK=255.255.255.0
GATEWAY=192.168.56.1
[root@node1 ~]#
[root@node1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth1
# Intel Corporation 82540EM Gigabit Ethernet Controller
DEVICE=eth1
BOOTPROTO=static
ONBOOT=yes
IPADDR=192.168.100.21
NETMASK=255.255.255.0
[root@node1 ~]#
--Node 2:
[root@node2 ~]# cat /etc/sysconfig/network
NETWORKING=yes
NETWORKING_IPV6=no
HOSTNAME=node2
[root@node2 ~]#
[root@node2 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0
# Intel Corporation 82540EM Gigabit Ethernet Controller
DEVICE=eth0
BOOTPROTO=static
ONBOOT=yes
IPADDR=192.168.56.12
NETMASK=255.255.255.0
GATEWAY=192.168.56.1
[root@node2 ~]#
[root@node2 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth1
# Intel Corporation 82540EM Gigabit Ethernet Controller
DEVICE=eth1
BOOTPROTO=static
ONBOOT=yes
IPADDR=192.168.100.22
NETMASK=255.255.255.0
[root@node2 ~]#
--Configure the hosts file:
[root@node1 ~]# vi /etc/hosts
# Do not remove the following line, or various programs
# that require network functionality will fail.
127.0.0.1 localhost
::1 localhost6.localdomain6 localhost6
192.168.56.11 node1
192.168.56.31 node1-vip
192.168.100.21 node1-priv
192.168.56.12 node2
192.168.56.32 node2-vip
192.168.100.22 node2-priv
192.168.56.25 rac_scan
~
#Configure the same entries on both nodes.
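#Optional check: resolve and ping each name from both nodes (a quick sketch; the VIP and
#SCAN addresses will not answer until Grid Infrastructure brings them online):
for h in node1 node1-priv node2 node2-priv; do
    ping -c 1 $h
done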
----Create groups and users:
--Remove any pre-existing users and groups:
[root@node1 ~]# cd /var/spool/mail
[root@node1 mail]# ls
oracle rpc tom
[root@node1 mail]# rm -rf oracle
[root@node1 mail]# cd /home
[root@node1 home]# ls
oracle tom
[root@node1 home]# rm -rf oracle/
[root@node1 home]# cd \
[root@node1 home]# cd \
>
[root@node1 ~]#
[root@node1 ~]# userdel oracle
[root@node1 ~]# groupdel dba
[root@node1 ~]# groupdel oinstall
[root@node1 ~]# groupdel oper
groupdel: group oper does not exist
[root@node1 ~]#
#Remove the old users and groups; do the same on both nodes.
--Create the users and groups:
[root@node1 ~]#
[root@node1 ~]# groupadd -g 200 oinstall
[root@node1 ~]# groupadd -g 201 dba
[root@node1 ~]# groupadd -g 202 oper
[root@node1 ~]# groupadd -g 203 asmadmin
[root@node1 ~]# groupadd -g 204 asmoper
[root@node1 ~]# groupadd -g 205 asmdba
[root@node1 ~]# useradd -u 200 -g oinstall -G dba,asmdba,oper oracle
[root@node1 ~]# useradd -u 201 -g oinstall -G asmadmin,asmdba,asmoper,oper,dba grid
[root@node1 ~]#
[root@node2 ~]# cd /var/spool/mail
[root@node2 mail]# rm -rf oracle
[root@node2 mail]# cd /home
[root@node2 home]# rm -rf oracle/
[root@node2 home]# cd \
>
[root@node2 ~]#
[root@node2 ~]#
[root@node2 ~]# userdel oracle
[root@node2 ~]# groupdel dba
[root@node2 ~]# groupdel oinstall
[root@node2 ~]# groupdel oper
groupdel: group oper does not exist
[root@node2 ~]#
[root@node2 ~]# groupadd -g 200 oinstall
[root@node2 ~]# groupadd -g 201 dba
[root@node2 ~]# groupadd -g 202 oper
[root@node2 ~]# groupadd -g 203 asmadmin
[root@node2 ~]# groupadd -g 204 asmoper
[root@node2 ~]# groupadd -g 205 asmdba
[root@node2 ~]# useradd -u 200 -g oinstall -G dba,asmdba,oper oracle
[root@node2 ~]# useradd -u 201 -g oinstall -G asmadmin,asmdba,asmoper,oper,dba grid
[root@node2 ~]#
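#Optional check: confirm the UIDs, GIDs, and group memberships match on both nodes
#(a sketch; run on each node and compare the output):
id oracle   # expect uid=200(oracle) gid=200(oinstall) groups=oinstall,dba,oper,asmdba
id grid     # expect uid=201(grid) gid=200(oinstall) groups=oinstall,dba,oper,asmadmin,asmoper,asmdba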
----Create the required directories and set ownership and permissions:
--Node 1:
[root@node1 ~]# pwd
/root
[root@node1 ~]# mkdir -p /u01/app/oraInventory
[root@node1 ~]# chown -R grid:oinstall /u01/app/oraInventory/
[root@node1 ~]# chmod -R 775 /u01/app/oraInventory/
[root@node1 ~]# mkdir -p /u01/11.2.0/grid
[root@node1 ~]# chown -R grid:oinstall /u01/11.2.0/grid/
[root@node1 ~]# chmod -R 775 /u01/11.2.0/grid/
[root@node1 ~]# mkdir -p /u01/app/oracle
[root@node1 ~]# mkdir -p /u01/app/oracle/cfgtoollogs
[root@node1 ~]# mkdir -p /u01/app/oracle/product/11.2.0/db_1
[root@node1 ~]# chown -R oracle:oinstall /u01/app/oracle
[root@node1 ~]# chmod -R 775 /u01/app/oracle
[root@node1 ~]#
----------------------------
--Node 2:
[root@node2 ~]# pwd
/root
[root@node2 ~]# mkdir -p /u01/app/oraInventory
[root@node2 ~]# chown -R grid:oinstall /u01/app/oraInventory/
[root@node2 ~]# chmod -R 775 /u01/app/oraInventory/
[root@node2 ~]# mkdir -p /u01/11.2.0/grid
[root@node2 ~]# chown -R grid:oinstall /u01/11.2.0/grid/
[root@node2 ~]# chmod -R 775 /u01/11.2.0/grid/
[root@node2 ~]# mkdir -p /u01/app/oracle
[root@node2 ~]# mkdir -p /u01/app/oracle/cfgtoollogs
[root@node2 ~]# mkdir -p /u01/app/oracle/product/11.2.0/db_1
[root@node2 ~]# chown -R oracle:oinstall /u01/app/oracle
[root@node2 ~]# chmod -R 775 /u01/app/oracle
[root@node2 ~]#
----Set passwords for the oracle and grid users:
[root@node1 ~]#
[root@node1 ~]# passwd oracle
Changing password for user oracle.
New UNIX password:
BAD PASSWORD: it is based on a dictionary word
Retype new UNIX password:
passwd: all authentication tokens updated successfully.
[root@node1 ~]# passwd grid
Changing password for user grid.
New UNIX password:
BAD PASSWORD: it is based on a dictionary word
Retype new UNIX password:
passwd: all authentication tokens updated successfully.
[root@node1 ~]#
------------------------
[root@node2 ~]#
[root@node2 ~]# passwd oracle
Changing password for user oracle.
New UNIX password:
BAD PASSWORD: it is based on a dictionary word
Retype new UNIX password:
passwd: all authentication tokens updated successfully.
[root@node2 ~]# passwd grid
Changing password for user grid.
New UNIX password:
BAD PASSWORD: it is based on a dictionary word
Retype new UNIX password:
passwd: all authentication tokens updated successfully.
[root@node2 ~]#
#Set the passwords on both nodes.
----Modify kernel parameters:
--Append the following to /etc/sysctl.conf:
[root@node1 ~]# vi /etc/sysctl.conf
# Kernel sysctl configuration file for Red Hat Linux
#
# For binary values, 0 is disabled, 1 is enabled. See sysctl(8) and
# sysctl.conf(5) for more details.
... ...
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.shmall = 2097152
kernel.shmmax = 536870912
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
--Apply the kernel parameter changes:
[root@node1 ~]# sysctl -p
net.ipv4.ip_forward = 0
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
kernel.sysrq = 0
kernel.core_uses_pid = 1
net.ipv4.tcp_syncookies = 1
kernel.msgmnb = 65536
kernel.msgmax = 65536
kernel.shmmax = 4294967295
kernel.shmall = 268435456
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.shmall = 2097152
kernel.shmmax = 536870912
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
[root@node1 ~]#
#Do the same on both nodes.
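#Optional spot-check that the new values are active (a sketch):
sysctl -n kernel.shmmax    # expect 536870912
sysctl -n fs.file-max      # expect 6815744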
--Append the following to /etc/security/limits.conf:
[root@node1 ~]# vi /etc/security/limits.conf
# /etc/security/limits.conf
#
#Each line describes a limit for a user in the form:
... ...
oracle soft nproc 2047
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536
oracle soft stack 10240
grid soft nproc 2047
grid hard nproc 16384
grid soft nofile 1024
grid hard nofile 65536
grid soft stack 10240
--Append the following to /etc/pam.d/login:
[root@node1 ~]# vi /etc/pam.d/login
session required /lib/security/pam_limits.so
#Do the same on both nodes.
--Append the following to /etc/profile:
[root@node1 ~]# vi /etc/profile
if [ $USER = "oracle" ]||[ $USER = "grid" ]; then
if [ $SHELL = "/bin/ksh" ]; then
ulimit -p 16384
ulimit -n 65536
else
ulimit -u 16384 -n 65536
fi
fi
#Do the same on both nodes.
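#Optional check: confirm the limits take effect for a fresh login (a sketch; run as root):
su - oracle -c 'ulimit -u -n'    # expect 16384 and 65536
su - grid -c 'ulimit -u -n'      # expect 16384 and 65536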
----Disable the system NTP service and use Oracle's own cluster time synchronization service (CTSS):
--Stop the relevant system services:
[root@node1 ~]#
[root@node1 ~]# chkconfig ntpd off
[root@node1 ~]# mv /etc/ntp.conf /etc/ntp.conf.bak
[root@node1 ~]# chkconfig sendmail off
[root@node1 ~]#
#Do the same on both nodes.
--Verify that the two nodes' clocks differ by less than 20 seconds:
[root@node1 ~]#
[root@node1 ~]# date
Fri Oct 28 12:23:11 CST 2016
[root@node1 ~]#
[root@node2 ~]#
[root@node2 ~]# date
Fri Oct 28 12:23:20 CST 2016
[root@node2 ~]#
----Switch to the oracle and grid users and set their environment variables (all nodes):
#node1: ORACLE_SID=prod1 (oracle user), ORACLE_SID=+ASM1 (grid user)
#node2: ORACLE_SID=prod2 (oracle user), ORACLE_SID=+ASM2 (grid user)
---oracle user:
--Node 1:
[oracle@node1 ~]$ vi .bash_profile
# .bash_profile
# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
# User specific environment and startup programs
PATH=$PATH:$HOME/bin
export PATH
export EDITOR=vi
export ORACLE_SID=prod1
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_1
export LD_LIBRARY_PATH=$ORACLE_HOME/lib
export PATH=$ORACLE_HOME/bin:/bin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/X11R6/bin
umask 022
~
[oracle@node1 ~]$ . .bash_profile
[oracle@node1 ~]$
--Node 2:
[oracle@node2 ~]$ vi .bash_profile
# .bash_profile
# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
# User specific environment and startup programs
PATH=$PATH:$HOME/bin
export PATH
export EDITOR=vi
export ORACLE_SID=prod2
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_1
export LD_LIBRARY_PATH=$ORACLE_HOME/lib
export PATH=$ORACLE_HOME/bin:/bin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/X11R6/bin
umask 022
[oracle@node2 ~]$ . .bash_profile
[oracle@node2 ~]$
---grid user:
--Node 1:
[grid@node1 ~]$ vi .bash_profile
# .bash_profile
# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
# User specific environment and startup programs
PATH=$PATH:$HOME/bin
export PATH
export EDITOR=vi
export ORACLE_SID=+ASM1
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=/u01/11.2.0/grid
export GRID_HOME=/u01/11.2.0/grid
export LD_LIBRARY_PATH=$ORACLE_HOME/lib
export THREADS_FLAG=native
export PATH=$ORACLE_HOME/bin:/bin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/X11R6/bin
umask 022
~
~
".bash_profile" 23L, 484C written
[grid@node1 ~]$ . .bash_profile
[grid@node1 ~]$
--Node 2:
[grid@node2 ~]$ vi .bash_profile
# .bash_profile
# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
# User specific environment and startup programs
PATH=$PATH:$HOME/bin
export PATH
export EDITOR=vi
export ORACLE_SID=+ASM2
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=/u01/11.2.0/grid
export GRID_HOME=/u01/11.2.0/grid
export LD_LIBRARY_PATH=$ORACLE_HOME/lib
export THREADS_FLAG=native
export PATH=$ORACLE_HOME/bin:/bin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/X11R6/bin
umask 022
~
~
".bash_profile" 23L, 484C written
[grid@node2 ~]$ . .bash_profile
[grid@node2 ~]$
----Configure shared storage:
---Managed through ASM:
1) OCR disk: stores the CRS resource configuration information
2) Voting disk: arbitration disk that records node status
3) Data disk: holds the datafiles, controlfiles, redo log files, spfile, etc.
4) Recovery area: holds flashback database logs, archive logs, RMAN backups, etc.
--Check the disk sizes:
[root@node1 ~]# fdisk -l
Disk /dev/sda: 68.8 GB, 68862869504 bytes
255 heads, 63 sectors/track, 8372 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sda1 * 1 13 104391 83 Linux
/dev/sda2 14 8372 67143667+ 8e Linux LVM
Disk /dev/sdb: 26.8 GB, 26843545600 bytes
255 heads, 63 sectors/track, 3263 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Disk /dev/sdb doesn't contain a valid partition table
[root@node1 ~]#
--Partition the shared disk:
[root@node1 ~]# fdisk /dev/sdb
Device contains neither a valid DOS partition table, nor Sun, SGI or OSF disklabel
Building a new DOS disklabel. Changes will remain in memory only,
until you decide to write them. After that, of course, the previous
content won't be recoverable.
The number of cylinders for this disk is set to 3263.
There is nothing wrong with that, but this is larger than 1024,
and could in certain setups cause problems with:
1) software that runs at boot time (e.g., old versions of LILO)
2) booting and partitioning software from other OSs
(e.g., DOS FDISK, OS/2 FDISK)
Warning: invalid flag 0x0000 of partition table 4 will be corrected by w(rite)
--Partitioning steps:
Command (m for help): n
Command action
e extended
p primary partition (1-4)
p
Partition number (1-4): 1
First cylinder (1-3263, default 1):
Using default value 1
Last cylinder or +size or +sizeM or +sizeK (1-3263, default 3263): +1G
Command (m for help): n
Command action
e extended
p primary partition (1-4)
p
Partition number (1-4): 2
First cylinder (124-3263, default 124):
Using default value 124
Last cylinder or +size or +sizeM or +sizeK (124-3263, default 3263): +1G
Command (m for help): n
Command action
e extended
p primary partition (1-4)
p
Partition number (1-4): 3
First cylinder (247-3263, default 247):
Using default value 247
Last cylinder or +size or +sizeM or +sizeK (247-3263, default 3263): +1G
Command (m for help): n
Command action
e extended
p primary partition (1-4)
e
Selected partition 4
First cylinder (370-3263, default 370):
Using default value 370
Last cylinder or +size or +sizeM or +sizeK (370-3263, default 3263):
Using default value 3263
Command (m for help): n
First cylinder (370-3263, default 370):
Using default value 370
Last cylinder or +size or +sizeM or +sizeK (370-3263, default 3263): +7G
Command (m for help): n
First cylinder (1222-3263, default 1222):
Using default value 1222
Last cylinder or +size or +sizeM or +sizeK (1222-3263, default 3263): +7G
Command (m for help): n
First cylinder (2074-3263, default 2074):
Using default value 2074
Last cylinder or +size or +sizeM or +sizeK (2074-3263, default 3263): +3G
Command (m for help): n
First cylinder (2440-3263, default 2440):
Using default value 2440
Last cylinder or +size or +sizeM or +sizeK (2440-3263, default 3263): +3G
Command (m for help): n
First cylinder (2806-3263, default 2806):
Using default value 2806
Last cylinder or +size or +sizeM or +sizeK (2806-3263, default 3263): +1G
Command (m for help): n
First cylinder (2929-3263, default 2929): +1G
Value out of range.
First cylinder (2929-3263, default 2929):
Using default value 2929
Last cylinder or +size or +sizeM or +sizeK (2929-3263, default 3263): +1G
Command (m for help): n
First cylinder (3052-3263, default 3052):
Using default value 3052
Last cylinder or +size or +sizeM or +sizeK (3052-3263, default 3263):
Using default value 3263
Command (m for help): w
The partition table has been altered!
Calling ioctl() to re-read partition table.
Syncing disks.
[root@node1 ~]#
#Partitioning only needs to be done on node 1.
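#If node 2 does not see the new partitions right away, the partition table can usually be
#re-read there without a reboot (a hedged aside; in this walkthrough node 2 picked them up
#automatically):
partprobe /dev/sdb
fdisk -l /dev/sdb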
--Review the partition layout:
[root@node1 ~]# fdisk -l
Disk /dev/sda: 68.8 GB, 68862869504 bytes
255 heads, 63 sectors/track, 8372 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sda1 * 1 13 104391 83 Linux
/dev/sda2 14 8372 67143667+ 8e Linux LVM
Disk /dev/sdb: 26.8 GB, 26843545600 bytes
255 heads, 63 sectors/track, 3263 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sdb1 1 123 987966 83 Linux
/dev/sdb2 124 246 987997+ 83 Linux
/dev/sdb3 247 369 987997+ 83 Linux
/dev/sdb4 370 3263 23246055 5 Extended
/dev/sdb5 370 1221 6843658+ 83 Linux
/dev/sdb6 1222 2073 6843658+ 83 Linux
/dev/sdb7 2074 2439 2939863+ 83 Linux
/dev/sdb8 2440 2805 2939863+ 83 Linux
/dev/sdb9 2806 2928 987966 83 Linux
/dev/sdb10 2929 3051 987966 83 Linux
/dev/sdb11 3052 3263 1702858+ 83 Linux
[root@node1 ~]#
--Check the disk on node2; because the disk is shared, node2 already sees the new partitions:
[root@node2 ~]# fdisk -l
Disk /dev/sda: 68.8 GB, 68862869504 bytes
255 heads, 63 sectors/track, 8372 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sda1 * 1 13 104391 83 Linux
/dev/sda2 14 8372 67143667+ 8e Linux LVM
Disk /dev/sdb: 26.8 GB, 26843545600 bytes
255 heads, 63 sectors/track, 3263 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sdb1 1 123 987966 83 Linux
/dev/sdb2 124 246 987997+ 83 Linux
/dev/sdb3 247 369 987997+ 83 Linux
/dev/sdb4 370 3263 23246055 5 Extended
/dev/sdb5 370 1221 6843658+ 83 Linux
/dev/sdb6 1222 2073 6843658+ 83 Linux
/dev/sdb7 2074 2439 2939863+ 83 Linux
/dev/sdb8 2440 2805 2939863+ 83 Linux
/dev/sdb9 2806 2928 987966 83 Linux
/dev/sdb10 2929 3051 987966 83 Linux
/dev/sdb11 3052 3263 1702858+ 83 Linux
[root@node2 ~]#
---Install the ASM support software:
--Create an asm directory and upload the rpm packages into it:
[root@node1 ~]#
[root@node1 ~]# mkdir asm
[root@node1 ~]# ls
anaconda-ks.cfg asm Desktop install.log install.log.syslog
[root@node1 ~]#
[root@node1 ~]# cd asm
[root@node1 asm]# rz
rz waiting to receive.
Starting zmodem transfer. Press Ctrl+C to cancel.
(progress output trimmed; three rpm packages transferred, 0 errors)
#Upload complete.
--Install the rpm packages:
[root@node1 asm]#
[root@node1 asm]# ls
oracleasm-2.6.18-194.el5-2.0.5-1.el5.i686.rpm #this rpm's version must match the OS kernel version; check the kernel version with: uname -a
oracleasm-support-2.1.8-1.el5.i386.rpm
oracleasmlib-2.0.4-1.el5.i386.rpm
[root@node1 asm]#
[root@node1 asm]#
[root@node1 asm]# rpm -ivh *
warning: oracleasm-2.6.18-194.el5-2.0.5-1.el5.i686.rpm: Header V3 DSA signature: NOKEY, key ID 1e5e0159
Preparing... ########################################### [100%]
1:oracleasm-support ########################################### [ 33%]
2:oracleasm-2.6.18-194.el########################################### [ 67%]
3:oracleasmlib ########################################### [100%]
[root@node1 asm]#
#Installation finished; do the same on both nodes.
---Configure oracleasm (same on both nodes):
[root@node1 asm]#
[root@node1 asm]# service oracleasm configure
Configuring the Oracle ASM library driver.
This will configure the on-boot properties of the Oracle ASM library
driver. The following questions will determine whether the driver is
loaded on boot and what permissions it will have. The current values
will be shown in brackets ('[]'). Hitting <ENTER> without typing an
answer will keep that current value. Ctrl-C will abort.
Default user to own the driver interface []: grid
Default group to own the driver interface []: asmadmin
Start Oracle ASM library driver on boot (y/n) [n]: y
Scan for Oracle ASM disks on boot (y/n) [y]:
Writing Oracle ASM library driver configuration: done
Initializing the Oracle ASMLib driver: [ OK ]
Scanning the system for Oracle ASMLib disks: [ OK ]
[root@node1 asm]#
[root@node1 asm]#
---Create the Oracle ASM disks:
--Node 1: #createdisk is run on one node only (node 1 here); node 2 just rescans afterwards.
[root@node1 asm]#
[root@node1 asm]# service oracleasm
Usage: /etc/init.d/oracleasm {start|stop|restart|enable|disable|configure|createdisk|deletedisk|querydisk|listdisks|scandisks|status}
[root@node1 asm]# service oracleasm createdisk OCR_VOTE1 /dev/sdb1
Marking disk "OCR_VOTE1" as an ASM disk: [ OK ]
[root@node1 asm]# service oracleasm createdisk OCR_VOTE2 /dev/sdb2
Marking disk "OCR_VOTE2" as an ASM disk: [ OK ]
[root@node1 asm]# service oracleasm createdisk OCR_VOTE3 /dev/sdb3
Marking disk "OCR_VOTE3" as an ASM disk: [ OK ]
[root@node1 asm]# service oracleasm createdisk ASM_DATA1 /dev/sdb5
Marking disk "ASM_DATA1" as an ASM disk: [ OK ]
[root@node1 asm]# service oracleasm createdisk ASM_DATA2 /dev/sdb6
Marking disk "ASM_DATA2" as an ASM disk: [ OK ]
[root@node1 asm]# service oracleasm createdisk ASM_RCY1 /dev/sdb7
Marking disk "ASM_RCY1" as an ASM disk: [ OK ]
[root@node1 asm]# service oracleasm createdisk ASM_RCY2 /dev/sdb8
Marking disk "ASM_RCY2" as an ASM disk: [ OK ]
[root@node1 asm]#
[root@node1 asm]# service oracleasm listdisks
ASM_DATA1
ASM_DATA2
ASM_RCY1
ASM_RCY2
OCR_VOTE1
OCR_VOTE2
OCR_VOTE3
[root@node1 asm]#
--Node 2:
[root@node2 asm]#
[root@node2 asm]# service oracleasm scandisks
Scanning the system for Oracle ASMLib disks: [ OK ]
[root@node2 asm]#
[root@node2 asm]#
[root@node2 asm]# service oracleasm listdisks
ASM_DATA1
ASM_DATA2
ASM_RCY1
ASM_RCY2
OCR_VOTE1
OCR_VOTE2
OCR_VOTE3
[root@node2 asm]#
#The ASM disks created on node 1 are now visible on node 2.
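#Optional check: confirm the label-to-device mapping on either node (a sketch; oracleasm
#also exposes the stamped disks under /dev/oracleasm/disks):
service oracleasm querydisk /dev/sdb1    # should report the label OCR_VOTE1
ls -l /dev/oracleasm/disks/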
----Set up mutual trust between the hosts:
--Establish trust between the oracle and grid users across the nodes (SSH public/private key pairs)
--Generate key pairs (for the oracle and grid users on all nodes):
---oracle user:
--Node 1, oracle user:
[root@node1 ~]# su - oracle
[oracle@node1 ~]$
[oracle@node1 ~]$ ssh-keygen -t rsa #RSA key pair; leave the passphrase empty#
Generating public/private rsa key pair.
Enter file in which to save the key (/home/oracle/.ssh/id_rsa):
Created directory '/home/oracle/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/oracle/.ssh/id_rsa.
Your public key has been saved in /home/oracle/.ssh/id_rsa.pub.
The key fingerprint is:
09:7d:4d:26:a5:3c:40:24:55:bd:25:5f:cd:e3:5f:73 oracle@node1
[oracle@node1 ~]$
[oracle@node1 ~]$ ssh-keygen -t dsa #DSA key pair; leave the passphrase empty#
Generating public/private dsa key pair.
Enter file in which to save the key (/home/oracle/.ssh/id_dsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/oracle/.ssh/id_dsa.
Your public key has been saved in /home/oracle/.ssh/id_dsa.pub.
The key fingerprint is:
32:3f:5e:7a:fc:19:78:cf:39:24:89:6e:80:dd:7a:65 oracle@node1
[oracle@node1 ~]$
[oracle@node1 ~]$ ls .ssh
id_dsa id_dsa.pub id_rsa id_rsa.pub
[oracle@node1 ~]$
--Node 2, oracle user:
[root@node2 ~]# su - oracle
[oracle@node2 ~]$
[oracle@node2 ~]$ ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/oracle/.ssh/id_rsa):
Created directory '/home/oracle/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/oracle/.ssh/id_rsa.
Your public key has been saved in /home/oracle/.ssh/id_rsa.pub.
The key fingerprint is:
22:28:28:eb:b0:fa:43:00:71:f7:ca:a2:53:ed:38:ca oracle@node2
[oracle@node2 ~]$
[oracle@node2 ~]$ ssh-keygen -t dsa
Generating public/private dsa key pair.
Enter file in which to save the key (/home/oracle/.ssh/id_dsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/oracle/.ssh/id_dsa.
Your public key has been saved in /home/oracle/.ssh/id_dsa.pub.
The key fingerprint is:
04:3c:bf:64:db:e3:db:9b:19:90:45:d4:06:dd:71:30 oracle@node2
[oracle@node2 ~]$
[oracle@node2 ~]$ ls .ssh
id_dsa id_dsa.pub id_rsa id_rsa.pub
[oracle@node2 ~]$
---Configure the trust relationship:
[oracle@node1 ~]$ cat .ssh/id_rsa.pub >>.ssh/authorized_keys
[oracle@node1 ~]$ cat .ssh/id_dsa.pub >>.ssh/authorized_keys
[oracle@node1 ~]$ ssh node2 cat .ssh/id_rsa.pub >>.ssh/authorized_keys
The authenticity of host 'node2 (192.168.56.12)' can't be established.
RSA key fingerprint is 25:cb:8a:67:4a:41:eb:1d:39:1e:ba:8f:0d:24:05:21.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node2,192.168.56.12' (RSA) to the list of known hosts.
oracle@node2's password:
[oracle@node1 ~]$ ssh node2 cat .ssh/id_dsa.pub >>.ssh/authorized_keys
oracle@node2's password:
[oracle@node1 ~]$ scp .ssh/authorized_keys node2:~/.ssh
oracle@node2's password:
authorized_keys 100% 1992 2.0KB/s 00:00
[oracle@node1 ~]$
--The key and public key files can now be inspected:
--Node 1:
[oracle@node1 ~]$ ls .ssh
authorized_keys id_dsa id_dsa.pub id_rsa id_rsa.pub known_hosts
[oracle@node1 ~]$
--Node 2:
[oracle@node2 ~]$ ls .ssh
authorized_keys id_dsa id_dsa.pub id_rsa id_rsa.pub
[oracle@node2 ~]$
---Verify the trust relationship:
--Node 1:
[oracle@node1 ~]$
[oracle@node1 ~]$ ssh node2 date
Fri Oct 28 13:16:54 CST 2016
[oracle@node1 ~]$ ssh node2-priv date
The authenticity of host 'node2-priv (10.10.10.12)' can't be established.
RSA key fingerprint is 25:cb:8a:67:4a:41:eb:1d:39:1e:ba:8f:0d:24:05:21.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node2-priv,10.10.10.12' (RSA) to the list of known hosts.
Fri Oct 28 13:17:05 CST 2016
[oracle@node1 ~]$ ssh node2-priv date
Fri Oct 28 13:17:10 CST 2016
[oracle@node1 ~]$ ssh node1 date
The authenticity of host 'node1 (192.168.56.11)' can't be established.
RSA key fingerprint is 25:cb:8a:67:4a:41:eb:1d:39:1e:ba:8f:0d:24:05:21.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node1,192.168.56.11' (RSA) to the list of known hosts.
Fri Oct 28 13:17:38 CST 2016
[oracle@node1 ~]$ ssh node1 date
Fri Oct 28 13:17:42 CST 2016
[oracle@node1 ~]$ ssh node1-priv date
The authenticity of host 'node1-priv (10.10.10.11)' can't be established.
RSA key fingerprint is 25:cb:8a:67:4a:41:eb:1d:39:1e:ba:8f:0d:24:05:21.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node1-priv,10.10.10.11' (RSA) to the list of known hosts.
Fri Oct 28 13:17:54 CST 2016
[oracle@node1 ~]$ ssh node1-priv date
Fri Oct 28 13:17:57 CST 2016
[oracle@node1 ~]$
[oracle@node1 ~]$ ls .ssh
authorized_keys id_dsa id_dsa.pub id_rsa id_rsa.pub known_hosts
[oracle@node1 ~]$
--Node 2:
[oracle@node2 ~]$ ssh node1 date
The authenticity of host 'node1 (192.168.56.11)' can't be established.
RSA key fingerprint is 25:cb:8a:67:4a:41:eb:1d:39:1e:ba:8f:0d:24:05:21.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node1,192.168.56.11' (RSA) to the list of known hosts.
Fri Oct 28 13:19:23 CST 2016
[oracle@node2 ~]$ ssh node1 date
Fri Oct 28 13:19:26 CST 2016
[oracle@node2 ~]$ ssh node1-priv date
The authenticity of host 'node1-priv (10.10.10.11)' can't be established.
RSA key fingerprint is 25:cb:8a:67:4a:41:eb:1d:39:1e:ba:8f:0d:24:05:21.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node1-priv,10.10.10.11' (RSA) to the list of known hosts.
Fri Oct 28 13:19:51 CST 2016
[oracle@node2 ~]$ ssh node1-priv date
Fri Oct 28 13:19:54 CST 2016
[oracle@node2 ~]$ ssh node2 date
The authenticity of host 'node2 (192.168.56.12)' can't be established.
RSA key fingerprint is 25:cb:8a:67:4a:41:eb:1d:39:1e:ba:8f:0d:24:05:21.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node2,192.168.56.12' (RSA) to the list of known hosts.
Fri Oct 28 13:20:09 CST 2016
[oracle@node2 ~]$ ssh node2 date
Fri Oct 28 13:20:12 CST 2016
[oracle@node2 ~]$ ssh node2-priv date
The authenticity of host 'node2-priv (10.10.10.12)' can't be established.
RSA key fingerprint is 25:cb:8a:67:4a:41:eb:1d:39:1e:ba:8f:0d:24:05:21.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node2-priv,10.10.10.12' (RSA) to the list of known hosts.
Fri Oct 28 13:20:23 CST 2016
[oracle@node2 ~]$ ssh node2-priv date
Fri Oct 28 13:20:26 CST 2016
[oracle@node2 ~]$
[oracle@node2 ~]$
[oracle@node2 ~]$ ls .ssh
authorized_keys id_dsa id_dsa.pub id_rsa id_rsa.pub known_hosts
[oracle@node2 ~]$
#The oracle users on the two nodes now trust each other. Repeat the same steps as the grid user on both nodes to establish trust between the grid users as well.
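#Optional check: each command below should print a date without prompting for a password
#(a sketch; run once as oracle and once as grid on each node):
for h in node1 node1-priv node2 node2-priv; do
    ssh $h date
done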
----Configure a local yum repository (both nodes):
[root@node1 ~]# cd /etc/yum.repos.d
[root@node1 yum.repos.d]# ls
rhel-debuginfo.repo
[root@node1 yum.repos.d]# cp rhel-debuginfo.repo yum.repo
[root@node1 yum.repos.d]# vi yum.repo
[Base]
name=Red Hat Enterprise Linux
baseurl=file:///media/Server
enabled=1
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
#Do the same on both nodes.
--Attach the installation DVD to the virtual machine.
#This is done through the VirtualBox GUI, so the details are omitted.
--Mount the DVD and install the packages:
[root@node1 yum.repos.d]#
[root@node1 yum.repos.d]# mount /dev/hdc /media
mount: block device /dev/hdc is write-protected, mounting read-only
[root@node1 yum.repos.d]#
#Do the same on both nodes.
--Install the required packages with yum:
[root@node1 yum.repos.d]# yum install libaio* -y
Loaded plugins: rhnplugin, security
Repository rhel-debuginfo is listed more than once in the configuration
Repository rhel-debuginfo-beta is listed more than once in the configuration
This system is not registered with RHN.
RHN support will be disabled.
Base | 1.3 kB 00:00
Base/primary | 753 kB 00:00
Base 2348/2348
Setting up Install Process
Package libaio-0.3.106-5.i386 already installed and latest version
Resolving Dependencies
--> Running transaction check
---> Package libaio-devel.i386 0:0.3.106-5 set to be updated
--> Finished Dependency Resolution
Dependencies Resolved
===============================================================================================
Package Arch Version Repository Size
===============================================================================================
Installing:
libaio-devel i386 0.3.106-5 Base 12 k
Transaction Summary
===============================================================================================
Install 1 Package(s)
Upgrade 0 Package(s)
Total download size: 12 k
Downloading Packages:
Running rpm_check_debug
Running Transaction Test
Finished Transaction Test
Transaction Test Succeeded
Running Transaction
Installing : libaio-devel 1/1
error: failed to stat /media/RHEL_5.5 i386 DVD: No such file or directory
Installed:
libaio-devel.i386 0:0.3.106-5
Complete!
[root@node1 yum.repos.d]#
[root@node1 yum.repos.d]# yum install syssta* -y
Loaded plugins: rhnplugin, security
Repository rhel-debuginfo is listed more than once in the configuration
Repository rhel-debuginfo-beta is listed more than once in the configuration
This system is not registered with RHN.
RHN support will be disabled.
Setting up Install Process
Resolving Dependencies
--> Running transaction check
---> Package sysstat.i386 0:7.0.2-3.el5 set to be updated
--> Finished Dependency Resolution
Dependencies Resolved
===============================================================================================
Package Arch Version Repository Size
===============================================================================================
Installing:
sysstat i386 7.0.2-3.el5 Base 170 k
Transaction Summary
===============================================================================================
Install 1 Package(s)
Upgrade 0 Package(s)
Total download size: 170 k
Downloading Packages:
Running rpm_check_debug
Running Transaction Test
Finished Transaction Test
Transaction Test Succeeded
Running Transaction
Installing : sysstat 1/1
error: failed to stat /media/RHEL_5.5 i386 DVD: No such file or directory
Installed:
sysstat.i386 0:7.0.2-3.el5
Complete!
[root@node1 yum.repos.d]#
[root@node1 yum.repos.d]# yum install unixO* -y
Loaded plugins: rhnplugin, security
Repository rhel-debuginfo is listed more than once in the configuration
Repository rhel-debuginfo-beta is listed more than once in the configuration
This system is not registered with RHN.
RHN support will be disabled.
Setting up Install Process
Resolving Dependencies
--> Running transaction check
---> Package unixODBC.i386 0:2.2.11-7.1 set to be updated
---> Package unixODBC-devel.i386 0:2.2.11-7.1 set to be updated
---> Package unixODBC-kde.i386 0:2.2.11-7.1 set to be updated
--> Finished Dependency Resolution
Dependencies Resolved
===============================================================================================
Package Arch Version Repository Size
===============================================================================================
Installing:
unixODBC i386 2.2.11-7.1 Base 832 k
unixODBC-devel i386 2.2.11-7.1 Base 737 k
unixODBC-kde i386 2.2.11-7.1 Base 558 k
Transaction Summary
===============================================================================================
Install 3 Package(s)
Upgrade 0 Package(s)
Total download size: 2.1 M
Downloading Packages:
-----------------------------------------------------------------------------------------------
Total 429 MB/s | 2.1 MB 00:00
Running rpm_check_debug
Running Transaction Test
Finished Transaction Test
Transaction Test Succeeded
Running Transaction
Installing : unixODBC 1/3
error: failed to stat /media/RHEL_5.5 i386 DVD: No such file or directory
Installing : unixODBC-kde 2/3
Installing : unixODBC-devel 3/3
Installed:
unixODBC.i386 0:2.2.11-7.1 unixODBC-devel.i386 0:2.2.11-7.1 unixODBC-kde.i386 0:2.2.11-7.1
Complete!
--Verify the installation; there should be three unixODBC packages:
[root@node1 yum.repos.d]# rpm -qa |grep -i odbc
unixODBC-kde-2.2.11-7.1
unixODBC-devel-2.2.11-7.1
unixODBC-2.2.11-7.1
[root@node1 yum.repos.d]#
#The yum installations succeeded; do the same on both nodes.
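#Beyond libaio, sysstat, and unixODBC, 11gR2 on RHEL 5 expects a longer list of packages.
#A quick loop flags anything missing (a sketch based on the 11.2 installation guide's
#package list; adjust as needed):
for p in binutils compat-libstdc++-33 elfutils-libelf elfutils-libelf-devel \
         gcc gcc-c++ glibc glibc-common glibc-devel glibc-headers ksh libaio \
         libaio-devel libgcc libstdc++ libstdc++-devel make sysstat; do
    rpm -q $p >/dev/null 2>&1 || echo "missing: $p"
done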
----Install the GI software:
--As the grid user, create a soft directory. (The installation only needs to be run on one node; the installer copies it to the other node.)
--After creating the soft directory, upload the installation media into it:
[grid@node1 ~]$ pwd
/home/grid
[grid@node1 ~]$ mkdir soft
[grid@node1 ~]$ cd soft/
[grid@node1 soft]$ pwd
/home/grid/soft
[grid@node1 soft]$
[grid@node1 soft]$ rz
rz waiting to receive.
Starting zmodem transfer. Press Ctrl+C to cancel.
100% 957843 KB 5949 KB/s 00:02:41 0 Errors
[grid@node1 soft]$ ls
linux_11gR2_grid.zip
[grid@node1 soft]$
#Upload complete.
--Unzip the installation package:
[grid@node1 soft]$
[grid@node1 soft]$ unzip linux_11gR2_grid.zip
... ...
creating: grid/stage/properties/
inflating: grid/stage/properties/oracle.crs_Complete.properties
creating: grid/stage/sizes/
inflating: grid/stage/sizes/oracle.crs11.2.0.1.0Complete.sizes.properties
inflating: grid/stage/OuiConfigVariables.xml
inflating: grid/stage/fastcopy.xml
[grid@node1 soft]$
[grid@node1 soft]$ ls
grid linux_11gR2_grid.zip
[grid@node1 soft]$
#Unzip complete.
--Enter the grid directory:
[grid@node1 soft]$ cd grid/
[grid@node1 grid]$ ls
doc install response rpm runcluvfy.sh runInstaller sshsetup stage welcome.html
[grid@node1 grid]$
--First run the cluster verification utility to pre-check the GI installation environment:
[grid@node1 grid]$ ./runcluvfy.sh stage -pre crsinst -n node1,node2 -fixup -verbose
Performing pre-checks for cluster services setup
Checking node reachability...
Check: Node reachability from node "node1"
Destination Node Reachable?
------------------------------------ ------------------------
node1 yes
node2 yes
Result: Node reachability check passed from node "node1"
Checking user equivalence...
Check: User equivalence for user "grid"
Node Name Comment
------------------------------------ ------------------------
node2 passed
node1 passed
Result: User equivalence check passed for user "grid"
Checking node connectivity...
Checking hosts config file...
Node Name Status Comment
------------ ------------------------ ------------------------
node2 passed
node1 passed
Verification of the hosts config file successful
Interface information for node "node2"
Name IP Address Subnet Gateway Def. Gateway HW Address MTU
------ --------------- --------------- --------------- --------------- ----------------- ------
eth0 192.168.56.12 192.168.56.0 0.0.0.0 192.168.56.1 08:00:27:FB:15:AB 1500
eth1 10.10.10.12 10.10.10.0 0.0.0.0 192.168.56.1 08:00:27:59:BC:90 1500
Interface information for node "node1"
Name IP Address Subnet Gateway Def. Gateway HW Address MTU
------ --------------- --------------- --------------- --------------- ----------------- ------
eth0 192.168.56.11 192.168.56.0 0.0.0.0 192.168.56.1 08:00:27:FB:15:AA 1500
eth1 10.10.10.11 10.0.0.0 0.0.0.0 192.168.56.1 08:00:27:E1:66:38 1500
Check: Node connectivity of subnet "192.168.56.0"
Source Destination Connected?
------------------------------ ------------------------------ ----------------
node2:eth0 node1:eth0 yes
Result: Node connectivity passed for subnet "192.168.56.0" with node(s) node2,node1
Check: TCP connectivity of subnet "192.168.56.0"
Source Destination Connected?
------------------------------ ------------------------------ ----------------
node1:192.168.56.11 node2:192.168.56.12 passed
Result: TCP connectivity check passed for subnet "192.168.56.0"
... ... #Check both nodes for any failed items; if nothing fails, the environment is ready.
Checking to make sure user "grid" is not in "root" group
Node Name Status Comment
------------ ------------------------ ------------------------
node2 does not exist passed
node1 does not exist passed
Result: User "grid" is not part of "root" group. Check passed
Check default user file creation mask
Node Name Available Required Comment
------------ ------------------------ ------------------------ ----------
node2 0022 0022 passed
node1 0022 0022 passed
Result: Default user file creation mask check passed
Starting Clock synchronization checks using Network Time Protocol(NTP)...
NTP Configuration file check started...
Network Time Protocol(NTP) configuration file not found on any of the nodes. Oracle Cluster Time Synchronization Service(CTSS) can be used instead of NTP for time synchronization on the cluster nodes
Result: Clock synchronization check using Network Time Protocol(NTP) passed
Pre-check for cluster services setup was successful.
[grid@node1 grid]$
[grid@node1 grid]$
#Pre-check finished; no problems found.
----With the GI environment pre-check clean, launch the graphical installer and install the GI software.
---Start the terminal tool (Xshell here), point DISPLAY at the X server, and launch the graphical installer:
[grid@node1 grid]$ export DISPLAY=192.168.56.101:0.0
[grid@node1 grid]$
[grid@node1 grid]$ ls
doc install response rpm runcluvfy.sh runInstaller sshsetup stage welcome.html
[grid@node1 grid]$ ./runInstaller
Starting Oracle Universal Installer...
Checking Temp space: must be greater than 80 MB. Actual 51512 MB Passed
Checking swap space: must be greater than 150 MB. Actual 3935 MB Passed
Checking monitor: must be configured to display at least 256 colors. Actual 16777216 Passed
Preparing to launch Oracle Universal Installer from /tmp/OraInstall2016-10-28_02-02-45PM. Please wait ...
---Towards the end of the graphical GI installation, two scripts must be run as root on both nodes (orainstRoot.sh on both nodes first, then root.sh on node 1 followed by node 2):
[root@node1 ~]# /u01/app/oraInventory/orainstRoot.sh #node 1
[root@node2 ~]# /u01/app/oraInventory/orainstRoot.sh #node 2
[root@node1 ~]# /u01/11.2.0/grid/root.sh #node 1
[root@node2 ~]# /u01/11.2.0/grid/root.sh #node 2
#Installation complete.
---Then verify the cluster resources:
[grid@node1 ~]$ crs_stat -t
Name Type Target State Host
------------------------------------------------------------
ora....ER.lsnr ora....er.type ONLINE ONLINE node1
ora....N1.lsnr ora....er.type ONLINE ONLINE node1
ora....VOTE.dg ora....up.type ONLINE ONLINE node1
ora.asm ora.asm.type ONLINE ONLINE node1
ora.eons ora.eons.type ONLINE ONLINE node1
ora.gsd ora.gsd.type OFFLINE OFFLINE
ora....network ora....rk.type ONLINE ONLINE node1
ora....SM1.asm application ONLINE ONLINE node1
ora....E1.lsnr application ONLINE ONLINE node1
ora.node1.gsd application OFFLINE OFFLINE
ora.node1.ons application ONLINE ONLINE node1
ora.node1.vip ora....t1.type ONLINE ONLINE node1
ora....SM2.asm application ONLINE ONLINE node2
ora....E2.lsnr application ONLINE ONLINE node2
ora.node2.gsd application OFFLINE OFFLINE
ora.node2.ons application ONLINE ONLINE node2
ora.node2.vip ora....t1.type ONLINE ONLINE node2
ora.oc4j ora.oc4j.type OFFLINE OFFLINE
ora.ons ora.ons.type ONLINE ONLINE node1
ora....ry.acfs ora....fs.type ONLINE ONLINE node1
ora.scan1.vip ora....ip.type ONLINE ONLINE node1
---Check the following four services; when all are online, the database software installation can proceed:
[grid@node1 ~]$ crsctl check crs
CRS-4638: Oracle High Availability Services is online
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
[grid@node1 ~]$
----With the environment and resources verified, install the Oracle software (run the installer on a single node):
---As the oracle user, create a soft directory, upload the installation packages into it, and unzip them:
---Unzip the two Oracle software installation packages:
[oracle@node1 soft]$ unzip linux_11gR2_database_1of2.zip
creating: database/stage/sizes/
extracting: database/stage/sizes/oracle.server11.2.0.1.0EE.sizes.properties
extracting: database/stage/sizes/oracle.server11.2.0.1.0SE.sizes.properties
extracting: database/stage/sizes/oracle.server11.2.0.1.0Custom.sizes.properties
inflating: database/stage/OuiConfigVariables.xml
inflating: database/stage/oracle.server.11_2_0_1_0.xml
inflating: database/stage/fastcopy.xml
[oracle@node1 soft]$
[oracle@node1 soft]$
[oracle@node1 soft]$ unzip linux_11gR2_database_2of2.zip
inflating: database/stage/Components/oracle.sysman.console.db/11.2.0.1.0/1/DataFiles/filegroup6.jar
inflating: database/stage/Components/oracle.sysman.console.db/11.2.0.1.0/1/DataFiles/filegroup5.jar
inflating: database/stage/Components/oracle.sysman.console.db/11.2.0.1.0/1/DataFiles/filegroup2.jar
inflating: database/stage/Components/oracle.sysman.console.db/11.2.0.1.0/1/DataFiles/filegroup10.jar
inflating: database/stage/Components/oracle.sysman.console.db/11.2.0.1.0/1/DataFiles/filegroup11.jar
inflating: database/stage/Components/oracle.sysman.console.db/11.2.0.1.0/1/DataFiles/filegroup7.jar
[oracle@node1 soft]$
---After unzipping, inspect and enter the database directory:
[oracle@node1 soft]$ ls
database linux_11gR2_database_1of2.zip linux_11gR2_database_2of2.zip
[oracle@node1 soft]$
[oracle@node1 soft]$ cd database/
[oracle@node1 database]$ ls
doc install response rpm runInstaller sshsetup stage welcome.html
---Set the DISPLAY variable, launch the graphical installer, and install the Oracle software:
[oracle@node1 database]$ export DISPLAY=192.168.56.101:0.0
[oracle@node1 database]$ ./runInstaller
Starting Oracle Universal Installer...
Checking Temp space: must be greater than 80 MB. Actual 43893 MB Passed
Checking swap space: must be greater than 150 MB. Actual 3791 MB Passed
Checking monitor: must be configured to display at least 256 colors. Actual 16777216 Passed
Preparing to launch Oracle Universal Installer from /tmp/OraInstall2016-10-29_07-40-13AM. Please wait ...
... ...
#Oracle software installed successfully.
---As root, run the following script:
[root@node1 ~]# /u01/app/oracle/product/11.2.0/db_1/root.sh
Running Oracle 11g root.sh script...
The following environment variables are set as:
ORACLE_OWNER= oracle
ORACLE_HOME= /u01/app/oracle/product/11.2.0/db_1
Enter the full pathname of the local bin directory: [/usr/local/bin]:
The file "dbhome" already exists in /usr/local/bin. Overwrite it? (y/n)
[n]:
The file "oraenv" already exists in /usr/local/bin. Overwrite it? (y/n)
[n]:
The file "coraenv" already exists in /usr/local/bin. Overwrite it? (y/n)
[n]:
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root.sh script.
Now product-specific root actions will be performed.
Finished product-specific root actions.
#Script finished.
----Use ASMCA to create the remaining two ASM disk groups:
[grid@node1 ~]$ export DISPLAY=192.168.56.101:0.0
[grid@node1 ~]$
[grid@node1 ~]$ asmca
#Disk groups created.
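#The disk groups can also be confirmed from the command line as the grid user (a sketch;
#asmcmd ships with the grid home):
asmcmd lsdg    # should list the OCR/voting group plus the two new groups, all MOUNTED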
----From the terminal (SecureCRT here), launch the DBCA graphical interface and create the cluster database:
[oracle@node1 ~]$ export DISPLAY=192.168.56.101:0.0
[oracle@node1 ~]$
[oracle@node1 ~]$ dbca
... ...
#Database creation complete.
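#Final sanity check (a sketch; the database name prod follows from the ORACLE_SID=prod1/prod2
#settings above):
crsctl check crs                   # as grid: all four services online
crs_stat -t                        # as grid: database and instance resources ONLINE
srvctl status database -d prod     # as oracle: both instances running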
搭建RAC最重要的是前期工作,就是配备系统搭建环境。
俗话有说:磨刀不误砍柴工。这句话用在RAC的前期工作
最恰当不过了。经过多次的搭建测试表明:只要前期的
搭建系统环境配置无误,后面安装GI软件,oracle软件以及建库
就不会遇到各种报错。若中间环境配置的过程中稍有操作误差,
就会影响RAC集群数据库的安装工作,增加安装的工作量,要排查
错误,解决错误,甚至安装不到GI软件,或者安装不到oracle软件,
这样,就别说搭建RAC数据库了,也使前期的配置工作尽废。
使用虚拟机VM-VirtualBox
操作系统:redhat 5.5 32位
节点 ip ip-vip ip-priv
Node1 192.168.56.11 192.168.56.31 192.168.100.21
Node2 192.168.56.12 192.168.56.32 192.168.100.22
Rac_scan 192.168.56.25
----主机系统内存、网络储存等配置:
--节点1:
[root@node1 ~]# cat /etc/sysconfig/network
NETWORKING=yes
NETWORKING_IPV6=no
HOSTNAME=node1
[root@node1 ~]#
[root@node1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0
# Intel Corporation 82540EM Gigabit Ethernet Controller
DEVICE=eth0
BOOTPROTO=static
ONBOOT=yes
IPADDR=192.168.56.11
NETMASK=255.255.255.0
GATEWAY=192.168.56.1
[root@node1 ~]#
[root@node1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth1
# Intel Corporation 82540EM Gigabit Ethernet Controller
DEVICE=eth1
BOOTPROTO=static
ONBOOT=yes
IPADDR=192.168.100.21
GATEWAY=255.255.255.0
[root@node1 ~]#
--节点2:
[root@node2 ~]# cat /etc/sysconfig/network
NETWORKING=yes
NETWORKING_IPV6=no
HOSTNAME=node2
[root@node2 ~]#
[root@node2 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0
# Intel Corporation 82540EM Gigabit Ethernet Controller
DEVICE=eth0
BOOTPROTO=static
ONBOOT=yes
IPADDR=192.168.56.12
NETMASK=255.255.255.0
GATEWAY=192.168.56.1
[root@node2 ~]#
[root@node2 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth1
# Intel Corporation 82540EM Gigabit Ethernet Controller
DEVICE=eth1
BOOTPROTO=static
ONBOOT=yes
IPADDR=192.168.100.22
NETMASK=255.255.255.0
[root@node2 ~]#
--设置hosts文件:
[root@node1 ~]# vi /etc/hosts
# Do not remove the following line, or various programs
# that require network functionality will fail.
127.0.0.1 localhost
::1 localhost6.localdomain6 localhost6
192.168.56.11 node1
192.168.56.31 node1-vip
192.168.100.21 node1-priv
192.168.56.12 node2
192.168.56.32 node2-vip
192.168.100.22 node2-priv
192.168.56.25 rac_scan
~
#两个节点一样设置。
----添加组或者用户:
--删除已存在的用户或者用户组:
[root@node1 ~]# cd /var/spool/mail
[root@node1 mail]# ls
oracle rpc tom
[root@node1 mail]# rm -rf oracle
[root@node1 mail]# cd /home
[root@node1 home]# ls
oracle tom
[root@node1 home]# rm -rf oracle/
[root@node1 home]# cd \
[root@node1 home]# cd \
>
[root@node1 ~]#
[root@node1 ~]# userdel oracle
[root@node1 ~]# groupdel dba
[root@node1 ~]# groupdel oinstall
[root@node1 ~]# groupdel oper
groupdel: group oper does not exist
[root@node1 ~]#
#删除原有的用户或者组,两个节点都是这样操作。
--添加用户或用户组:
[root@node1 ~]#
[root@node1 ~]# groupadd -g 200 oinstall
[root@node1 ~]# groupadd -g 201 dba
[root@node1 ~]# groupadd -g 202 oper
[root@node1 ~]# groupadd -g 203 asmadmin
[root@node1 ~]# groupadd -g 204 asmoper
[root@node1 ~]# groupadd -g 205 asmdba
[root@node1 ~]# useradd -u 200 -g oinstall -G dba,asmdba,oper oracle
[root@node1 ~]# useradd -u 201 -g oinstall -G asmadmin,asmdba,asmoper,oper,dba grid
[root@node1 ~]#
[root@node2 ~]# cd /var/spool/mail
[root@node2 mail]# rm -rf oracle
[root@node2 mail]# cd /home
[root@node2 home]# rm -rf oracle/
[root@node2 home]# cd \
>
[root@node2 ~]#
[root@node2 ~]#
[root@node2 ~]# userdel oracle
[root@node2 ~]# groupdel dba
[root@node2 ~]# groupdel oinstall
[root@node2 ~]# groupdel oper
groupdel: group oper does not exist
[root@node2 ~]#
[root@node2 ~]# groupadd -g 200 oinstall
[root@node2 ~]# groupadd -g 201 dba
[root@node2 ~]# groupadd -g 202 oper
[root@node2 ~]# groupadd -g 203 asmadmin
[root@node2 ~]# groupadd -g 204 asmoper
[root@node2 ~]# groupadd -g 205 asmdba
[root@node2 ~]# useradd -u 200 -g oinstall -G dba,asmdba,oper oracle
[root@node2 ~]# useradd -u 201 -g oinstall -G asmadmin,asmdba,asmoper,oper,dba grid
[root@node2 ~]#
----创建相关目录并授权脚本:
--节点1:
[root@node1 ~]# pwd
/root
[root@node1 ~]# mkdir -p /u01/app/oraInventory
[root@node1 ~]# chown -R grid:oinstall /u01/app/oraInventory/
[root@node1 ~]# chmod -R 775 /u01/app/oraInventory/
[root@node1 ~]# mkdir -p /u01/11.2.0/grid
[root@node1 ~]# chown -R grid:oinstall /u01/11.2.0/grid/
[root@node1 ~]# chmod -R 775 /u01/11.2.0/grid/
[root@node1 ~]# mkdir -p /u01/app/oracle
[root@node1 ~]# mkdir -p /u01/app/oracle/cfgtoollogs
[root@node1 ~]# mkdir -p /u01/app/oracle/product/11.2.0/db_1
[root@node1 ~]# chown -R oracle:oinstall /u01/app/oracle
[root@node1 ~]# chmod -R 775 /u01/app/oracle
[root@node1 ~]#
----------------------------
--节点2:
[root@node2 ~]# pwd
/root
[root@node2 ~]# mkdir -p /u01/app/oraInventory
[root@node2 ~]# chown -R grid:oinstall /u01/app/oraInventory/
[root@node2 ~]# chmod -R 775 /u01/app/oraInventory/
[root@node2 ~]# mkdir -p /u01/11.2.0/grid
[root@node2 ~]# chown -R grid:oinstall /u01/11.2.0/grid/
[root@node2 ~]# chmod -R 775 /u01/11.2.0/grid/
[root@node2 ~]# mkdir -p /u01/app/oracle
[root@node2 ~]# mkdir -p /u01/app/oracle/cfgtoollogs
[root@node2 ~]# mkdir -p /u01/app/oracle/product/11.2.0/db_1
[root@node2 ~]# chown -R oracle:oinstall /u01/app/oracle
[root@node2 ~]# chmod -R 775 /u01/app/oracle
[root@node2 ~]#
----设置oracle用户和grid用户密码:
[root@node1 ~]#
[root@node1 ~]# passwd oracle
Changing password for user oracle.
New UNIX password:
BAD PASSWORD: it is based on a dictionary word
Retype new UNIX password:
passwd: all authentication tokens updated successfully.
[root@node1 ~]# passwd grid
Changing password for user grid.
New UNIX password:
BAD PASSWORD: it is based on a dictionary word
Retype new UNIX password:
passwd: all authentication tokens updated successfully.
[root@node1 ~]#
------------------------
[root@node2 ~]#
[root@node2 ~]# passwd oracle
Changing password for user oracle.
New UNIX password:
BAD PASSWORD: it is based on a dictionary word
Retype new UNIX password:
passwd: all authentication tokens updated successfully.
[root@node2 ~]# passwd grid
Changing password for user grid.
New UNIX password:
BAD PASSWORD: it is based on a dictionary word
Retype new UNIX password:
passwd: all authentication tokens updated successfully.
[root@node2 ~]#
#两个节点都同样设置。
----修改内核参数:
--添加内核文件内容1:
[root@node1 ~]# vi /etc/sysctl.conf
# Kernel sysctl configuration file for Red Hat Linux
#
# For binary values, 0 is disabled, 1 is enabled. See sysctl(8) and
# sysctl.conf(5) for more details.
... ...
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.shmall = 2097152
kernel.shmmax = 536870912
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048586
--内核参数修改生效:
[root@node1 ~]# sysctl -p
net.ipv4.ip_forward = 0
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
kernel.sysrq = 0
kernel.core_uses_pid = 1
net.ipv4.tcp_syncookies = 1
kernel.msgmnb = 65536
kernel.msgmax = 65536
kernel.shmmax = 4294967295
kernel.shmall = 268435456
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.shmall = 2097152
kernel.shmmax = 536870912
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048586
[root@node1 ~]#
#两个节点同样的操作。
--添加内核文件内容2:
[root@node1 ~]# vi /etc/security/limits.conf
# /etc/security/limits.conf
#
#Each line describes a limit for a user in the form:
... ...
oracle soft nproc 2047
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536
oracle soft stack 10240
grid soft nproc 2047
grid hard nproc 16384
grid soft nofile 1024
grid hard nofile 65536
grid soft stack 10240
--添加内核文件内容3:
[root@node1 ~]# vi /etc/pam.d/login
session required /lib/security/pam_limits.so
#两个加点同样操作。
--添加内核文件内容4:
[root@node1 ~]# vi /etc/profile :
if [ $USER = "oracle" ]||[ $USER = "grid" ]; then
if [ $SHELL = "/bin/ksh" ]; then
ulimit -p 16384
ulimit -n 65536
else
ulimit -u 16384 -n 65536
fi
fi
#两个节点同样操作。
----关闭系统ntp服务,采用oracle 自带的时间同步服务:
--停止部分系统服务:
[root@node1 ~]#
[root@node1 ~]# chkconfig ntpd off
[root@node1 ~]# mv /etc/ntp.conf /etc/ntp.conf.bak
[root@node1 ~]# chkconfig sendmail off
[root@node1 ~]#
#两个节点同样操作。
--校验连个节点时间相差20s内:
[root@node1 ~]#
[root@node1 ~]# date
Fri Oct 28 12:23:11 CST 2016
[root@node1 ~]#
[root@node2 ~]#
[root@node2 ~]# date
Fri Oct 28 12:23:20 CST 2016
[root@node2 ~]#
----进入oracle与grid用户分别修改环境变量(所有节点):
#node1 ORACLE_SID=prod1 ORACLE_SID=+ASM1
#node2 ORACLE_SID=prod2 ORACLE_SID=+ASM2
---oracle用户:
--节点1:
[oracle@node1 ~]$ vi .bash_profile
# .bash_profile
# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
# User specific environment and startup programs
PATH=$PATH:$HOME/bin
export PATH
export EDITOR=vi
export ORACLE_SID=prod1
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_1
export LD_LIBRARY_PATH=$ORACLE_HOME/lib
export PATH=$ORACLE_HOME/bin:/bin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/X11R6/bin
umask 022
~
[oracle@node1 ~]$ . .bash_profile
[oracle@node1 ~]$
--节点2:
[oracle@node2 ~]$ vi .bash_profile
# .bash_profile
# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
# User specific environment and startup programs
PATH=$PATH:$HOME/bin
export PATH
export EDITOR=vi
export ORACLE_SID=prod2
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_1
export LD_LIBRARY_PATH=$ORACLE_HOME/lib
export PATH=$ORACLE_HOME/bin:/bin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/X11R6/bin
umask 022
[oracle@node2 ~]$ . .bash_profile
[oracle@node2 ~]$
---grid用户:
--节点1:
[grid@node1 ~]$ vi .bash_profile
# .bash_profile
# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
# User specific environment and startup programs
PATH=$PATH:$HOME/bin
export PATH
export EDITOR=vi
export ORACLE_SID=+ASM1
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=/u01/11.2.0/grid
export GRID_HOME=/u01/11.2.0/grid
export LD_LIBRARY_PATH=$ORACLE_HOME/lib
export THREADS_FLAG=native
export PATH=$ORACLE_HOME/bin:/bin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/X11R6/bin
umask 022
~
~
".bash_profile" 23L, 484C written
[grid@node1 ~]$ . .bash_profile
[grid@node1 ~]$
--节点2:
[grid@node2 ~]$ vi .bash_profile
# .bash_profile
# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
# User specific environment and startup programs
PATH=$PATH:$HOME/bin
export PATH
export EDITOR=vi
export ORACLE_SID=+ASM2
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=/u01/11.2.0/grid
export GRID_HOME=/u01/11.2.0/grid
export LD_LIBRARY_PATH=$ORACLE_HOME/lib
export THREADS_FLAG=native
export PATH=$ORACLE_HOME/bin:/bin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/X11R6/bin
umask 022
~
~
".bash_profile" 23L, 484C written
[grid@node2 ~]$ . .bash_profile
[grid@node2 ~]$
----配置共享存储:
---通过ASM管理:
1)OCR DISK :存储CRS资源配置信息
2)VOTE DISK:仲裁盘,记录节点状态
3)Data Disk:存放datafile、controlfile、redologfile、spfile 等
4)Recovery Area:存放flashback database log、archive log、rman backup等
--查看磁盘大小情况:
[root@node1 ~]# fdisk -l
Disk /dev/sda: 68.8 GB, 68862869504 bytes
255 heads, 63 sectors/track, 8372 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sda1 * 1 13 104391 83 Linux
/dev/sda2 14 8372 67143667+ 8e Linux LVM
Disk /dev/sdb: 26.8 GB, 26843545600 bytes
255 heads, 63 sectors/track, 3263 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Disk /dev/sdb doesn't contain a valid partition table
[root@node1 ~]#
--分配磁盘分区:
[root@node1 ~]# fdisk /dev/sdb
Device contains neither a valid DOS partition table, nor Sun, SGI or OSF disklabel
Building a new DOS disklabel. Changes will remain in memory only,
until you decide to write them. After that, of course, the previous
content won't be recoverable.
The number of cylinders for this disk is set to 3263.
There is nothing wrong with that, but this is larger than 1024,
and could in certain setups cause problems with:
1) software that runs at boot time (e.g., old versions of LILO)
2) booting and partitioning software from other OSs
(e.g., DOS FDISK, OS/2 FDISK)
Warning: invalid flag 0x0000 of partition table 4 will be corrected by w(rite)
--分盘操作:
Command (m for help): n
Command action
e extended
p primary partition (1-4)
p
Partition number (1-4): 1
First cylinder (1-3263, default 1):
Using default value 1
Last cylinder or +size or +sizeM or +sizeK (1-3263, default 3263): +1G
Command (m for help): n
Command action
e extended
p primary partition (1-4)
p
Partition number (1-4): 2
First cylinder (124-3263, default 124):
Using default value 124
Last cylinder or +size or +sizeM or +sizeK (124-3263, default 3263): +1G
Command (m for help): n
Command action
e extended
p primary partition (1-4)
p
Partition number (1-4): 3
First cylinder (247-3263, default 247):
Using default value 247
Last cylinder or +size or +sizeM or +sizeK (247-3263, default 3263): +1G
Command (m for help): n
Command action
e extended
p primary partition (1-4)
e
Selected partition 4
First cylinder (370-3263, default 370):
Using default value 370
Last cylinder or +size or +sizeM or +sizeK (370-3263, default 3263):
Using default value 3263
Command (m for help): n
First cylinder (370-3263, default 370):
Using default value 370
Last cylinder or +size or +sizeM or +sizeK (370-3263, default 3263): +7G
Command (m for help): n
First cylinder (1222-3263, default 1222):
Using default value 1222
Last cylinder or +size or +sizeM or +sizeK (1222-3263, default 3263): +7G
Command (m for help): n
First cylinder (2074-3263, default 2074):
Using default value 2074
Last cylinder or +size or +sizeM or +sizeK (2074-3263, default 3263): +3G
Command (m for help): n
First cylinder (2440-3263, default 2440):
Using default value 2440
Last cylinder or +size or +sizeM or +sizeK (2440-3263, default 3263): +3G
Command (m for help): n
First cylinder (2806-3263, default 2806):
Using default value 2806
Last cylinder or +size or +sizeM or +sizeK (2806-3263, default 3263): +1G
Command (m for help): n
First cylinder (2929-3263, default 2929): +1G
Value out of range.
First cylinder (2929-3263, default 2929):
Using default value 2929
Last cylinder or +size or +sizeM or +sizeK (2929-3263, default 3263): +1G
Command (m for help): n
First cylinder (3052-3263, default 3052):
Using default value 3052
Last cylinder or +size or +sizeM or +sizeK (3052-3263, default 3263):
Using default value 3263
Command (m for help): w
The partition table has been altered!
Calling ioctl() to re-read partition table.
Syncing disks.
[root@node1 ~]#
#分区只需在节点1操作。
--查看磁盘分区情况:
[root@node1 ~]# fdisk -l
Disk /dev/sda: 68.8 GB, 68862869504 bytes
255 heads, 63 sectors/track, 8372 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sda1 * 1 13 104391 83 Linux
/dev/sda2 14 8372 67143667+ 8e Linux LVM
Disk /dev/sdb: 26.8 GB, 26843545600 bytes
255 heads, 63 sectors/track, 3263 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sdb1 1 123 987966 83 Linux
/dev/sdb2 124 246 987997+ 83 Linux
/dev/sdb3 247 369 987997+ 83 Linux
/dev/sdb4 370 3263 23246055 5 Extended
/dev/sdb5 370 1221 6843658+ 83 Linux
/dev/sdb6 1222 2073 6843658+ 83 Linux
/dev/sdb7 2074 2439 2939863+ 83 Linux
/dev/sdb8 2440 2805 2939863+ 83 Linux
/dev/sdb9 2806 2928 987966 83 Linux
/dev/sdb10 2929 3051 987966 83 Linux
/dev/sdb11 3052 3263 1702858+ 83 Linux
[root@node1 ~]#
--在node2上查看磁盘,由于是共享的,所有node2查看的磁盘已经分好区:
[root@node2 ~]# fdisk -l
Disk /dev/sda: 68.8 GB, 68862869504 bytes
255 heads, 63 sectors/track, 8372 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sda1 * 1 13 104391 83 Linux
/dev/sda2 14 8372 67143667+ 8e Linux LVM
Disk /dev/sdb: 26.8 GB, 26843545600 bytes
255 heads, 63 sectors/track, 3263 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sdb1 1 123 987966 83 Linux
/dev/sdb2 124 246 987997+ 83 Linux
/dev/sdb3 247 369 987997+ 83 Linux
/dev/sdb4 370 3263 23246055 5 Extended
/dev/sdb5 370 1221 6843658+ 83 Linux
/dev/sdb6 1222 2073 6843658+ 83 Linux
/dev/sdb7 2074 2439 2939863+ 83 Linux
/dev/sdb8 2440 2805 2939863+ 83 Linux
/dev/sdb9 2806 2928 987966 83 Linux
/dev/sdb10 2929 3051 987966 83 Linux
/dev/sdb11 3052 3263 1702858+ 83 Linux
[root@node2 ~]#
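#Note: if node2 does not see the new partitions immediately, the kernel's view of the shared disk can be refreshed. A minimal sketch (partprobe comes with the parted package on RHEL 5; only needed when fdisk -l on node2 still shows the old layout):
[root@node2 ~]# partprobe /dev/sdb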
---ASM software setup:
--Create an asm directory and upload the rpm packages into it:
[root@node1 ~]#
[root@node1 ~]# mkdir asm
[root@node1 ~]# ls
anaconda-ks.cfg asm Desktop install.log install.log.syslog
[root@node1 ~]#
[root@node1 ~]# cd asm
[root@node1 asm]# rz
rz waiting to receive.
Starting zmodem transfer. Press Ctrl+C to cancel.
100%  126 KB  126 KB/s  00:00:01  0 Errors
100%   13 KB   13 KB/s  00:00:01  0 Errors
100%   83 KB   83 KB/s  00:00:01  0 Errors
#Upload complete.
--Install the rpm packages:
[root@node1 asm]#
[root@node1 asm]# ls
oracleasm-2.6.18-194.el5-2.0.5-1.el5.i686.rpm #the version of this rpm must match the system kernel version; check the kernel version with: uname -a
oracleasm-support-2.1.8-1.el5.i386.rpm
oracleasmlib-2.0.4-1.el5.i386.rpm
[root@node1 asm]#
[root@node1 asm]#
[root@node1 asm]# rpm -ivh *
warning: oracleasm-2.6.18-194.el5-2.0.5-1.el5.i686.rpm: Header V3 DSA signature: NOKEY, key ID 1e5e0159
Preparing... ########################################### [100%]
1:oracleasm-support ########################################### [ 33%]
2:oracleasm-2.6.18-194.el########################################### [ 67%]
3:oracleasmlib ########################################### [100%]
[root@node1 asm]#
#Installation complete; perform the same steps on both nodes.
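#Optional sanity check before configuring: the kernel release string must match the oracleasm driver package version. A quick sketch:
[root@node1 asm]# uname -r #should print 2.6.18-194.el5, matching the rpm name above
[root@node1 asm]# rpm -qa | grep oracleasm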
---Configure oracleasm (same steps on both nodes):
[root@node1 asm]#
[root@node1 asm]# service oracleasm configure
Configuring the Oracle ASM library driver.
This will configure the on-boot properties of the Oracle ASM library
driver. The following questions will determine whether the driver is
loaded on boot and what permissions it will have. The current values
will be shown in brackets ('[]'). Hitting <ENTER> without typing an
answer will keep that current value. Ctrl-C will abort.
Default user to own the driver interface []: grid
Default group to own the driver interface []: asmadmin
Start Oracle ASM library driver on boot (y/n) [n]: y
Scan for Oracle ASM disks on boot (y/n) [y]:
Writing Oracle ASM library driver configuration: done
Initializing the Oracle ASMLib driver: [ OK ]
Scanning the system for Oracle ASMLib disks: [ OK ]
[root@node1 asm]#
[root@node1 asm]#
---Create the Oracle ASM disks:
--Node 1: #create the disks on one node only (node1 here); the other node (node2) only needs to run a scan afterwards.
[root@node1 asm]#
[root@node1 asm]# service oracleasm
Usage: /etc/init.d/oracleasm {start|stop|restart|enable|disable|configure|createdisk|deletedisk|querydisk|listdisks|scandisks|status}
[root@node1 asm]# service oracleasm createdisk OCR_VOTE1 /dev/sdb1
Marking disk "OCR_VOTE1" as an ASM disk: [ OK ]
[root@node1 asm]# service oracleasm createdisk OCR_VOTE2 /dev/sdb2
Marking disk "OCR_VOTE2" as an ASM disk: [ OK ]
[root@node1 asm]# service oracleasm createdisk OCR_VOTE3 /dev/sdb3
Marking disk "OCR_VOTE3" as an ASM disk: [ OK ]
[root@node1 asm]# service oracleasm createdisk ASM_DATA1 /dev/sdb5
Marking disk "ASM_DATA1" as an ASM disk: [ OK ]
[root@node1 asm]# service oracleasm createdisk ASM_DATA2 /dev/sdb6
Marking disk "ASM_DATA2" as an ASM disk: [ OK ]
[root@node1 asm]# service oracleasm createdisk ASM_RCY1 /dev/sdb7
Marking disk "ASM_RCY1" as an ASM disk: [ OK ]
[root@node1 asm]# service oracleasm createdisk ASM_RCY2 /dev/sdb8
Marking disk "ASM_RCY2" as an ASM disk: [ OK ]
[root@node1 asm]#
[root@node1 asm]# service oracleasm listdisks
ASM_DATA1
ASM_DATA2
ASM_RCY1
ASM_RCY2
OCR_VOTE1
OCR_VOTE2
OCR_VOTE3
[root@node1 asm]#
--Node 2:
[root@node2 asm]#
[root@node2 asm]# service oracleasm scandisks
Scanning the system for Oracle ASMLib disks: [ OK ]
[root@node2 asm]#
[root@node2 asm]#
[root@node2 asm]# service oracleasm listdisks
ASM_DATA1
ASM_DATA2
ASM_RCY1
ASM_RCY2
OCR_VOTE1
OCR_VOTE2
OCR_VOTE3
[root@node2 asm]#
#The ASM disks created on node1 are now visible on node2.
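#To map an ASM disk label back to its block device, the querydisk subcommand (listed in the usage line above) can be used; a sketch, with output wording varying by oracleasm-support version:
[root@node2 asm]# service oracleasm querydisk OCR_VOTE1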
----Establish mutual trust between the hosts:
--Set up trust between the oracle and grid users of the two nodes (ssh public/private key pairs)
--Generate the key pairs (for the oracle and grid users on all nodes):
---oracle user:
--oracle user on node 1:
[root@node1 ~]# su - oracle
[oracle@node1 ~]$
[oracle@node1 ~]$ ssh-keygen -t rsa #generate the RSA key pair; leave the passphrase empty#
Generating public/private rsa key pair.
Enter file in which to save the key (/home/oracle/.ssh/id_rsa):
Created directory '/home/oracle/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/oracle/.ssh/id_rsa.
Your public key has been saved in /home/oracle/.ssh/id_rsa.pub.
The key fingerprint is:
09:7d:4d:26:a5:3c:40:24:55:bd:25:5f:cd:e3:5f:73 oracle@node1
[oracle@node1 ~]$
[oracle@node1 ~]$ ssh-keygen -t dsa #generate the DSA key pair; leave the passphrase empty#
Generating public/private dsa key pair.
Enter file in which to save the key (/home/oracle/.ssh/id_dsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/oracle/.ssh/id_dsa.
Your public key has been saved in /home/oracle/.ssh/id_dsa.pub.
The key fingerprint is:
32:3f:5e:7a:fc:19:78:cf:39:24:89:6e:80:dd:7a:65 oracle@node1
[oracle@node1 ~]$
[oracle@node1 ~]$ ls .ssh
id_dsa id_dsa.pub id_rsa id_rsa.pub
[oracle@node1 ~]$
--oracle user on node 2:
[root@node2 ~]# su - oracle
[oracle@node2 ~]$
[oracle@node2 ~]$ ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/oracle/.ssh/id_rsa):
Created directory '/home/oracle/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/oracle/.ssh/id_rsa.
Your public key has been saved in /home/oracle/.ssh/id_rsa.pub.
The key fingerprint is:
22:28:28:eb:b0:fa:43:00:71:f7:ca:a2:53:ed:38:ca oracle@node2
[oracle@node2 ~]$
[oracle@node2 ~]$ ssh-keygen -t dsa
Generating public/private dsa key pair.
Enter file in which to save the key (/home/oracle/.ssh/id_dsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/oracle/.ssh/id_dsa.
Your public key has been saved in /home/oracle/.ssh/id_dsa.pub.
The key fingerprint is:
04:3c:bf:64:db:e3:db:9b:19:90:45:d4:06:dd:71:30 oracle@node2
[oracle@node2 ~]$
[oracle@node2 ~]$ ls .ssh
id_dsa id_dsa.pub id_rsa id_rsa.pub
[oracle@node2 ~]$
---Set up the trust relationship:
[oracle@node1 ~]$ cat .ssh/id_rsa.pub >>.ssh/authorized_keys
[oracle@node1 ~]$ cat .ssh/id_dsa.pub >>.ssh/authorized_keys
[oracle@node1 ~]$ ssh node2 cat .ssh/id_rsa.pub >>.ssh/authorized_keys
The authenticity of host 'node2 (192.168.56.12)' can't be established.
RSA key fingerprint is 25:cb:8a:67:4a:41:eb:1d:39:1e:ba:8f:0d:24:05:21.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node2,192.168.56.12' (RSA) to the list of known hosts.
oracle@node2's password:
[oracle@node1 ~]$ ssh node2 cat .ssh/id_dsa.pub >>.ssh/authorized_keys
oracle@node2's password:
[oracle@node1 ~]$ scp .ssh/authorized_keys node2:~/.ssh
oracle@node2's password:
authorized_keys 100% 1992 2.0KB/s 00:00
[oracle@node1 ~]$
--The generated key files can now be listed:
--Node 1:
[oracle@node1 ~]$ ls .ssh
authorized_keys id_dsa id_dsa.pub id_rsa id_rsa.pub known_hosts
[oracle@node1 ~]$
--Node 2:
[oracle@node2 ~]$ ls .ssh
authorized_keys id_dsa id_dsa.pub id_rsa id_rsa.pub
[oracle@node2 ~]$
---Verify the trust relationship:
--Node 1:
[oracle@node1 ~]$
[oracle@node1 ~]$ ssh node2 date
Fri Oct 28 13:16:54 CST 2016
[oracle@node1 ~]$ ssh node2-priv date
The authenticity of host 'node2-priv (10.10.10.12)' can't be established.
RSA key fingerprint is 25:cb:8a:67:4a:41:eb:1d:39:1e:ba:8f:0d:24:05:21.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node2-priv,10.10.10.12' (RSA) to the list of known hosts.
Fri Oct 28 13:17:05 CST 2016
[oracle@node1 ~]$ ssh node2-priv date
Fri Oct 28 13:17:10 CST 2016
[oracle@node1 ~]$ ssh node1 date
The authenticity of host 'node1 (192.168.56.11)' can't be established.
RSA key fingerprint is 25:cb:8a:67:4a:41:eb:1d:39:1e:ba:8f:0d:24:05:21.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node1,192.168.56.11' (RSA) to the list of known hosts.
Fri Oct 28 13:17:38 CST 2016
[oracle@node1 ~]$ ssh node1 date
Fri Oct 28 13:17:42 CST 2016
[oracle@node1 ~]$ ssh node1-priv date
The authenticity of host 'node1-priv (10.10.10.11)' can't be established.
RSA key fingerprint is 25:cb:8a:67:4a:41:eb:1d:39:1e:ba:8f:0d:24:05:21.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node1-priv,10.10.10.11' (RSA) to the list of known hosts.
Fri Oct 28 13:17:54 CST 2016
[oracle@node1 ~]$ ssh node1-priv date
Fri Oct 28 13:17:57 CST 2016
[oracle@node1 ~]$
[oracle@node1 ~]$ ls .ssh
authorized_keys id_dsa id_dsa.pub id_rsa id_rsa.pub known_hosts
[oracle@node1 ~]$
--Node 2:
[oracle@node2 ~]$ ssh node1 date
The authenticity of host 'node1 (192.168.56.11)' can't be established.
RSA key fingerprint is 25:cb:8a:67:4a:41:eb:1d:39:1e:ba:8f:0d:24:05:21.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node1,192.168.56.11' (RSA) to the list of known hosts.
Fri Oct 28 13:19:23 CST 2016
[oracle@node2 ~]$ ssh node1 date
Fri Oct 28 13:19:26 CST 2016
[oracle@node2 ~]$ ssh node1-priv date
The authenticity of host 'node1-priv (10.10.10.11)' can't be established.
RSA key fingerprint is 25:cb:8a:67:4a:41:eb:1d:39:1e:ba:8f:0d:24:05:21.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node1-priv,10.10.10.11' (RSA) to the list of known hosts.
Fri Oct 28 13:19:51 CST 2016
[oracle@node2 ~]$ ssh node1-priv date
Fri Oct 28 13:19:54 CST 2016
[oracle@node2 ~]$ ssh node2 date
The authenticity of host 'node2 (192.168.56.12)' can't be established.
RSA key fingerprint is 25:cb:8a:67:4a:41:eb:1d:39:1e:ba:8f:0d:24:05:21.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node2,192.168.56.12' (RSA) to the list of known hosts.
Fri Oct 28 13:20:09 CST 2016
[oracle@node2 ~]$ ssh node2 date
Fri Oct 28 13:20:12 CST 2016
[oracle@node2 ~]$ ssh node2-priv date
The authenticity of host 'node2-priv (10.10.10.12)' can't be established.
RSA key fingerprint is 25:cb:8a:67:4a:41:eb:1d:39:1e:ba:8f:0d:24:05:21.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node2-priv,10.10.10.12' (RSA) to the list of known hosts.
Fri Oct 28 13:20:23 CST 2016
[oracle@node2 ~]$ ssh node2-priv date
Fri Oct 28 13:20:26 CST 2016
[oracle@node2 ~]$
[oracle@node2 ~]$
[oracle@node2 ~]$ ls .ssh
authorized_keys id_dsa id_dsa.pub id_rsa id_rsa.pub known_hosts
[oracle@node2 ~]$
#Passwordless SSH now works between the oracle users of the two nodes. Repeat the same steps under the grid user on both nodes to establish trust between the grid users.
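#For reference, a condensed sketch of the same sequence for the grid user (after running ssh-keygen -t rsa and -t dsa with empty passphrases as grid on both nodes):
[grid@node1 ~]$ cat .ssh/id_rsa.pub >>.ssh/authorized_keys
[grid@node1 ~]$ cat .ssh/id_dsa.pub >>.ssh/authorized_keys
[grid@node1 ~]$ ssh node2 cat .ssh/id_rsa.pub >>.ssh/authorized_keys
[grid@node1 ~]$ ssh node2 cat .ssh/id_dsa.pub >>.ssh/authorized_keys
[grid@node1 ~]$ scp .ssh/authorized_keys node2:~/.ssh
#Then verify with ssh node1/node2/node1-priv/node2-priv date from both nodes, exactly as above for oracle.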
----Configure a local yum repository (used to install the prerequisite packages):
[root@node1 ~]# cd /etc/yum.repos.d
[root@node1 yum.repos.d]# ls
rhel-debuginfo.repo
[root@node1 yum.repos.d]# cp rhel-debuginfo.repo yum.repo
[root@node1 yum.repos.d]# vi yum.repo
[Base]
name=Red Hat Enterprise Linux
baseurl=file:///media/Server
enabled=1
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
#Same configuration on both nodes.
--Attach the installation DVD to the virtual machine.
#This is done through the VirtualBox GUI, so the details are omitted here.
--Mount the DVD so the packages can be installed:
[root@node1 yum.repos.d]#
[root@node1 yum.repos.d]# mount /dev/hdc /media
mount: block device /dev/hdc is write-protected, mounting read-only
[root@node1 yum.repos.d]#
#Same on both nodes.
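#Optionally confirm the repo is usable before installing; a sketch (repolist should be available in the yum version shipped with RHEL 5):
[root@node1 yum.repos.d]# yum clean all
[root@node1 yum.repos.d]# yum repolist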
--Install the required packages with yum:
[root@node1 yum.repos.d]# yum install libaio* -y
Loaded plugins: rhnplugin, security
Repository rhel-debuginfo is listed more than once in the configuration
Repository rhel-debuginfo-beta is listed more than once in the configuration
This system is not registered with RHN.
RHN support will be disabled.
Base | 1.3 kB 00:00
Base/primary | 753 kB 00:00
Base 2348/2348
Setting up Install Process
Package libaio-0.3.106-5.i386 already installed and latest version
Resolving Dependencies
--> Running transaction check
---> Package libaio-devel.i386 0:0.3.106-5 set to be updated
--> Finished Dependency Resolution
Dependencies Resolved
===============================================================================================
Package Arch Version Repository Size
===============================================================================================
Installing:
libaio-devel i386 0.3.106-5 Base 12 k
Transaction Summary
===============================================================================================
Install 1 Package(s)
Upgrade 0 Package(s)
Total download size: 12 k
Downloading Packages:
Running rpm_check_debug
Running Transaction Test
Finished Transaction Test
Transaction Test Succeeded
Running Transaction
Installing : libaio-devel 1/1
error: failed to stat /media/RHEL_5.5 i386 DVD: No such file or directory
Installed:
libaio-devel.i386 0:0.3.106-5
Complete!
[root@node1 yum.repos.d]#
[root@node1 yum.repos.d]# yum install syssta* -y
Loaded plugins: rhnplugin, security
Repository rhel-debuginfo is listed more than once in the configuration
Repository rhel-debuginfo-beta is listed more than once in the configuration
This system is not registered with RHN.
RHN support will be disabled.
Setting up Install Process
Resolving Dependencies
--> Running transaction check
---> Package sysstat.i386 0:7.0.2-3.el5 set to be updated
--> Finished Dependency Resolution
Dependencies Resolved
===============================================================================================
Package Arch Version Repository Size
===============================================================================================
Installing:
sysstat i386 7.0.2-3.el5 Base 170 k
Transaction Summary
===============================================================================================
Install 1 Package(s)
Upgrade 0 Package(s)
Total download size: 170 k
Downloading Packages:
Running rpm_check_debug
Running Transaction Test
Finished Transaction Test
Transaction Test Succeeded
Running Transaction
Installing : sysstat 1/1
error: failed to stat /media/RHEL_5.5 i386 DVD: No such file or directory
Installed:
sysstat.i386 0:7.0.2-3.el5
Complete!
[root@node1 yum.repos.d]#
[root@node1 yum.repos.d]# yum install unixO* -y
Loaded plugins: rhnplugin, security
Repository rhel-debuginfo is listed more than once in the configuration
Repository rhel-debuginfo-beta is listed more than once in the configuration
This system is not registered with RHN.
RHN support will be disabled.
Setting up Install Process
Resolving Dependencies
--> Running transaction check
---> Package unixODBC.i386 0:2.2.11-7.1 set to be updated
---> Package unixODBC-devel.i386 0:2.2.11-7.1 set to be updated
---> Package unixODBC-kde.i386 0:2.2.11-7.1 set to be updated
--> Finished Dependency Resolution
Dependencies Resolved
===============================================================================================
Package Arch Version Repository Size
===============================================================================================
Installing:
unixODBC i386 2.2.11-7.1 Base 832 k
unixODBC-devel i386 2.2.11-7.1 Base 737 k
unixODBC-kde i386 2.2.11-7.1 Base 558 k
Transaction Summary
===============================================================================================
Install 3 Package(s)
Upgrade 0 Package(s)
Total download size: 2.1 M
Downloading Packages:
-----------------------------------------------------------------------------------------------
Total 429 MB/s | 2.1 MB 00:00
Running rpm_check_debug
Running Transaction Test
Finished Transaction Test
Transaction Test Succeeded
Running Transaction
Installing : unixODBC 1/3
error: failed to stat /media/RHEL_5.5 i386 DVD: No such file or directory
Installing : unixODBC-kde 2/3
Installing : unixODBC-devel 3/3
Installed:
unixODBC.i386 0:2.2.11-7.1 unixODBC-devel.i386 0:2.2.11-7.1 unixODBC-kde.i386 0:2.2.11-7.1
Complete!
--Verify the installation succeeded; there should be 3 packages:
[root@node1 yum.repos.d]# rpm -qa |grep -i odbc
unixODBC-kde-2.2.11-7.1
unixODBC-devel-2.2.11-7.1
unixODBC-2.2.11-7.1
[root@node1 yum.repos.d]#
#yum installation successful; same steps on both nodes.
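#A quick spot check of the packages installed above (the full 11gR2 prerequisite list is checked later by runcluvfy.sh):
[root@node1 yum.repos.d]# rpm -q libaio libaio-devel sysstat unixODBC unixODBC-devel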
----Install the GI (Grid Infrastructure) software:
--Switch to the grid user and create a soft directory (the installation is run on one node only; the installer copies the software to the other node):
--After creating the soft directory, upload the installation media into it:
[grid@node1 ~]$ pwd
/home/grid
[grid@node1 ~]$ mkdir soft
[grid@node1 ~]$ cd soft/
[grid@node1 soft]$ pwd
/home/grid/soft
[grid@node1 soft]$
[grid@node1 soft]$ rz
rz waiting to receive.
Starting zmodem transfer. Press Ctrl+C to cancel.
100%  957843 KB  5949 KB/s  00:02:41  0 Errors
[grid@node1 soft]$ ls
linux_11gR2_grid.zip
[grid@node1 soft]$
#Upload complete.
--Unzip the installation package:
[grid@node1 soft]$
[grid@node1 soft]$ unzip linux_11gR2_grid.zip
... ...
creating: grid/stage/properties/
inflating: grid/stage/properties/oracle.crs_Complete.properties
creating: grid/stage/sizes/
inflating: grid/stage/sizes/oracle.crs11.2.0.1.0Complete.sizes.properties
inflating: grid/stage/OuiConfigVariables.xml
inflating: grid/stage/fastcopy.xml
[grid@node1 soft]$
[grid@node1 soft]$ ls
grid linux_11gR2_grid.zip
[grid@node1 soft]$
#Unzipped successfully.
--Enter the grid directory:
[grid@node1 soft]$ cd grid/
[grid@node1 grid]$ ls
doc install response rpm runcluvfy.sh runInstaller sshsetup stage welcome.html
[grid@node1 grid]$
--First pre-check the GI installation environment with the cluster verification utility:
[grid@node1 grid]$ ./runcluvfy.sh stage -pre crsinst -n node1,node2 -fixup -verbose
Performing pre-checks for cluster services setup
Checking node reachability...
Check: Node reachability from node "node1"
Destination Node Reachable?
------------------------------------ ------------------------
node1 yes
node2 yes
Result: Node reachability check passed from node "node1"
Checking user equivalence...
Check: User equivalence for user "grid"
Node Name Comment
------------------------------------ ------------------------
node2 passed
node1 passed
Result: User equivalence check passed for user "grid"
Checking node connectivity...
Checking hosts config file...
Node Name Status Comment
------------ ------------------------ ------------------------
node2 passed
node1 passed
Verification of the hosts config file successful
Interface information for node "node2"
Name IP Address Subnet Gateway Def. Gateway HW Address MTU
------ --------------- --------------- --------------- --------------- ----------------- ------
eth0 192.168.56.12 192.168.56.0 0.0.0.0 192.168.56.1 08:00:27:FB:15:AB 1500
eth1 10.10.10.12 10.10.10.0 0.0.0.0 192.168.56.1 08:00:27:59:BC:90 1500
Interface information for node "node1"
Name IP Address Subnet Gateway Def. Gateway HW Address MTU
------ --------------- --------------- --------------- --------------- ----------------- ------
eth0 192.168.56.11 192.168.56.0 0.0.0.0 192.168.56.1 08:00:27:FB:15:AA 1500
eth1 10.10.10.11 10.0.0.0 0.0.0.0 192.168.56.1 08:00:27:E1:66:38 1500
Check: Node connectivity of subnet "192.168.56.0"
Source Destination Connected?
------------------------------ ------------------------------ ----------------
node2:eth0 node1:eth0 yes
Result: Node connectivity passed for subnet "192.168.56.0" with node(s) node2,node1
Check: TCP connectivity of subnet "192.168.56.0"
Source Destination Connected?
------------------------------ ------------------------------ ----------------
node1:192.168.56.11 node2:192.168.56.12 passed
Result: TCP connectivity check passed for subnet "192.168.56.0"
... ... #Check every item on both nodes for "failed" entries; if there are none, the environment is ready for installation.
Checking to make sure user "grid" is not in "root" group
Node Name Status Comment
------------ ------------------------ ------------------------
node2 does not exist passed
node1 does not exist passed
Result: User "grid" is not part of "root" group. Check passed
Check default user file creation mask
Node Name Available Required Comment
------------ ------------------------ ------------------------ ----------
node2 0022 0022 passed
node1 0022 0022 passed
Result: Default user file creation mask check passed
Starting Clock synchronization checks using Network Time Protocol(NTP)...
NTP Configuration file check started...
Network Time Protocol(NTP) configuration file not found on any of the nodes. Oracle Cluster Time Synchronization Service(CTSS) can be used instead of NTP for time synchronization on the cluster nodes
Result: Clock synchronization check using Network Time Protocol(NTP) passed
Pre-check for cluster services setup was successful.
[grid@node1 grid]$
[grid@node1 grid]$
#Pre-check complete; no issues found.
----With the environment pre-check clean, launch the graphical installer to install the GI software.
---In the Xshell session, point DISPLAY at the workstation running the X server, then start the graphical installer:
[grid@node1 grid]$ export DISPLAY=192.168.56.101:0.0
[grid@node1 grid]$
[grid@node1 grid]$ ls
doc install response rpm runcluvfy.sh runInstaller sshsetup stage welcome.html
[grid@node1 grid]$ ./runInstaller
Starting Oracle Universal Installer...
Checking Temp space: must be greater than 80 MB. Actual 51512 MB Passed
Checking swap space: must be greater than 150 MB. Actual 3935 MB Passed
Checking monitor: must be configured to display at least 256 colors. Actual 16777216 Passed
Preparing to launch Oracle Universal Installer from /tmp/OraInstall2016-10-28_02-02-45PM. Please wait ...
---Towards the end of the graphical GI installation, two scripts must be run as root on both nodes: orainstRoot.sh on each node first, then root.sh on node1 and, once it completes, on node2:
[root@node1 ~]# /u01/app/oraInventory/orainstRoot.sh #node 1
[root@node2 ~]# /u01/app/oraInventory/orainstRoot.sh #node 2
[root@node1 ~]# /u01/11.2.0/grid/root.sh #node 1
[root@node2 ~]# /u01/11.2.0/grid/root.sh #node 2
#Installation complete.
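#Once root.sh has finished on both nodes, cluster membership can be confirmed with olsnodes from the grid home (a sketch, assuming the grid bin directory is in the grid user's PATH):
[grid@node1 ~]$ olsnodes -n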
---Then verify the cluster resources:
[grid@node1 ~]$ crs_stat -t
Name Type Target State Host
------------------------------------------------------------
ora....ER.lsnr ora....er.type ONLINE ONLINE node1
ora....N1.lsnr ora....er.type ONLINE ONLINE node1
ora....VOTE.dg ora....up.type ONLINE ONLINE node1
ora.asm ora.asm.type ONLINE ONLINE node1
ora.eons ora.eons.type ONLINE ONLINE node1
ora.gsd ora.gsd.type OFFLINE OFFLINE
ora....network ora....rk.type ONLINE ONLINE node1
ora....SM1.asm application ONLINE ONLINE node1
ora....E1.lsnr application ONLINE ONLINE node1
ora.node1.gsd application OFFLINE OFFLINE
ora.node1.ons application ONLINE ONLINE node1
ora.node1.vip ora....t1.type ONLINE ONLINE node1
ora....SM2.asm application ONLINE ONLINE node2
ora....E2.lsnr application ONLINE ONLINE node2
ora.node2.gsd application OFFLINE OFFLINE
ora.node2.ons application ONLINE ONLINE node2
ora.node2.vip ora....t1.type ONLINE ONLINE node2
ora.oc4j ora.oc4j.type OFFLINE OFFLINE
ora.ons ora.ons.type ONLINE ONLINE node1
ora....ry.acfs ora....fs.type ONLINE ONLINE node1
ora.scan1.vip ora....ip.type ONLINE ONLINE node1
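--crs_stat is deprecated in 11.2; the same resource view is also available via (sketch):
[grid@node1 ~]$ crsctl stat res -t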
---Check the following four services; when all of them are online, the database software can be installed next:
[grid@node1 ~]$ crsctl check crs
CRS-4638: Oracle High Availability Services is online
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
[grid@node1 ~]$
----After checking the cluster resources, install the oracle database software (run on a single node):
---Under the oracle user, create a soft directory, upload the two installation packages into it, then unzip them:
---Unzip the two oracle software packages:
[oracle@node1 soft]$ unzip linux_11gR2_database_1of2.zip
creating: database/stage/sizes/
extracting: database/stage/sizes/oracle.server11.2.0.1.0EE.sizes.properties
extracting: database/stage/sizes/oracle.server11.2.0.1.0SE.sizes.properties
extracting: database/stage/sizes/oracle.server11.2.0.1.0Custom.sizes.properties
inflating: database/stage/OuiConfigVariables.xml
inflating: database/stage/oracle.server.11_2_0_1_0.xml
inflating: database/stage/fastcopy.xml
[oracle@node1 soft]$
[oracle@node1 soft]$
[oracle@node1 soft]$ unzip linux_11gR2_database_2of2.zip
inflating: database/stage/Components/oracle.sysman.console.db/11.2.0.1.0/1/DataFiles/filegroup6.jar
inflating: database/stage/Components/oracle.sysman.console.db/11.2.0.1.0/1/DataFiles/filegroup5.jar
inflating: database/stage/Components/oracle.sysman.console.db/11.2.0.1.0/1/DataFiles/filegroup2.jar
inflating: database/stage/Components/oracle.sysman.console.db/11.2.0.1.0/1/DataFiles/filegroup10.jar
inflating: database/stage/Components/oracle.sysman.console.db/11.2.0.1.0/1/DataFiles/filegroup11.jar
inflating: database/stage/Components/oracle.sysman.console.db/11.2.0.1.0/1/DataFiles/filegroup7.jar
[oracle@node1 soft]$
---After unzipping, list and enter the database directory:
[oracle@node1 soft]$ ls
database linux_11gR2_database_1of2.zip linux_11gR2_database_2of2.zip
[oracle@node1 soft]$
[oracle@node1 soft]$ cd database/
[oracle@node1 database]$ ls
doc install response rpm runInstaller sshsetup stage welcome.html
---Set the DISPLAY variable and launch the graphical installer for the oracle software:
[oracle@node1 database]$ export DISPLAY=192.168.56.101:0.0
[oracle@node1 database]$ ./runInstaller
Starting Oracle Universal Installer...
Checking Temp space: must be greater than 80 MB. Actual 43893 MB Passed
Checking swap space: must be greater than 150 MB. Actual 3791 MB Passed
Checking monitor: must be configured to display at least 256 colors. Actual 16777216 Passed
Preparing to launch Oracle Universal Installer from /tmp/OraInstall2016-10-29_07-40-13AM. Please wait ...
... ...
#oracle software installed successfully.
---After the installation, run the following script as the root user:
[root@node1 ~]# /u01/app/oracle/product/11.2.0/db_1/root.sh
Running Oracle 11g root.sh script...
The following environment variables are set as:
ORACLE_OWNER= oracle
ORACLE_HOME= /u01/app/oracle/product/11.2.0/db_1
Enter the full pathname of the local bin directory: [/usr/local/bin]:
The file "dbhome" already exists in /usr/local/bin. Overwrite it? (y/n)
[n]:
The file "oraenv" already exists in /usr/local/bin. Overwrite it? (y/n)
[n]:
The file "coraenv" already exists in /usr/local/bin. Overwrite it? (y/n)
[n]:
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root.sh script.
Now product-specific root actions will be performed.
Finished product-specific root actions.
#Script finished.
----Use ASMCA to create the remaining two ASM disk groups:
[grid@node1 ~]$ export DISPLAY=192.168.56.101:0.0
[grid@node1 ~]$
[grid@node1 ~]$ asmca
#Disk groups created.
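#An optional check that the new disk groups are created and mounted, using asmcmd under the grid environment (sketch; assumes ORACLE_SID=+ASM1 and the grid home environment are set):
[grid@node1 ~]$ asmcmd lsdg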
----From the SecureCRT session, launch the DBCA graphical interface to create the cluster database:
[oracle@node1 ~]$ export DISPLAY=192.168.56.101:0.0
[oracle@node1 ~]$
[oracle@node1 ~]$ dbca
... ...
#Done; the cluster database has been created.
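#An optional post-creation check with srvctl, assuming the database was named racdb in DBCA (a hypothetical name; substitute the one actually chosen):
[oracle@node1 ~]$ srvctl status database -d racdb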
Source: ITPUB blog, http://blog.itpub.net/31392094/viewspace-2127342/