oracle rac 安装

1      服务器环境准备工作

地址分配

类型

Node1

Node2

公有ip(public)eth0

192.168.56.103

192.168.56.105

虚拟ip(virtual/vip)

192.168.56.104

192.168.56.106

私有ip(private)eth1

8.8.8.8

8.8.8.88

Scan ip

192.168.56.107

 

1.1       所有节点都修改hosts文件(如有dns服务器需在dns服务器上配置)

[root@localhost ~]# vi /etc/hosts

192.168.56.103  node1

192.168.56.105  node2

8.8.8.8         node1-priv

8.8.8.88        node2-priv

192.168.56.104  node1-vip

192.168.56.106  node2-vip

192.168.56.107  node-scan

 

1.2       修改主机名称

Node1主机

[root@localhost ~]# vi /etc/sysconfig/network

NETWORKING=yes

HOSTNAME=node1

 

Node2主机

[root@localhost ~]# vi /etc/sysconfig/network

NETWORKING=yes

HOSTNAME=node2

 

1.3       修改IP地址

node1服务器

[root@localhost ~]# vi /etc/sysconfig/network-scripts/ifcfg-eth0

 

DEVICE=eth0

HWADDR=08:00:27:36:0C:A5

TYPE=Ethernet

UUID=abd4b02b-6cd5-46dd-8285-0a04103c15c3

ONBOOT=yes

NM_CONTROLLED=yes

BOOTPROTO=none

IPADDR=192.168.56.103

NETMASK=255.255.255.0

 

[root@localhost ~]# vi /etc/sysconfig/network-scripts/ifcfg-eth1

 

DEVICE=eth1

HWADDR=08:00:27:0D:80:CB

TYPE=Ethernet

UUID=b3dbb02d-29b0-4988-800f-8b5d39fda2cd

ONBOOT=yes

NM_CONTROLLED=yes

BOOTPROTO=none

IPADDR=8.8.8.8

NETMASK=255.255.255.0

 

node2服务器

[root@localhost ~]# vi /etc/sysconfig/network-scripts/ifcfg-eth0

 

DEVICE=eth0

HWADDR=08:00:27:D3:7F:D1

TYPE=Ethernet

UUID=abd4b02b-6cd5-46dd-8285-0a04103c15c3

ONBOOT=yes

NM_CONTROLLED=yes

BOOTPROTO=none

IPADDR=192.168.56.105

NETMASK=255.255.255.0

 

 

[root@localhost ~]# vi /etc/sysconfig/network-scripts/ifcfg-eth1

 

DEVICE=eth1

HWADDR=08:00:27:C6:D9:9F

TYPE=Ethernet

UUID=b3dbb02d-29b0-4988-800f-8b5d39fda2cd

ONBOOT=yes

NM_CONTROLLED=yes

BOOTPROTO=none

IPADDR=8.8.8.88

NETMASK=255.255.255.0

 

1.4       创建用户和组

Node1服务器

userdel -r oracle

groupdel oinstall

groupdel dba

 

groupadd -g 500 oinstall

groupadd -g 501 dba

groupadd -g 502 oper

groupadd -g 503 asmadmin

groupadd -g 504 asmdba

groupadd -g 505 asmoper

useradd -m -u 500 -g oinstall -G asmadmin,asmdba,asmoper grid

useradd -m -u 501 -g oinstall -G dba,oper,asmdba oracle

 

检查

[root@localhost ~]# id grid

uid=500(grid) gid=500(oinstall) 组=500(oinstall),503(asmadmin),504(asmdba),505(asmoper)

[root@localhost ~]# id oracle

uid=501(oracle) gid=500(oinstall) 组=500(oinstall),501(dba),502(oper),504(asmdba)

 

修改密码

passwd grid

passwd oracle

 

 

node2服务器

 

userdel -r oracle

groupdel oinstall

groupdel dba

 

groupadd -g 500 oinstall

groupadd -g 501 dba

groupadd -g 502 oper

groupadd -g 503 asmadmin

groupadd -g 504 asmdba

groupadd -g 505 asmoper

useradd -m -u 500 -g oinstall -G asmadmin,asmdba,asmoper grid

useradd -m -u 501 -g oinstall -G dba,oper,asmdba oracle

 

检查

[root@localhost ~]# id grid

uid=500(grid) gid=500(oinstall) 组=500(oinstall),503(asmadmin),504(asmdba),505(asmoper)

[root@localhost ~]# id oracle

uid=501(oracle) gid=500(oinstall) 组=500(oinstall),501(dba),502(oper),504(asmdba)

 

修改密码

passwd grid

passwd oracle

 

1.5       创建安装目录

 

Node1服务器

 

mkdir -p /u01/app

chown -R grid:oinstall /u01/app

chmod -R 775 /u01/app

mkdir -p /u01/app/oraInventory

chown -R grid:oinstall /u01/app/oraInventory

chmod -R 775 /u01/app/oraInventory

mkdir -p /u01/app/grid/product/11.2.0

chown -R grid:oinstall /u01/app/grid

chmod -R 775 /u01/app/grid

mkdir -p /u01/app/oracle/product/11.2.0

chown -R oracle:oinstall /u01/app/oracle

chmod -R 775 /u01/app/oracle

 

 

node2服务器

 

mkdir -p /u01/app

chown -R grid:oinstall /u01/app

chmod -R 775 /u01/app

mkdir -p /u01/app/oraInventory

chown -R grid:oinstall /u01/app/oraInventory

chmod -R 775 /u01/app/oraInventory

mkdir -p /u01/app/grid/product/11.2.0

chown -R grid:oinstall /u01/app/grid

chmod -R 775 /u01/app/grid

mkdir -p /u01/app/oracle/product/11.2.0

chown -R oracle:oinstall /u01/app/oracle

chmod -R 775 /u01/app/oracle

 

 

1.6       配置环境变量

Node1服务器

Grid用户

[root@localhost u01]# su - grid

[grid@localhost ~]$ vi .bash_profile

 

export ORACLE_SID=+ASM1

export ORACLE_BASE=/u01/app/grid

export ORACLE_HOME=$ORACLE_BASE/product/11.2.0

export PATH=$ORACLE_HOME/bin:$PATH:/usr/local/bin:/usr/bin:/etc:/usr/sbin:/usr/ucb:/sbin:$ORACLE_HOME/OPatch:/bin:/usr/ccs/bin:$PATH

export LD_LIBRARY_PATH=$ORACLE_HOME/lib:$ORACLE_HOME/rdbms/lib:/lib:/usr/lib

export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib

export TEMP=/tmp

export TMP=/tmp

export TMPDIR=/tmp

export CVUQDISK_GRP=oinstall

umask 022

 

oracle用户

[root@localhost u01]# su - oracle

[oracle@localhost ~]$ vi .bash_profile

 

export ORACLE_SID=node1

export ORACLE_UNQNAME=node

export ORACLE_BASE=/u01/app/oracle

export ORACLE_HOME=$ORACLE_BASE/product/11.2.0

export PATH=$ORACLE_HOME/bin:$PATH:$ORA_CRS_HOME/bin:/usr/bin:/etc:/usr/sbin:/usr/ucb:/sbin:$ORACLE_HOME/OPatch:/bin:/usr/ccs/bin:$PATH

export LD_LIBRARY_PATH=$ORACLE_HOME/lib:$ORACLE_HOME/rdbms/lib:/lib:/usr/lib

export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib

export TEMP=/tmp

export TMPDIR=/tmp

umask 022

 

Node2服务器

 

Grid用户

[root@localhost u01]# su - grid

[grid@localhost ~]$ vi .bash_profile

 

export ORACLE_SID=+ASM2

export ORACLE_BASE=/u01/app/grid

export ORACLE_HOME=$ORACLE_BASE/product/11.2.0

export PATH=$ORACLE_HOME/bin:$PATH:/usr/local/bin:/usr/bin:/etc:/usr/sbin:/usr/ucb:/sbin:$ORACLE_HOME/OPatch:/bin:/usr/ccs/bin:$PATH

export LD_LIBRARY_PATH=$ORACLE_HOME/lib:$ORACLE_HOME/rdbms/lib:/lib:/usr/lib

export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib

export TEMP=/tmp

export TMP=/tmp

export TMPDIR=/tmp

export CVUQDISK_GRP=oinstall

umask 022

 

oracle用户

[root@localhost u01]# su - oracle

[oracle@localhost ~]$ vi .bash_profile

 

export ORACLE_SID=node2

export ORACLE_UNQNAME=node

export ORACLE_BASE=/u01/app/oracle

export ORACLE_HOME=$ORACLE_BASE/product/11.2.0

export PATH=$ORACLE_HOME/bin:$PATH:$ORA_CRS_HOME/bin:/usr/bin:/etc:/usr/sbin:/usr/ucb:/sbin:$ORACLE_HOME/OPatch:/bin:/usr/ccs/bin:$PATH

export LD_LIBRARY_PATH=$ORACLE_HOME/lib:$ORACLE_HOME/rdbms/lib:/lib:/usr/lib

export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib

export TEMP=/tmp

export TMPDIR=/tmp

umask 022

 

 

1.7     关闭所有节点防火墙

 

Node1服务器

[root@node1 ~]# vi /etc/selinux/config

# This file controls the state of SELinuxon the system.

# SELINUX= can take one of these threevalues:

#    enforcing - SELinux security policy is enforced.

#    permissive - SELinux prints warnings instead of enforcing.

#    disabled - No SELinux policy is loaded.

SELINUX=disabled

# SELINUXTYPE= can take one of these twovalues:

#    targeted - Targeted processes are protected,

#    mls - Multi Level Security protection.

SELINUXTYPE=targeted

 

 

[root@node1 ~]# service iptables save

iptables:将防火墙规则保存到 /etc/sysconfig/iptables:     [确定]

[root@node1 ~]# service iptables stop

iptables:清除防火墙规则:                                [确定]

iptables:将链设置为政策 ACCEPT:filter                    [确定]

iptables:正在卸载模块:                                  [确定]

[root@node1 ~]# chkconfig iptables off

 

Node2服务器

 

[root@node2 ~]# vi /etc/selinux/config

# This file controls the state of SELinuxon the system.

# SELINUX= can take one of these threevalues:

#    enforcing - SELinux security policy is enforced.

#    permissive - SELinux prints warnings instead of enforcing.

#    disabled - No SELinux policy is loaded.

SELINUX=disabled

# SELINUXTYPE= can take one of these twovalues:

#    targeted - Targeted processes are protected,

#    mls - Multi Level Security protection.

SELINUXTYPE=targeted

 

 

[root@node2 ~]# service iptables save

iptables:将防火墙规则保存到 /etc/sysconfig/iptables:     [确定]

[root@node2 ~]# service iptables stop

iptables:清除防火墙规则:                                [确定]

iptables:将链设置为政策 ACCEPT:filter                    [确定]

iptables:正在卸载模块:                                  [确定]

[root@node2 ~]# chkconfig iptables off

 

1.8     配置双机信任关系

针对两个节点上的grid和oracle用户均要配置ssh双机信任关系

配置grid用户rsa 和dsa key文件

Node1服务器

[grid@node1 ~]$  ssh-keygen -t rsa

Generating public/private rsa key pair.

Enter file in which to save the key(/home/grid/.ssh/id_rsa):

Created directory '/home/grid/.ssh'.

Enter passphrase (empty for no passphrase):

Enter same passphrase again:

Your identification has been saved in/home/grid/.ssh/id_rsa.

Your public key has been saved in/home/grid/.ssh/id_rsa.pub.

The key fingerprint is:

58:85:d2:8e:9f:f0:78:b7:b0:a3:26:79:b8:92:36:23grid@node1

The key's randomart image is:

+--[ RSA 2048]----+

|      . ..      |

|     . o.       |

|      +.        |

|     oo.        |

|     .=S.       |

|     . * .      |

|   .o . + .     |

|E * + o o .      |

| o +.=.. .       |

+-----------------+

[grid@node1 ~]$  ssh-keygen -t dsa

Generating public/private dsa key pair.

Enter file in which to save the key(/home/grid/.ssh/id_dsa):

Enter passphrase (empty for no passphrase):

Enter same passphrase again:

Your identification has been saved in/home/grid/.ssh/id_dsa.

Your public key has been saved in/home/grid/.ssh/id_dsa.pub.

The key fingerprint is:

6d:bc:21:95:01:d4:e7:ee:ea:e0:95:34:7f:56:e5:12grid@node1

The key's randomart image is:

+--[ DSA 1024]----+

|      .oo.      |

|         .o.    |

|         oo  E .|

|        +  .  o.|

|       S B.  . o|

|        + *.  o |

|       . +.. o  |

|      . o  .o   |

|       ..o.     |

+-----------------+

Node2服务器

 

[grid@node2 ~]$ ssh-keygen -t rsa

Generating public/private rsa key pair.

Enter file in which to save the key(/home/grid/.ssh/id_rsa):

Created directory '/home/grid/.ssh'.

Enter passphrase (empty for no passphrase):

Enter same passphrase again:

Your identification has been saved in /home/grid/.ssh/id_rsa.

Your public key has been saved in/home/grid/.ssh/id_rsa.pub.

The key fingerprint is:

f8:35:6c:87:7d:04:1c:91:48:c0:93:90:be:0b:21:4bgrid@node2

The key's randomart image is:

+--[ RSA 2048]----+

|     .+.+.o++   |

|     . + . o.   |

|    .   .    .  |

|  E. .. . o .   |

| . o ...S * o .  |

|  .. .. o o .   |

|    . ..        |

|     .          |

|                 |

+-----------------+

[grid@node2 ~]$ ssh-keygen -t dsa

Generating public/private dsa key pair.

Enter file in which to save the key(/home/grid/.ssh/id_dsa):

Enter passphrase (empty for no passphrase):

Enter same passphrase again:

Your identification has been saved in/home/grid/.ssh/id_dsa.

Your public key has been saved in/home/grid/.ssh/id_dsa.pub.

The key fingerprint is:

d0:26:84:db:0e:a0:dd:c0:a8:b1:e6:b8:0f:3d:a4:37grid@node2

The key's randomart image is:

+--[ DSA 1024]----+

| o  ..          |

|o + .. .         |

|.= + oo o        |

|+.. + .+         |

|+ . o  S        |

|.=   .          |

|o.E              |

|.o o             |

| ..              |

+-----------------+

 

在node1上将两台主机生成的rsa和dsa key文件加入到authorized key文件中,然后把node1主机上的authorized文件拷贝到node2上

 

[grid@node1 .ssh]$ pwd

/home/grid/.ssh

[grid@node1 .ssh]$ cat ./id_rsa.pub>> ./authorized_keys

[grid@node1 .ssh]$  cat ./id_dsa.pub >> ./authorized_keys

[grid@node1 .ssh]$  ssh node2 cat ~/.ssh/id_rsa.pub >>./authorized_keys

The authenticity of host 'node2(192.168.56.105)' can't be established.

RSA key fingerprint isfd:dc:13:60:e2:cc:63:c0:6b:3d:b8:70:d2:f6:b9:70.

Are you sure you want to continueconnecting (yes/no)? yes

Warning: Permanently added'node2,192.168.56.105' (RSA) to the list of known hosts.

grid@node2's password:

[grid@node1 .ssh]$ ssh node2 cat~/.ssh/id_dsa.pub >> ./authorized_keys

grid@node2's password:

[grid@node1 .ssh]$ scp authorized_keys node2:~/.ssh/authorized_keys

grid@node2's password:

Permission denied, please try again.

grid@node2's password:

authorized_keys                                                                                                                                        100% 1984     1.9KB/s  00:00

 

配置oracle用户的互信

Node服务器

[oracle@node1 ~]$ ssh-keygen -t rsa

Generating public/private rsa key pair.

Enter file in which to save the key(/home/oracle/.ssh/id_rsa):

Created directory '/home/oracle/.ssh'.

Enter passphrase (empty for no passphrase):

Enter same passphrase again:

Your identification has been saved in/home/oracle/.ssh/id_rsa.

Your public key has been saved in/home/oracle/.ssh/id_rsa.pub.

The key fingerprint is:

68:cb:af:7e:32:d0:1f:35:f8:6b:1c:63:6e:4e:da:70oracle@node1

The key's randomart image is:

+--[ RSA 2048]----+

|                 |

|                 |

|        .       |

|      .. o      |

|    .o So .     |

|   .o... =      |

|    .o..=E+     |

|     o.o**      |

|    .o=o+o      |

+-----------------+

[oracle@node1 ~]$ ssh-keygen -t dsa

Generating public/private dsa key pair.

Enter file in which to save the key(/home/oracle/.ssh/id_dsa):

Enter passphrase (empty for no passphrase):

Enter same passphrase again:

Your identification has been saved in/home/oracle/.ssh/id_dsa.

Your public key has been saved in/home/oracle/.ssh/id_dsa.pub.

The key fingerprint is:

fe:45:af:f3:f0:8b:42:1e:fd:d4:c3:45:15:2f:6b:20oracle@node1

The key's randomart image is:

+--[ DSA 1024]----+

|               .+|

|                o|

|         E . ...|

|          . . o.|

|       S  .. +..|

|      .  o..o.o.|

|       .o .oo. .|

|        .o..=.  |

|         ..oo+. |

+-----------------+

Node2服务器

[oracle@node2 ~]$  ssh-keygen -t rsa

Generating public/private rsa key pair.

Enter file in which to save the key(/home/oracle/.ssh/id_rsa):

Created directory '/home/oracle/.ssh'.

Enter passphrase (empty for no passphrase):

Enter same passphrase again:

Your identification has been saved in/home/oracle/.ssh/id_rsa.

Your public key has been saved in/home/oracle/.ssh/id_rsa.pub.

The key fingerprint is:

9f:51:c4:e3:b9:1a:15:44:8e:bd:0d:df:4e:6b:be:63oracle@node2

The key's randomart image is:

+--[ RSA 2048]----+

|          ++    |

|          =+    |

|         ..=+   |

|          .+= . |

|       S ....o o|

|        ..o.  o.|

|         oo   o.|

|         .   oE |

|              .oo|

+-----------------+

[oracle@node2 ~]$  ssh-keygen -t dsa

Generating public/private dsa key pair.

Enter file in which to save the key(/home/oracle/.ssh/id_dsa):

Enter passphrase (empty for no passphrase):

Enter same passphrase again:

Your identification has been saved in/home/oracle/.ssh/id_dsa.

Your public key has been saved in/home/oracle/.ssh/id_dsa.pub.

The key fingerprint is:

25:b7:72:6c:7a:8b:71:19:d4:f6:91:96:34:eb:c9:aaoracle@node2

The key's randomart image is:

+--[ DSA 1024]----+

|              o |

|          . . = |

|       . + o *  |

|        * o = o |

|       S *   =  |

|        = o .   |

|       o + .    |

|        = o     |

|       . E      |

+-----------------+

 

[oracle@node1 .ssh]$ cat ./id_rsa.pub>> ./authorized_keys

[oracle@node1 .ssh]$ cat ./id_dsa.pub>> ./authorized_keys   

[oracle@node1 .ssh]$ ssh node2 cat~/.ssh/id_rsa.pub >> ./authorized_keys

The authenticity of host 'node2(192.168.56.105)' can't be established.

RSA key fingerprint isfd:dc:13:60:e2:cc:63:c0:6b:3d:b8:70:d2:f6:b9:70.

Are you sure you want to continueconnecting (yes/no)? yes

Warning: Permanently added'node2,192.168.56.105' (RSA) to the list of known hosts.

oracle@node2's password:

[oracle@node1 .ssh]$ ssh node2 cat~/.ssh/id_dsa.pub >> ./authorized_keys

oracle@node2's password:

[oracle@node1 .ssh]$ scp authorized_keys node2:~/.ssh/authorized_keys

oracle@node2's password:

authorized_keys                                                                                                                                         100%1992     2.0KB/s   00:00   

[oracle@node1 .ssh]$

 

 

 

完成后在node1和node2上面分别以grid和oracle测试:

Node1服务器

[oracle@node1 .ssh]$ ssh node2 date

2018年 05月 31日星期四 00:47:07 CST

[oracle@node1 .ssh]$ ssh node2-priv date

The authenticity of host 'node2-priv(8.8.8.88)' can't be established.

RSA key fingerprint isfd:dc:13:60:e2:cc:63:c0:6b:3d:b8:70:d2:f6:b9:70.

Are you sure you want to continueconnecting (yes/no)? yes

Warning: Permanently added'node2-priv,8.8.8.88' (RSA) to the list of known hosts.

2018年 05月 31日星期四 00:47:26 CST

[oracle@node1 .ssh]$ su - grid

密码:

[grid@node1 ~]$  ssh node2 date

2018年 05月 31日星期四 00:47:51 CST

[grid@node1 ~]$  ssh node2-priv date

The authenticity of host 'node2-priv(8.8.8.88)' can't be established.

RSA key fingerprint isfd:dc:13:60:e2:cc:63:c0:6b:3d:b8:70:d2:f6:b9:70.

Are you sure you want to continueconnecting (yes/no)? yes

Warning: Permanently added'node2-priv,8.8.8.88' (RSA) to the list of known hosts.

2018年 05月 31日星期四 00:48:01 CST

 

Node2服务器

[oracle@node2 ~]$ ssh node1 date

The authenticity of host 'node1(192.168.56.103)' can't be established.

RSA key fingerprint isfd:dc:13:60:e2:cc:63:c0:6b:3d:b8:70:d2:f6:b9:70.

Are you sure you want to continue connecting(yes/no)? yes

Warning: Permanently added'node1,192.168.56.103' (RSA) to the list of known hosts.

2018年 05月 31日星期四 00:48:23 CST

[oracle@node2 ~]$ ssh node1-priv date

The authenticity of host 'node1-priv(8.8.8.8)' can't be established.

RSA key fingerprint isfd:dc:13:60:e2:cc:63:c0:6b:3d:b8:70:d2:f6:b9:70.

Are you sure you want to continueconnecting (yes/no)? yes

Warning: Permanently added'node1-priv,8.8.8.8' (RSA) to the list of known hosts.

2018年 05月 31日星期四 00:48:36 CST

[oracle@node2 ~]$

[oracle@node2 ~]$

[oracle@node2 ~]$

[oracle@node2 ~]$ su - grid

密码:

[grid@node2 ~]$ ssh node1 date

The authenticity of host 'node1(192.168.56.103)' can't be established.

RSA key fingerprint isfd:dc:13:60:e2:cc:63:c0:6b:3d:b8:70:d2:f6:b9:70.

Are you sure you want to continueconnecting (yes/no)? yes

Warning: Permanently added'node1,192.168.56.103' (RSA) to the list of known hosts.

2018年 05月 31日星期四 00:48:53 CST

[grid@node2 ~]$ ssh node1-priv date

The authenticity of host 'node1-priv(8.8.8.8)' can't be established.

RSA key fingerprint isfd:dc:13:60:e2:cc:63:c0:6b:3d:b8:70:d2:f6:b9:70.

Are you sure you want to continueconnecting (yes/no)? yes

Warning: Permanently added'node1-priv,8.8.8.8' (RSA) to the list of known hosts.

2018年 05月 31日星期四 00:49:03 CST

 

1.9     配置时间同步

只需要删除ntp.conf文件

查看状态crsctl check ctss

 

时间同步一样有两种方式实现:ntp和ctss

rac安装时当安装程序发现ntp处于非活动状态时,安装集群时间同步服务将以活动模式自动进行安装并同步所有节点的时间,如果发现配置了ntp,则以观察者模式启动集群时间同步服务

 

oracle集群时间同步服务就是为了oracle rac数据库无法访问ntp服务的组织提供时间同步服务的

 

1、 设置ctss时间同步

Node1服务器

[root@node1 ~]# service ntpd status

ntpd 已停

[root@node1 ~]# chkconfig ntpd off

[root@node1 ~]# mv /etc/ntp.conf /etc/ntp.conf.bak

[root@node1 ~]# chkconfig  ntpd --list

ntpd            0:关闭  1:关闭  2:关闭  3:关闭  4:关闭  5:关闭  6:关闭

 

node2服务器

[root@node2 ~]# service ntpd status 

ntpd 已停

[root@node2 ~]#  chkconfig ntpd off

[root@node2 ~]#  mv /etc/ntp.conf /etc/ntp.conf.bak

[root@node2 ~]# chkconfig  ntpd --list

ntpd            0:关闭  1:关闭  2:关闭  3:关闭  4:关闭  5:关闭  6:关闭

 

 

1.10  系统参数设置

(node1服务器和node2服务器都需要做,示例只在node1服务器做)

1、 修改limits.conf文件

[root@node1 ~]# vi /etc/security/limits.conf

 

grid soft nproc 2047
grid hard nproc 16384
grid soft nofile 1024
grid hard nofile 65536

grid soft core unlimited


oracle soft nproc 2047
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536

oracle soft core unlimited

oracle soft stack 10240

 

2、 修改login文件

 

[root@node1 ~]# vi /etc/pam.d/login

 

session   required     pam_limits.so

 

3、 修改sysctl.conf文件

 

[root@node1 ~]# vi /etc/sysctl.conf

 

kernel.shmmax = 4294967295

kernel.shmall = 2097152

kernel.shmmni = 4096

kernel.sem = 250 32000 100 128

fs.file-max = 6815744

net.ipv4.ip_local_port_range = 9000 65500

net.core.rmem_default=262144

net.core.rmem_max=4194304

net.core.wmem_default=262144

net.core.wmem_max=1048576

fs.aio-max-nr=1048576

 

4、 检查安装包

 

rpm -q binutils compat-libstdc++-33elfutils-libelf elfutils-libelf-devel gcc gcc-c++ glibc glibc-commonglibc-devel glibc-headers kernel-headers ksh libaio  libaio-devel libgcc libgomp libstdc++libstdc++-devel make numactl-devel sysstat unixODBC unixODBC-devel

 

配置yum源安装缺失的包

[root@node1 yum.repos.d]# cp public-yum-ol6.repo oracle.repo

[root@node1 yum.repos.d]# mv public-yum-ol6.repo public-yum-ol6.repo.bak

[root@node1 yum.repos.d]# vi oracle.repo

[oracle]

name=Oracle

baseurl=file:///iso

gpgcheck=0

enabled=1

 

[root@node1 yum.repos.d]# yum clean all

[root@node1 yum.repos.d]# yum repolist

 

安装缺失包

 

[root@node1 yum.repos.d]# yum -y install numactl-devel unixODBC unixODBC-devel

 

 

 

1.11  安装cvuqdisk包

 

cvuqdisk 包是在集群验证实用程序(cluvfy)运行时用于发现共享磁盘,该包已经包含在grid/rpm目录下面,需要在所有节点上安装

 

1、 上传安装包

2、 解压安装包

3、 安装包

Node1服务器

[root@node1 grid]# cd grid/

[root@node1 grid]# ls

install readme.html  response  rpm runcluvfy.sh  runInstaller  sshsetup stage  welcome.html

[root@node1 grid]# cd rpm/

[root@node1 rpm]# ls

cvuqdisk-1.0.9-1.rpm

[root@node1 rpm]# rpm -ivh cvuqdisk-1.0.9-1.rpm

Preparing...                ###########################################[100%]

Using default group oinstall to installpackage

  1:cvuqdisk              ########################################### [100%]

Node2服务器

[root@node2 grid]# cd grid/

[root@node2 grid]# cd rpm/

[root@node2 rpm]# ls

cvuqdisk-1.0.9-1.rpm

[root@node2 rpm]# rpm -ivh cvuqdisk-1.0.9-1.rpm

Preparing...               ########################################### [100%]

Using default group oinstall to installpackage

  1:cvuqdisk              ########################################### [100%]

 

1.1     创建共享存储

 

1.1.1  计划

计划创建13个共享磁盘前三个为表决磁盘

         表决磁盘是因网络导致脑裂的情况而创建的

         如果放入asm中有以下几点要求

                  1、表决磁盘文件必须全部放在asm中

                  2、表决磁盘存在asm中分个数不能删除和添加,而是通过asm的normal,high,external冗余级别决定的

                  3、表决磁盘在11g中不在支持dd命令对其进行备份和还原,而是crsctl相关命令

                  4、表决磁盘文件个数要是奇数,便于投票选举且表决磁盘文件个数最多为15个,一般运行没必要超过5个

其余10个分别建立DATA和FRA两个磁盘组

具体分配如下

磁盘组

ocr_voting

         映射关系

         /dev/sdb1..3--->crs1..3

         磁盘大小

         1G*3

DATA

         映射关系

         /dev/sdb5..10---->data1..6

         磁盘大小

         2g*6

FRA

         映射关系

         /dev/sdb11..13--->FRA1..3

         磁盘大小

         2g*3

 

 

1.1.2  在node1服务器上进行磁盘分区

[root@node1 ~]# fdisk /dev/sdb

Device contains neither a valid DOSpartition table, nor Sun, SGI or OSF disklabel

Building a new DOS disklabel with diskidentifier 0x654475ac.

Changes will remain in memory only, untilyou decide to write them.

After that, of course, the previous contentwon't be recoverable.

 

Warning: invalid flag 0x0000 of partitiontable 4 will be corrected by w(rite)

 

WARNING: DOS-compatible mode is deprecated.It's strongly recommended to

        switch off the mode (command 'c') and change display units to

        sectors (command 'u').

 

Command (m for help): p

 

Disk /dev/sdb: 107.4 GB, 107374182400 bytes

255 heads, 63 sectors/track, 13054 cylinders

Units = cylinders of 16065 * 512 = 8225280bytes

Sector size (logical/physical): 512 bytes /512 bytes

I/O size (minimum/optimal): 512 bytes / 512bytes

Disk identifier: 0x654475ac

 

  Device Boot      Start         End      Blocks  Id  System

 

Command (m for help): n

Command action

  e   extended

  p   primary partition (1-4)

p

Partition number (1-4): 1

First cylinder (1-13054, default 1):

Using default value 1

Last cylinder, +cylinders or +size{K,M,G}(1-13054, default 13054): +1G   

 

Command (m for help): n

Command action

  e   extended

  p   primary partition (1-4)

p

Partition number (1-4): 2

First cylinder (133-13054, default 133):

Using default value 133

Last cylinder, +cylinders or +size{K,M,G}(133-13054, default 13054): +1G

 

Command (m for help): n

Command action

  e   extended

  p   primary partition (1-4)

p

Partition number (1-4): 3

First cylinder (265-13054, default 265):

Using default value 265

Last cylinder, +cylinders or +size{K,M,G}(265-13054, default 13054): +1G

 

Command (m for help): n

Command action

  e   extended

  p   primary partition (1-4)

e

Selected partition 4

First cylinder (397-13054, default 397):

Using default value 397

Last cylinder, +cylinders or +size{K,M,G}(397-13054, default 13054):

Using default value 13054

 

Command (m for help): n

First cylinder (397-13054, default 397):

Using default value 397

Last cylinder, +cylinders or +size{K,M,G}(397-13054, default 13054): +2G

 

Command (m for help): N

First cylinder (659-13054, default 659):

Using default value 659

Last cylinder, +cylinders or +size{K,M,G}(659-13054, default 13054): +2G

 

Command (m for help): n

First cylinder (921-13054, default 921):

Using default value 921

Last cylinder, +cylinders or +size{K,M,G}(921-13054, default 13054): +2G

 

Command (m for help): n

First cylinder (1183-13054, default 1183):

Using default value 1183

Last cylinder, +cylinders or +size{K,M,G}(1183-13054, default 13054): +2G

 

Command (m for help): n

First cylinder (1445-13054, default 1445):

Using default value 1445

Last cylinder, +cylinders or +size{K,M,G}(1445-13054, default 13054): +2G

 

Command (m for help):

Command (m for help): n

First cylinder (1707-13054, default 1707):

Using default value 1707

Last cylinder, +cylinders or +size{K,M,G}(1707-13054, default 13054): +2G

 

Command (m for help): n

First cylinder (1969-13054, default 1969):

Using default value 1969

Last cylinder, +cylinders or +size{K,M,G}(1969-13054, default 13054): +2G

 

Command (m for help): n

First cylinder (2231-13054, default 2231):

Using default value 2231

Last cylinder, +cylinders or +size{K,M,G}(2231-13054, default 13054): +2G

 

Command (m for help): p

 

Disk /dev/sdb: 107.4 GB, 107374182400 bytes

255 heads, 63 sectors/track, 13054cylinders

Units = cylinders of 16065 * 512 = 8225280bytes

Sector size (logical/physical): 512 bytes /512 bytes

I/O size (minimum/optimal): 512 bytes / 512bytes

Disk identifier: 0x654475ac

 

  Device Boot      Start         End      Blocks  Id  System

/dev/sdb1               1         132    1060258+  83  Linux

/dev/sdb2             133         264    1060290   83  Linux

/dev/sdb3             265         396    1060290   83  Linux

/dev/sdb4             397       13054  101675385    5  Extended

/dev/sdb5             397         658    2104483+  83  Linux

/dev/sdb6             659         920    2104483+  83  Linux

/dev/sdb7             921        1182    2104483+  83  Linux

/dev/sdb8            1183        1444    2104483+  83  Linux

/dev/sdb9            1445        1706    2104483+  83  Linux

/dev/sdb10           1707        1968    2104483+  83  Linux

/dev/sdb11           1969        2230    2104483+  83  Linux

/dev/sdb12           2231        2492    2104483+  83  Linux

 

Command (m for help): w

The partition table has been altered!

 

Calling ioctl() to re-read partition table.

Syncing disks.

 

1.1.3  node2服务器查看共享存储

[root@node2 dev]# ls -l /dev/sdb*

brw-rw---- 1 root disk 8, 16 6月   5 17:14 /dev/sdb

brw-rw---- 1 root disk 8, 17 6月   5 17:14 /dev/sdb1

brw-rw---- 1 root disk 8, 26 6月   5 17:14 /dev/sdb10

brw-rw---- 1 root disk 8, 27 6月   5 17:14 /dev/sdb11

brw-rw---- 1 root disk 8, 28 6月   5 17:14 /dev/sdb12

brw-rw---- 1 root disk 8, 29 6月   5 17:14 /dev/sdb13

brw-rw---- 1 root disk 8, 18 6月   5 17:14 /dev/sdb2

brw-rw---- 1 root disk 8, 19 6月   5 17:14 /dev/sdb3

brw-rw---- 1 root disk 8, 20 6月   5 17:14 /dev/sdb4

brw-rw---- 1 root disk 8, 21 6月   5 17:14 /dev/sdb5

brw-rw---- 1 root disk 8, 22 6月   5 17:14 /dev/sdb6

brw-rw---- 1 root disk 8, 23 6月   5 17:14 /dev/sdb7

brw-rw---- 1 root disk 8, 24 6月   5 17:14 /dev/sdb8

brw-rw---- 1 root disk 8, 25 6月   5 17:14 /dev/sdb9

 

1.1.1  Oracleasm创建asm磁盘

 

node1服务器

 

[root@node1 rules.d]# oracleasm configure -i

Configuring the Oracle ASM library driver.

 

This will configure the on-boot propertiesof the Oracle ASM library

driver. The following questions will determine whether the driver is

loaded on boot and what permissions it willhave.  The current values

will be shown in brackets ('[]').  Hitting <ENTER> without typing an

answer will keep that current value.  Ctrl-C will abort.

 

Default user to own the driver interface[]: grid

Default group to own the driver interface[]: asmadmin

Start Oracle ASM library driver on boot(y/n) [y]:

Scan for Oracle ASM disks on boot (y/n)[y]:

Writing Oracle ASM library driverconfiguration: done

 

node2服务器同上操作

 

 

node1服务器

[root@node1 ~]# /etc/init.d/oracleasm createdisk CRS1 /dev/sdb1

Marking disk "CRS1" as an ASM disk:                        [  OK  ]

[root@node1 ~]# /etc/init.d/oracleasm createdisk CRS2 /dev/sdb2

Marking disk "CRS2" as an ASM disk:                        [  OK  ]

[root@node1 ~]# /etc/init.d/oracleasm createdisk CRS3 /dev/sdb3

Marking disk "CRS3" as an ASM disk:                        [  OK  ]

[root@node1 ~]# /etc/init.d/oracleasm createdisk DATA1 /dev/sdb5

Marking disk "DATA1" as an ASM disk:                       [  OK  ]

[root@node1 ~]# /etc/init.d/oracleasm createdisk DATA2 /dev/sdb6

Marking disk "DATA2" as an ASM disk:                       [  OK  ]

[root@node1 ~]# /etc/init.d/oracleasm createdisk DATA3 /dev/sdb7

Marking disk "DATA3" as an ASM disk:                       [  OK  ]

[root@node1 ~]# /etc/init.d/oracleasm createdisk DATA4 /dev/sdb8

Marking disk "DATA4" as an ASM disk:                       [  OK  ]

[root@node1 ~]# /etc/init.d/oracleasm createdisk DATA5 /dev/sdb9

Marking disk "DATA5" as an ASM disk:                       [  OK  ]

[root@node1 ~]# /etc/init.d/oracleasm createdisk DATA6 /dev/sdb10

Marking disk "DATA6" as an ASM disk:                       [  OK  ]

[root@node1 ~]# /etc/init.d/oracleasm createdisk FRA1 /dev/sdb11

Marking disk "FRA1" as an ASM disk:                        [  OK  ]

[root@node1 ~]# /etc/init.d/oracleasm createdisk FRA2 /dev/sdb12

Marking disk "FRA2" as an ASM disk:                        [  OK  ]

[root@node1 ~]# /etc/init.d/oracleasm createdisk FRA3 /dev/sdb13

Marking disk "FRA3" as an ASM disk:                        [  OK  ]

[root@node1 ~]# /etc/init.d/oracleasm createdisk FRA4 /dev/sdb14

Marking disk "FRA4" as an ASM disk:                        [FAILED]

[root@node1 ~]# oracleasm listdisks

CRS1

CRS2

CRS3

DATA1

DATA2

DATA3

DATA4

DATA5

DATA6

FRA1

FRA2

FRA3

 

 

Node2服务器

[root@node2 ~]# oracleasm scandisks

Reloading disk partitions: done

Cleaning any stale ASM disks...

Scanning system for ASM disks...

Instantiating disk "CRS1"

Instantiating disk "CRS2"

Instantiating disk "CRS3"

Instantiating disk "DATA1"

Instantiating disk "DATA2"

Instantiating disk "DATA3"

Instantiating disk "DATA4"

Instantiating disk "DATA5"

Instantiating disk "DATA6"

Instantiating disk "FRA1"

Instantiating disk "FRA2"

Instantiating disk "FRA3"

[root@node2 ~]#  oracleasm listdisks

CRS1

CRS2

CRS3

DATA1

DATA2

DATA3

DATA4

DATA5

DATA6

FRA1

FRA2

FRA3

 

 

1      安装grid

 

修改scan name,并去掉勾选configure GNS

 

 

添加节点信息和节点互信

 

 

 

修改默认路径为oracle_base

 

 

安装完成后对安装结果进行检验

[root@node1 ~]# su - grid

[grid@node1 ~]$ crs_stat -t

Name           Type           Target    State    Host       

------------------------------------------------------------

ora.CRS.dg     ora....up.type ONLINE    ONLINE   node1      

ora....ER.lsnr ora....er.type ONLINE    ONLINE   node2      

ora....N1.lsnr ora....er.type ONLINE    ONLINE   node2      

ora.asm        ora.asm.type   ONLINE   ONLINE    node1      

ora.cvu        ora.cvu.type   ONLINE   ONLINE    node2      

ora.gsd        ora.gsd.type   OFFLINE  OFFLINE              

ora....network ora....rk.type ONLINE    ONLINE   node2      

ora....SM1.asm application    ONLINE   ONLINE    node1      

ora....E1.lsnr application    ONLINE   OFFLINE              

ora.node1.gsd  application   OFFLINE   OFFLINE              

ora.node1.ons  application   ONLINE    OFFLINE              

ora.node1.vip  ora....t1.type ONLINE    ONLINE   node2      

ora....SM2.asm application    ONLINE   ONLINE    node2      

ora....E2.lsnr application    ONLINE   ONLINE    node2      

ora.node2.gsd  application   OFFLINE   OFFLINE              

ora.node2.ons  application   ONLINE    ONLINE    node2      

ora.node2.vip  ora....t1.type ONLINE    ONLINE   node2      

ora.oc4j       ora.oc4j.type  ONLINE   ONLINE    node1      

ora.ons        ora.ons.type   ONLINE   ONLINE    node2      

ora....ry.acfs ora....fs.type ONLINE    ONLINE   node1      

ora.scan1.vip  ora....ip.type ONLINE    ONLINE   node2  

 

2      创建DATA和FRA磁盘组

 

登录至grid用户执行命令asmca

 

[grid@node1 ~]$ LANG=C

[grid@node1 ~]$ asmca

 

 

点击create创建DATA数据盘

选择normal类型

 

点击create创建FRA盘

 

 

创建完成后

 

3      安装oracle软件

[root@node1 ~]# xhost +

[oracle@node1 ~]$ LANG=C

 

 

 

4      安装oracle数据库

 

 

取消em可以在以后单独配置em

 

 

 

 

 

 

 

 

 

 

创建日志组

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

修改连接数导致内存给的太小所以报错

 

 

 

Node1服务器检查

[oracle@node1 ~]$ sqlplus / as sysdba

 

SQL*Plus: Release 11.2.0.4.0 Production onTue Jun 12 17:21:41 2018

 

Copyright (c) 1982, 2013, Oracle.  All rights reserved.

 

 

Connected to:

Oracle Database 11g Enterprise EditionRelease 11.2.0.4.0 - 64bit Production

With the Partitioning, Real ApplicationClusters, Automatic Storage Management, OLAP,

Data Mining and Real Application Testingoptions

 

SQL> select instance_name,status fromv$instance;

 

INSTANCE_NAME    STATUS

---------------- ------------

node1            OPEN

 

SQL>

Node2服务器检查

[oracle@node2 ~]$ sqlplus / as sysdba

 

SQL*Plus: Release 11.2.0.4.0 Production onTue Jun 12 17:22:13 2018

 

Copyright (c) 1982, 2013, Oracle.  All rights reserved.

 

 

Connected to:

Oracle Database 11g Enterprise EditionRelease 11.2.0.4.0 - 64bit Production

With the Partitioning, Real ApplicationClusters, Automatic Storage Management, OLAP,

Data Mining and Real Application Testingoptions

 

SQL> select instance_name,status from v$instance;

 

INSTANCE_NAME    STATUS

---------------- ------------

node2            OPEN

 

SQL>

 

 

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值