Oracle 19c RAC Installation on CentOS 7.5

 

1. Node Disk Group Planning

Plan three ASM disk groups, each containing one or more disks. /dev/sda is the system disk; the ASM disks are planned as follows:

Disk Group   Device      Size   ASM Disk    Purpose               Redundancy
OCR          /dev/sdb    10G    asm-ocr1    OCR/Voting File       EXTERNAL
DATA         /dev/sdc    40G    asm-data1   Data Files            EXTERNAL
FRA          /dev/sdd    20G    asm-fra1    Fast Recovery Area    EXTERNAL
 

Disk group descriptions:

  • OCR: OCR and voting files (OCR/Voting File)
  • DATA: data files (Data Files)
  • FRA: archive logs and fast recovery area (Fast Recovery Area)
On the VMware host, create the shared disks with vmware-vdiskmanager:

D:\software\centos\sharedisks>

D:\software\centos\sharedisks>"E:\Soft\BIGDATA\Centos\VM\VmvareWorkstation\vmware-vdiskmanager.exe" -c -s 10GB -t 4 sharedisk01.vmdk
Creating disk 'sharedisk01.vmdk'
  Create: 100% done.
Virtual disk creation successful.

D:\software\centos\sharedisks>"E:\Soft\BIGDATA\Centos\VM\VmvareWorkstation\vmware-vdiskmanager.exe" -c -s 40GB -t 4 sharedisk02.vmdk
Creating disk 'sharedisk02.vmdk'
  Create: 100% done.
Virtual disk creation successful.

D:\software\centos\sharedisks>"E:\Soft\BIGDATA\Centos\VM\VmvareWorkstation\vmware-vdiskmanager.exe" -c -s 20GB -t 4 sharedisk03.vmdk
Creating disk 'sharedisk03.vmdk'
  Create: 100% done.
Virtual disk creation successful.

D:\software\centos\sharedisks>

Only after appending the following to the end of each virtual machine's .vmx file can the VMs be powered on normally:

disk.locking = "false"
scsi1.sharedBus = "VIRTUAL"
disk.EnableUUID = "TRUE"
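
For reference, here is a minimal sketch of the remaining .vmx entries that attach the three shared vmdks to a dedicated SCSI controller (scsi1, whose sharedBus is already set above). The file paths and controller slots are assumptions; adjust them to your own layout:

scsi1.present = "TRUE"
scsi1.virtualDev = "lsilogic"
scsi1:0.present = "TRUE"
scsi1:0.fileName = "D:\software\centos\sharedisks\sharedisk01.vmdk"
scsi1:0.mode = "independent-persistent"
scsi1:1.present = "TRUE"
scsi1:1.fileName = "D:\software\centos\sharedisks\sharedisk02.vmdk"
scsi1:1.mode = "independent-persistent"
scsi1:2.present = "TRUE"
scsi1:2.fileName = "D:\software\centos\sharedisks\sharedisk03.vmdk"
scsi1:2.mode = "independent-persistent"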

Check the disk information:

Node1:

[root@node1 ~]# 
[root@node1 ~]# lsscsi
[0:0:0:0]    disk    VMware,  VMware Virtual S 1.0   /dev/sda 
[0:0:1:0]    disk    VMware,  VMware Virtual S 1.0   /dev/sdb 
[0:0:2:0]    disk    VMware,  VMware Virtual S 1.0   /dev/sdc 
[0:0:3:0]    disk    VMware,  VMware Virtual S 1.0   /dev/sdd 
[2:0:0:0]    cd/dvd  NECVMWar VMware IDE CDR10 1.00  /dev/sr0 
[root@node1 ~]# 

Node2:

[root@node2 ~]# 
[root@node2 ~]# lsscsi
[0:0:0:0]    disk    VMware,  VMware Virtual S 1.0   /dev/sda 
[0:0:1:0]    disk    VMware,  VMware Virtual S 1.0   /dev/sdb 
[0:0:2:0]    disk    VMware,  VMware Virtual S 1.0   /dev/sdc 
[0:0:3:0]    disk    VMware,  VMware Virtual S 1.0   /dev/sdd 
[2:0:0:0]    cd/dvd  NECVMWar VMware IDE CDR10 1.00  /dev/sr0 
[root@node2 ~]# 

IP Network Configuration

Node1 IP network configuration:

[root@node1 network-scripts]# cat ifcfg-ens33
TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO="static"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="no"
IPV6INIT="yes"
IPV6_AUTOCONF="yes"
IPV6_DEFROUTE="yes"
IPV6_FAILURE_FATAL="no"
IPV6_ADDR_GEN_MODE="stable-privacy"
NAME="ens33"
UUID="160dfb25-535e-49b1-ac14-791c0817279a"
DEVICE="ens33"
ONBOOT="yes"

IPADDR=192.168.8.111
GATEWAY=192.168.8.2
DNS1=192.168.8.2
DNS2=8.8.8.8

[root@node1 network-scripts]# cat ifcfg-ens34
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens34
UUID=7bc7fda1-d92c-42f8-b276-28d69df67b5c
DEVICE=ens34
ONBOOT=yes


IPADDR=192.168.100.111
GATEWAY=192.168.100.2
DNS1=192.168.100.2
DNS2=8.8.8.8
[root@node1 network-scripts]# 

Node2 IP network configuration:

[root@node2 network-scripts]# cat ifcfg-ens33
TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO="static"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="no"
IPV6INIT="yes"
IPV6_AUTOCONF="yes"
IPV6_DEFROUTE="yes"
IPV6_FAILURE_FATAL="no"
IPV6_ADDR_GEN_MODE="stable-privacy"
NAME="ens33"
UUID="afc93bb0-6ab3-4fda-a01b-65e5b4ce1e91"
DEVICE="ens33"
ONBOOT="yes"


IPADDR=192.168.8.112
GATEWAY=192.168.8.2
DNS1=192.168.8.2
DNS2=8.8.8.8
[root@node2 network-scripts]# cat ifcfg-ens34
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens34
UUID=f0182ece-4d8f-4ec4-b7ef-8263da0d7fbc
DEVICE=ens34
ONBOOT=yes


IPADDR=192.168.100.112
GATEWAY=192.168.100.2
DNS1=192.168.100.2
DNS2=8.8.8.8
[root@node2 network-scripts]# 

Configure Hostnames

# node1
hostnamectl set-hostname node1

# node2
hostnamectl set-hostname node2

Configure /etc/hosts

[oracle@node1 ~]$ cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6



# Public
192.168.8.111 node1 node1.racdb.local
192.168.8.112 node2 node2.racdb.local

# Private
192.168.100.111 node1-priv node1-priv.racdb.local
192.168.100.112 node2-priv node2-priv.racdb.local

# Virtual
192.168.8.113 node1-vip node1-vip.racdb.local
192.168.8.114 node2-vip node2-vip.racdb.local

# SCAN
192.168.8.115 node-cluster-scan node-cluster-scan.racdb.local
192.168.8.116 node-cluster-scan node-cluster-scan.racdb.local
192.168.8.117 node-cluster-scan node-cluster-scan.racdb.local
[oracle@node1 ~]$ 
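
Note that when the SCAN name is resolved from /etc/hosts, only the first of the three identical entries is ever returned; genuine round-robin over all three SCAN IPs requires DNS, though a single SCAN address is acceptable for a lab. As a quick sanity check of the static entries (the VIP and SCAN addresses only answer after Grid Infrastructure brings them online), something like this can be run on each node:

for h in node1 node2 node1-priv node2-priv; do
    ping -c 1 -W 1 "$h" >/dev/null && echo "$h reachable" || echo "$h UNREACHABLE"
done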

Configure Time Synchronization

Use chrony to synchronize time from the public Internet. (Because CTSS handles cluster time synchronization, this step can be skipped.)

yum install -y chrony
systemctl enable --now chronyd

Because the Oracle Cluster Time Synchronization Service (CTSS) synchronizes time across the cluster, both NTP and chronyd must be disabled.

rm -rf /var/run/chronyd.pid
rm -rf /etc/chrony.conf
rm -rf /etc/ntp.conf
systemctl stop ntpd.service
systemctl disable ntpd
systemctl stop chronyd.service
systemctl disable chronyd.service
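
Run a quick check on both nodes to confirm neither service will come back after a reboot; both fields should read disabled/inactive (ntpd may simply not be installed):

for s in ntpd chronyd; do
    printf '%s: enabled=%s active=%s\n' "$s" \
        "$(systemctl is-enabled "$s" 2>/dev/null)" "$(systemctl is-active "$s")"
done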



The following checks are performed after Grid Infrastructure has been installed:


# Check whether a third-party time synchronization service exists in the cluster
cluvfy comp clocksync -n all

# Check the CTSS status
[grid@node1 ~]$ crsctl check ctss
CRS-4700: The Cluster Time Synchronization Service is in Observer mode

# Remove the third-party time synchronization configuration files

rm -rf /etc/ntp.conf
rm -rf /etc/chrony.conf
rm -rf /var/run/chronyd.pid

# After about half a minute, the status changes to Active mode

[grid@node1 ~]$ crsctl check ctss
CRS-4701: The Cluster Time Synchronization Service is in Active mode.
CRS-4702: Offset (in msec): 0

# Check cluster clock synchronization again

[grid@node1 ~]$ cluvfy comp clocksync -n all

Verifying Clock Synchronization ...PASSED

Verification of Clock Synchronization across the cluster nodes was successful. 

CVU operation performed:      Clock Synchronization across the cluster nodes
Date:                         Nov 9, 2022 9:12:41 PM
CVU home:                     /u01/app/19.3.0/grid/
User:                         grid
[grid@node1 ~]$ 

Configure SELinux and the Firewall

Disable SELinux

getenforce  ## first check whether this reports "Disabled"; if not, run the following

sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config  ## covers both enforcing and permissive

After rebooting the OS, run getenforce to confirm it reports "Disabled".
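
The sed edit only takes effect at the next boot; to stop SELinux enforcement for the current session as well (a convenience step, not part of the original procedure; it errors out harmlessly if SELinux is already disabled):

setenforce 0    # switches to permissive mode immediately, no reboot needed
getenforce      # now reports "Permissive"; "Disabled" after the reboot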

Disable firewalld

systemctl disable --now firewalld

[root@node1 ~]#
[root@node1 ~]# systemctl disable --now firewalld
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@node1 ~]#

[root@node2 ~]#
[root@node2 ~]# systemctl disable --now firewalld
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@node2 ~]#

Install Dependency Packages

yum groupinstall -y "Server with GUI"
yum install -y bc \
binutils \
compat-libcap1 \
compat-libstdc++-33 \
gcc \
gcc-c++ \
elfutils-libelf \
elfutils-libelf-devel \
glibc \
glibc-devel \
ksh \
libaio \
libaio-devel \
libgcc \
libstdc++ \
libstdc++-devel \
libxcb \
libX11 \
libXau \
libXi \
libXtst \
libXrender \
libXrender-devel \
make \
net-tools \
nfs-utils \
smartmontools \
sysstat \
e2fsprogs \
e2fsprogs-libs \
fontconfig-devel \
expect \
unzip \
openssh-clients \
readline* \
tigervnc* \
psmisc --skip-broken

Check which packages were installed:

rpm -q bc binutils compat-libcap1 compat-libstdc++-33 gcc gcc-c++ elfutils-libelf elfutils-libelf-devel glibc glibc-devel ksh libaio libaio-devel libgcc libstdc++ libstdc++-devel libxcb libX11 libXau libXi libXtst libXrender libXrender-devel make net-tools nfs-utils smartmontools sysstat e2fsprogs e2fsprogs-libs fontconfig-devel expect unzip openssh-clients readline | grep "not installed"
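
Since rpm -q prints "package X is not installed" for anything missing, the grep above works; an equivalent loop that lists only the missing names (a convenience sketch, with the readline*/tigervnc* globs left out):

for p in bc binutils compat-libcap1 compat-libstdc++-33 gcc gcc-c++ \
         elfutils-libelf elfutils-libelf-devel glibc glibc-devel ksh \
         libaio libaio-devel libgcc libstdc++ libstdc++-devel libxcb \
         libX11 libXau libXi libXtst libXrender libXrender-devel make \
         net-tools nfs-utils smartmontools sysstat e2fsprogs e2fsprogs-libs \
         fontconfig-devel expect unzip openssh-clients psmisc; do
    rpm -q "$p" >/dev/null 2>&1 || echo "missing: $p"
done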

Create Users and Groups

Create the required groups and users:

# Create groups
groupadd -g 54321 oinstall
groupadd -g 54322 dba
groupadd -g 54323 oper
groupadd -g 54324 backupdba
groupadd -g 54325 dgdba
groupadd -g 54326 kmdba
groupadd -g 54327 asmdba
groupadd -g 54328 asmoper
groupadd -g 54329 asmadmin
groupadd -g 54330 racdba

# Create users and add them to the groups
useradd -u 54321 -g oinstall -G dba,asmdba,backupdba,dgdba,kmdba,racdba,oper oracle 
useradd -u 54331 -g oinstall -G dba,asmdba,asmoper,asmadmin,racdba grid

# Set user passwords
echo "oracle" | passwd oracle --stdin
echo "grid" | passwd grid --stdin
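
Verify on both nodes that the IDs and group memberships came out identical; consistent UIDs/GIDs across nodes are required for shared ASM device ownership:

id oracle    # expect uid=54321, gid=54321(oinstall), groups incl. dba,asmdba,racdba
id grid      # expect uid=54331, gid=54321(oinstall), groups incl. asmadmin,asmdba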

Create Directories

mkdir -p /u01/app/19.3.0/grid
mkdir -p /u01/app/grid
mkdir -p /u01/app/oracle/product/19.3.0/dbhome_1

chown -R grid:oinstall /u01
chown -R oracle:oinstall /u01/app/oracle
chmod -R 775 /u01/

Configure shmem (/dev/shm)

cat >>/etc/fstab<<EOF
tmpfs /dev/shm tmpfs defaults,size=8G 0 0
EOF
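
The fstab entry applies at the next boot; to resize /dev/shm immediately (assuming 8G fits within the VM's memory):

mount -o remount,size=8G /dev/shm
df -h /dev/shm    # should now report a size of 8.0G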

Configure NOZEROCONF

cat >>/etc/sysconfig/network<<EOF
NOZEROCONF=yes
EOF

Login Configuration

cat >>/etc/pam.d/login<<EOF
#ORACLE SETTING
session required pam_limits.so
EOF

Configure Kernel Parameters

# Set the following kernel parameters
cat >/etc/sysctl.d/97-oracledatabase-sysctl.conf<<EOF
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.shmall = 2097152
kernel.shmmax = 4294967295
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
EOF

# Apply the settings
sysctl --system
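
Spot-check that the new values are live:

sysctl fs.aio-max-nr fs.file-max kernel.shmall kernel.shmmax kernel.sem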

Set Resource Limits for Users

Configure resource limits for the oracle and grid users:

cat >/etc/security/limits.d/30-oracle.conf<<EOF
grid  soft  nproc 16384
grid  hard  nproc 16384
grid  soft  nofile 1024
grid  hard  nofile 65536
grid  soft  stack 10240
grid  hard  stack 32768
oracle soft nproc 16384
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536
oracle soft stack  10240
oracle hard stack  32768
oracle hard memlock 3145728
oracle soft memlock 3145728
EOF

cat>>/etc/security/limits.d/20-nproc.conf<<EOF
* - nproc 16384
EOF
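
Confirm that a fresh login shell picks the limits up (pam_limits applies them at login):

su - grid -c 'ulimit -Sn; ulimit -Hn; ulimit -Su; ulimit -Ss'
# expected output: 1024, 65536, 16384, 10240 (stack value in KB)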

Configure User Profiles

Note that the ORACLE_HOSTNAME and ORACLE_SID variables differ between node1 and node2.

For the grid user, ORACLE_SID=+ASM1 on node1 and ORACLE_SID=+ASM2 on node2.

node1 configuration

# grid user
cat>>/home/grid/.bash_profile<<'EOF'
# oracle grid
export TMP=/tmp
export TMPDIR=$TMP
export ORACLE_HOSTNAME=node1.racdb.local
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/19.3.0/grid
export ORACLE_SID=+ASM1
export ORACLE_TERM=xterm
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
export CLASSPATH=$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib
EOF

# oracle user
cat>>/home/oracle/.bash_profile<<'EOF'
# oracle
export TMP=/tmp
export TMPDIR=$TMP
export ORACLE_HOSTNAME=node1.racdb.local
export ORACLE_UNQNAME=racdb
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/19.3.0/dbhome_1
export ORACLE_SID=racdb1
export ORACLE_TERM=xterm
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
export CLASSPATH=$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib
EOF

node2 configuration

# grid user
cat>>/home/grid/.bash_profile<<'EOF'
# oracle grid
export TMP=/tmp
export TMPDIR=$TMP
export ORACLE_HOSTNAME=node2.racdb.local
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/19.3.0/grid
export ORACLE_SID=+ASM2
export ORACLE_TERM=xterm
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
export CLASSPATH=$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib
EOF

# oracle user
cat>>/home/oracle/.bash_profile<<'EOF'
# oracle
export TMP=/tmp
export TMPDIR=$TMP
export ORACLE_HOSTNAME=node2.racdb.local
export ORACLE_UNQNAME=racdb
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/19.3.0/dbhome_1
export ORACLE_SID=racdb2
export ORACLE_TERM=xterm
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
export CLASSPATH=$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib
EOF

Configure Passwordless SSH Between Nodes

On node1:

su - grid
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
ssh-copy-id node2
 
su - oracle
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
ssh-copy-id node2

On node2:

su - grid
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
ssh-copy-id node1
 
su - oracle
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
ssh-copy-id node1
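
Afterwards, verify the equivalence in both directions for both users; each command must print the date without prompting for a password (the first connection may still ask to accept the host key, and the Grid installer can also complete SSH setup on its own):

# on node1, once as grid and once as oracle:
ssh -o BatchMode=yes node2 date
# on node2, once as grid and once as oracle:
ssh -o BatchMode=yes node1 date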

Add the parameter below to the NIC configuration files under /etc/sysconfig/network-scripts on both machines.

Once both machines have their IPs configured and the network service has been restarted, the two assigned NICs are bound automatically.

HOTPLUG="no"

Disable Transparent HugePages

# Create a systemd unit
cat > /etc/systemd/system/disable-thp.service <<EOF

[Unit]
Description=Disable Transparent Huge Pages (THP)

[Service]
Type=simple
ExecStart=/bin/sh -c "echo 'never' > /sys/kernel/mm/transparent_hugepage/enabled && echo 'never' > /sys/kernel/mm/transparent_hugepage/defrag"

[Install]
WantedBy=multi-user.target
EOF

# Enable and start the service
systemctl enable --now disable-thp

[root@node1 ~]#
[root@node1 ~]# cat > /etc/systemd/system/disable-thp.service <<EOF
>
> [Unit]
> Description=Disable Transparent Huge Pages (THP)
>
> [Service]
> Type=simple
> ExecStart=/bin/sh -c "echo 'never' > /sys/kernel/mm/transparent_hugepage/enabled && echo 'never' > /sys/kernel/mm/transparent_hugepage/defrag"
>
> [Install]
> WantedBy=multi-user.target
> EOF
[root@node1 ~]# systemctl enable --now disable-thp
Created symlink from /etc/systemd/system/multi-user.target.wants/disable-thp.service to /etc/systemd/system/disable-thp.service.
[root@node1 ~]#

[root@node2 ~]#
[root@node2 ~]# cat > /etc/systemd/system/disable-thp.service <<EOF
>
> [Unit]
> Description=Disable Transparent Huge Pages (THP)
>
> [Service]
> Type=simple
> ExecStart=/bin/sh -c "echo 'never' > /sys/kernel/mm/transparent_hugepage/enabled && echo 'never' > /sys/kernel/mm/transparent_hugepage/defrag"
>
> [Install]
> WantedBy=multi-user.target
> EOF
[root@node2 ~]# systemctl enable --now disable-thp
Created symlink from /etc/systemd/system/multi-user.target.wants/disable-thp.service to /etc/systemd/system/disable-thp.service.
[root@node2 ~]#

Configure Shared Storage (note: after installing the packages, reboot before initializing)

yum install -y kmod-oracleasm

wget https://download.oracle.com/otn_software/asmlib/oracleasmlib-2.0.12-1.el7.x86_64.rpm
wget https://yum.oracle.com/repo/OracleLinux/OL7/latest/x86_64/getPackage/oracleasm-support-2.1.11-2.el7.x86_64.rpm
yum -y localinstall oracleasmlib-2.0.12-1.el7.x86_64.rpm
yum -y localinstall oracleasm-support-2.1.11-2.el7.x86_64.rpm

# Initialize
oracleasm init

# Update the configuration
oracleasm configure -e -u grid -g asmadmin

Check the configuration:

[root@node1 ~]# oracleasm configure
ORACLEASM_ENABLED=true
ORACLEASM_UID=grid
ORACLEASM_GID=asmadmin
ORACLEASM_SCANBOOT=true
ORACLEASM_SCANORDER=""
ORACLEASM_SCANEXCLUDE=""
ORACLEASM_SCAN_DIRECTORIES=""
ORACLEASM_USE_LOGICAL_BLOCK_SIZE="false"
[root@node1 ~]# 
[root@node2 ~]# oracleasm configure
ORACLEASM_ENABLED=true
ORACLEASM_UID=grid
ORACLEASM_GID=asmadmin
ORACLEASM_SCANBOOT=true
ORACLEASM_SCANORDER=""
ORACLEASM_SCANEXCLUDE=""
ORACLEASM_SCAN_DIRECTORIES=""
ORACLEASM_USE_LOGICAL_BLOCK_SIZE="false"
[root@node2 ~]# 

Partition the disks; run on node1 only:

parted /dev/sdb -s -- mklabel gpt mkpart primary 1 -1
parted /dev/sdc -s -- mklabel gpt mkpart primary 1 -1
parted /dev/sdd -s -- mklabel gpt mkpart primary 1 -1

[root@node1 ~]#
[root@node1 ~]# parted /dev/sdb -s -- mklabel gpt mkpart primary 1 -1
[root@node1 ~]# parted /dev/sdc -s -- mklabel gpt mkpart primary 1 -1
[root@node1 ~]# parted /dev/sdd -s -- mklabel gpt mkpart primary 1 -1
[root@node1 ~]#

Confirm the partitions

[root@node1 ~]# 
[root@node1 ~]# lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda               8:0    0   80G  0 disk 
├─sda1            8:1    0    1G  0 part /boot
└─sda2            8:2    0   79G  0 part 
  ├─centos-root 253:0    0 47.8G  0 lvm  /
  ├─centos-swap 253:1    0  7.9G  0 lvm  [SWAP]
  └─centos-home 253:2    0 23.3G  0 lvm  /home
sdb               8:16   0   10G  0 disk 
└─sdb1            8:17   0   10G  0 part 
sdc               8:32   0   40G  0 disk 
└─sdc1            8:33   0   40G  0 part 
sdd               8:48   0   20G  0 disk 
└─sdd1            8:49   0   20G  0 part 
sr0              11:0    1 1024M  0 rom  
[root@node1 ~]# 

Create the ASM disks with oracleasm; run on node1 only, using your actual device names:

oracleasm createdisk OCR1 /dev/sdb1
oracleasm createdisk DATA1 /dev/sdc1
oracleasm createdisk FRA1 /dev/sdd1

[root@node1 ~]#
[root@node1 ~]# oracleasm createdisk OCR1 /dev/sdb1
Writing disk header: done
Instantiating disk: done
[root@node1 ~]# oracleasm createdisk DATA1 /dev/sdc1
Writing disk header: done
Instantiating disk: done
[root@node1 ~]# oracleasm createdisk FRA1 /dev/sdd1
Writing disk header: done
Instantiating disk: done
[root@node1 ~]#

Run on all nodes

oracleasm scandisks
oracleasm listdisks

[root@node1 ~]#
[root@node1 ~]# oracleasm scandisks
Reloading disk partitions: done
Cleaning any stale ASM disks...
Scanning system for ASM disks...
[root@node1 ~]#
[root@node1 ~]#
[root@node1 ~]# oracleasm listdisks
DATA1
FRA1
OCR1
[root@node1 ~]#

[root@node2 ~]#
[root@node2 ~]# oracleasm scandisks
Reloading disk partitions: done
Cleaning any stale ASM disks...
Scanning system for ASM disks...
Instantiating disk "OCR1"
Instantiating disk "DATA1"
Instantiating disk "FRA1"
[root@node2 ~]#
[root@node2 ~]#
[root@node2 ~]# oracleasm listdisks
DATA1
FRA1
OCR1
[root@node2 ~]#
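
To map an ASM label back to the block device that backs it on a given node (useful when device names differ between nodes), oracleasm querydisk with -p prints the matching paths:

oracleasm querydisk -p DATA1    # repeat for OCR1 and FRA1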

Check the disk devices

[root@node1 ~]# 
[root@node1 ~]# ls -l /dev/oracleasm/disks
total 0
brw-rw----. 1 grid asmadmin 8, 33 Nov  8 16:37 DATA1
brw-rw----. 1 grid asmadmin 8, 49 Nov  8 16:37 FRA1
brw-rw----. 1 grid asmadmin 8, 17 Nov  8 16:37 OCR1
[root@node1 ~]# 


[root@node2 ~]# 
[root@node2 ~]# ls -l /dev/oracleasm/disks
total 0
brw-rw----. 1 grid asmadmin 8, 33 Nov  8 16:39 DATA1
brw-rw----. 1 grid asmadmin 8, 49 Nov  8 16:39 FRA1
brw-rw----. 1 grid asmadmin 8, 17 Nov  8 16:39 OCR1
[root@node2 ~]# 

Install Grid Infrastructure

Perform the following on the first node, node1.

Log in via SSH as the grid user and upload the downloaded package LINUX.X64_193000_grid_home.zip to the node.

Unzip it into the grid user's $ORACLE_HOME (which is the Grid home):

[grid@node1 ~]$ unzip LINUX.X64_193000_grid_home.zip -d $ORACLE_HOME

Copy the cvuqdisk rpm package to every node in the cluster:

scp $ORACLE_HOME/cv/rpm/cvuqdisk-1.0.10-1.rpm root@node2:/tmp

Switch back to the root user and install the cvuqdisk rpm package.

# node1
CVUQDISK_GRP=oinstall; export CVUQDISK_GRP
rpm -iv /u01/app/19.3.0/grid/cv/rpm/cvuqdisk-1.0.10-1.rpm

# node2
CVUQDISK_GRP=oinstall; export CVUQDISK_GRP
rpm -iv /tmp/cvuqdisk-1.0.10-1.rpm

Because the OS was installed from a minimal install, if there is no graphical environment, install xorg-x11 on node1 (in fact the package is usually already present) and install Xming on Windows to render the GUI:

yum install -y xorg-x11-xinit

# Log out and back in for the change to take effect
exit

Install the Windows version of Xming:

Xming X Server for Windows download | SourceForge.net

In SecureCRT, enable X11 forwarding for the session (Session Options > Connection > Port Forwarding > Remote/X11 > tick "Forward X11 packets").

Download and install Xming Server on Windows and simply start it; SecureCRT forwards the GUI to the Xming server for display.
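
If you connect with a plain ssh client instead of SecureCRT's X11 forwarding, point DISPLAY at the Windows machine running Xming yourself; the address below is an assumption for this lab network:

# as grid on node1; 192.168.8.1 is assumed to be the Windows host running Xming
export DISPLAY=192.168.8.1:0.0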

Log in to node1 as the grid user and change to the ORACLE_HOME directory:

[grid@node1:/home/grid]$ cd $ORACLE_HOME

Run the following command on node1 to start the Grid installation:

./gridSetup.sh

Resolving PRVG-13602

The prerequisite check failed with PRVG-13602 because neither ntpd nor chronyd was running; enabling chronyd resolves it:

[root@node1 ~]# systemctl status ntpd.service
● ntpd.service - Network Time Service
   Loaded: loaded (/usr/lib/systemd/system/ntpd.service; disabled; vendor preset: disabled)
   Active: inactive (dead)

Nov 09 07:44:54 node1 ntpd[859]: Listen normally on 7 ens34 192.168.100.111 UDP 123
Nov 09 07:44:54 node1 ntpd[859]: Listen normally on 8 virbr0 192.168.122.1 UDP 123
Nov 09 07:44:54 node1 ntpd[859]: 193.182.111.14 interface 192.168.8.111 -> 192.168.100.111
Nov 09 07:44:54 node1 ntpd[859]: 185.209.85.222 interface 192.168.8.111 -> 192.168.100.111
Nov 09 07:44:54 node1 ntpd[859]: 194.58.203.148 interface 192.168.8.111 -> 192.168.100.111
Nov 09 07:44:54 node1 ntpd[859]: 116.203.151.74 interface 192.168.8.111 -> 192.168.100.111
Nov 09 07:44:54 node1 ntpd[859]: new interface(s) found: waking up resolver
Nov 09 08:01:32 node1 ntpd[859]: ntpd exiting on signal 15
Nov 09 08:01:32 node1 systemd[1]: Stopping Network Time Service...
Nov 09 08:01:32 node1 systemd[1]: Stopped Network Time Service.
[root@node1 ~]# systemctl status chronyd
● chronyd.service - NTP client/server
   Loaded: loaded (/usr/lib/systemd/system/chronyd.service; disabled; vendor preset: enabled)
   Active: inactive (dead)
     Docs: man:chronyd(8)
           man:chrony.conf(5)
[root@node1 ~]# 
[root@node1 ~]# systemctl enable --now chronyd
Created symlink from /etc/systemd/system/multi-user.target.wants/chronyd.service to /usr/lib/systemd/system/chronyd.service.
[root@node1 ~]# systemctl status chronyd      
● chronyd.service - NTP client/server
   Loaded: loaded (/usr/lib/systemd/system/chronyd.service; enabled; vendor preset: enabled)
   Active: active (running) since Wed 2022-11-09 08:04:42 CST; 2s ago
     Docs: man:chronyd(8)
           man:chrony.conf(5)
  Process: 4314 ExecStartPost=/usr/libexec/chrony-helper update-daemon (code=exited, status=0/SUCCESS)
  Process: 4310 ExecStart=/usr/sbin/chronyd $OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 4312 (chronyd)
    Tasks: 1
   CGroup: /system.slice/chronyd.service
           └─4312 /usr/sbin/chronyd

Nov 09 08:04:41 node1 systemd[1]: Starting NTP client/server...
Nov 09 08:04:42 node1 chronyd[4312]: chronyd version 3.4 starting (+CMDMON +NTP +REFCLOCK +RTC +PRIVDROP +SCFILTER +SIGND +ASYNCDNS +SECHASH +IPV6 +DEBUG)
Nov 09 08:04:42 node1 chronyd[4312]: Frequency 0.384 +/- 3.150 ppm read from /var/lib/chrony/drift
Nov 09 08:04:42 node1 systemd[1]: Started NTP client/server.
[root@node1 ~]#

Click Yes and the root scripts run automatically.

Ignore the INS-20802 error (Oracle Cluster Verification Utility failed) and complete the installation.
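
Optionally, run the standard post-install stage check as the grid user to confirm the clusterware stack came up cleanly:

cluvfy stage -post crsinst -n node1,node2 -verbose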

Get the status and configuration information of specific resources

[grid@node1 grid]$ 
[grid@node1 grid]$ crsctl status resource -t
--------------------------------------------------------------------------------
Name           Target  State        Server                   State details       
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.LISTENER.lsnr
               ONLINE  ONLINE       node1                    STABLE
               ONLINE  ONLINE       node2                    STABLE
ora.chad
               ONLINE  ONLINE       node1                    STABLE
               ONLINE  ONLINE       node2                    STABLE
ora.net1.network
               ONLINE  ONLINE       node1                    STABLE
               ONLINE  ONLINE       node2                    STABLE
ora.ons
               ONLINE  ONLINE       node1                    STABLE
               ONLINE  ONLINE       node2                    STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.ASMNET1LSNR_ASM.lsnr(ora.asmgroup)
      1        ONLINE  ONLINE       node1                    STABLE
      2        ONLINE  ONLINE       node2                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.LISTENER_SCAN1.lsnr
      1        ONLINE  ONLINE       node2                    STABLE
ora.LISTENER_SCAN2.lsnr
      1        ONLINE  ONLINE       node1                    STABLE
ora.LISTENER_SCAN3.lsnr
      1        ONLINE  ONLINE       node1                    STABLE
ora.OCR.dg(ora.asmgroup)
      1        ONLINE  ONLINE       node1                    STABLE
      2        ONLINE  ONLINE       node2                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.asm(ora.asmgroup)
      1        ONLINE  ONLINE       node1                    Started,STABLE
      2        ONLINE  ONLINE       node2                    Started,STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.asmnet1.asmnetwork(ora.asmgroup)
      1        ONLINE  ONLINE       node1                    STABLE
      2        ONLINE  ONLINE       node2                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.cvu
      1        ONLINE  ONLINE       node1                    STABLE
ora.node1.vip
      1        ONLINE  ONLINE       node1                    STABLE
ora.node2.vip
      1        ONLINE  ONLINE       node2                    STABLE
ora.qosmserver
      1        ONLINE  ONLINE       node1                    STABLE
ora.scan1.vip
      1        ONLINE  ONLINE       node2                    STABLE
ora.scan2.vip
      1        ONLINE  ONLINE       node1                    STABLE
ora.scan3.vip
      1        ONLINE  ONLINE       node1                    STABLE
--------------------------------------------------------------------------------
[grid@node1 grid]$ 

Check the status of Oracle High Availability Services and the Oracle Clusterware stack on the local server

[grid@node1 grid]$ crsctl check crs
CRS-4638: Oracle High Availability Services is online
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
[grid@node1 grid]$ 

Check node1 IP information

[grid@node1 grid]$ 
[grid@node1 grid]$ ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:38:85:6b brd ff:ff:ff:ff:ff:ff
    inet 192.168.8.111/24 brd 192.168.8.255 scope global ens33
       valid_lft forever preferred_lft forever
    inet 192.168.8.113/24 brd 192.168.8.255 scope global secondary ens33:1
       valid_lft forever preferred_lft forever
    inet 192.168.8.116/24 brd 192.168.8.255 scope global secondary ens33:3
       valid_lft forever preferred_lft forever
    inet 192.168.8.117/24 brd 192.168.8.255 scope global secondary ens33:4
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe38:856b/64 scope link 
       valid_lft forever preferred_lft forever
3: ens34: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:38:85:75 brd ff:ff:ff:ff:ff:ff
    inet 192.168.100.111/24 brd 192.168.100.255 scope global ens34
       valid_lft forever preferred_lft forever
    inet 169.254.10.54/19 brd 169.254.31.255 scope global ens34:1
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe38:8575/64 scope link 
       valid_lft forever preferred_lft forever
4: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default qlen 1000
    link/ether 52:54:00:52:aa:7a brd ff:ff:ff:ff:ff:ff
    inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
       valid_lft forever preferred_lft forever
5: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN group default qlen 1000
    link/ether 52:54:00:52:aa:7a brd ff:ff:ff:ff:ff:ff
[grid@node1 grid]$

Check node2 IP information

[grid@node2 ~]$ 
[grid@node2 ~]$ ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:14:58:46 brd ff:ff:ff:ff:ff:ff
    inet 192.168.8.112/24 brd 192.168.8.255 scope global ens33
       valid_lft forever preferred_lft forever
    inet 192.168.8.115/24 brd 192.168.8.255 scope global secondary ens33:1
       valid_lft forever preferred_lft forever
    inet 192.168.8.114/24 brd 192.168.8.255 scope global secondary ens33:2
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe14:5846/64 scope link 
       valid_lft forever preferred_lft forever
3: ens34: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:14:58:50 brd ff:ff:ff:ff:ff:ff
    inet 192.168.100.112/24 brd 192.168.100.255 scope global ens34
       valid_lft forever preferred_lft forever
    inet 169.254.15.202/19 brd 169.254.31.255 scope global ens34:1
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe14:5850/64 scope link 
       valid_lft forever preferred_lft forever
4: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default qlen 1000
    link/ether 52:54:00:b8:6c:cb brd ff:ff:ff:ff:ff:ff
    inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
       valid_lft forever preferred_lft forever
5: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN group default qlen 1000
    link/ether 52:54:00:b8:6c:cb brd ff:ff:ff:ff:ff:ff
[grid@node2 ~]$ 

Create Disk Groups for the Database

As the grid user, run asmca:

[grid@node1 grid]$ asmca

Query the disk group mount status and the CRSD status

[grid@node1 ~]$ 
[grid@node1 ~]$ sqlplus / as sysasm

SQL*Plus: Release 19.0.0.0.0 - Production on Wed Nov 9 14:50:54 2022
Version 19.3.0.0.0

Copyright (c) 1982, 2019, Oracle.  All rights reserved.


Connected to:
Oracle Database 19c Enterprise Edition Release 19.0.0.0.0 - Production
Version 19.3.0.0.0

SQL> select NAME,state from v$asm_diskgroup;

NAME                           STATE
------------------------------ -----------
OCR                            MOUNTED
DATA                           MOUNTED
FRA                            MOUNTED

SQL> 
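
The same information, together with sizes and free space, is available from asmcmd:

asmcmd lsdg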

Install the Oracle Database Software

Log in via SSH as the oracle user and unzip the downloaded zip file into the ORACLE_HOME directory:

[oracle@node1 ~]$ unzip LINUX.X64_193000_db_home.zip -d $ORACLE_HOME

Change to the ORACLE_HOME directory:

cd $ORACLE_HOME

Then run runInstaller:

./runInstaller

Note the installer prompt: for RAC, install the software only first, then run DBCA to create the database:

Create the Database

Connect via SSH as the oracle user and verify the DBCA prerequisites:

/u01/app/19.3.0/grid/bin/cluvfy stage -pre dbcfg -fixup -n node1,node2 -d /u01/app/oracle/product/19.3.0/dbhome_1 -verbose

Run dbca:

dbca

Check the status

[grid@node1 ~]$ crsctl status res -t
--------------------------------------------------------------------------------
Name           Target  State        Server                   State details       
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.LISTENER.lsnr
               ONLINE  ONLINE       node1                    STABLE
               ONLINE  ONLINE       node2                    STABLE
ora.chad
               ONLINE  ONLINE       node1                    STABLE
               ONLINE  ONLINE       node2                    STABLE
ora.net1.network
               ONLINE  ONLINE       node1                    STABLE
               ONLINE  ONLINE       node2                    STABLE
ora.ons
               ONLINE  ONLINE       node1                    STABLE
               ONLINE  ONLINE       node2                    STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.ASMNET1LSNR_ASM.lsnr(ora.asmgroup)
      1        ONLINE  ONLINE       node1                    STABLE
      2        ONLINE  ONLINE       node2                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.DATA.dg(ora.asmgroup)
      1        ONLINE  ONLINE       node1                    STABLE
      2        ONLINE  ONLINE       node2                    STABLE
      3        ONLINE  OFFLINE                               STABLE
ora.FRA.dg(ora.asmgroup)
      1        ONLINE  ONLINE       node1                    STABLE
      2        ONLINE  ONLINE       node2                    STABLE
      3        ONLINE  OFFLINE                               STABLE
ora.LISTENER_SCAN1.lsnr
      1        ONLINE  ONLINE       node1                    STABLE
ora.LISTENER_SCAN2.lsnr
      1        ONLINE  ONLINE       node2                    STABLE
ora.LISTENER_SCAN3.lsnr
      1        ONLINE  ONLINE       node1                    STABLE
ora.OCR.dg(ora.asmgroup)
      1        ONLINE  ONLINE       node1                    STABLE
      2        ONLINE  ONLINE       node2                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.asm(ora.asmgroup)
      1        ONLINE  ONLINE       node1                    Started,STABLE
      2        ONLINE  ONLINE       node2                    Started,STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.asmnet1.asmnetwork(ora.asmgroup)
      1        ONLINE  ONLINE       node1                    STABLE
      2        ONLINE  ONLINE       node2                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.cvu
      1        ONLINE  ONLINE       node1                    STABLE
ora.node1.vip
      1        ONLINE  ONLINE       node1                    STABLE
ora.node2.vip
      1        ONLINE  ONLINE       node2                    STABLE
ora.qosmserver
      1        ONLINE  ONLINE       node2                    STABLE
ora.racdb.db
      1        ONLINE  ONLINE       node1                    Open,HOME=/u01/app/o
                                                             racle/product/19.3.0
                                                             /dbhome_1,STABLE
      2        ONLINE  ONLINE       node2                    Open,HOME=/u01/app/o
                                                             racle/product/19.3.0
                                                             /dbhome_1,STABLE
ora.scan1.vip
      1        ONLINE  ONLINE       node1                    STABLE
ora.scan2.vip
      1        ONLINE  ONLINE       node2                    STABLE
ora.scan3.vip
      1        ONLINE  ONLINE       node1                    STABLE
--------------------------------------------------------------------------------
[grid@node1 ~]$ 

Verify the database status:

[grid@node1 ~]$ 
[grid@node1 ~]$ srvctl status database -d racdb
Instance racdb1 is running on node node1
Instance racdb2 is running on node node2
[grid@node1 ~]$ 

Check the database configuration

[grid@node1 ~]$ 
[grid@node1 ~]$ srvctl config database -d racdb
Database unique name: racdb
Database name: racdb
Oracle home: /u01/app/oracle/product/19.3.0/dbhome_1
Oracle user: oracle
Spfile: +DATA/RACDB/PARAMETERFILE/spfile.272.1120332595
Password file: +DATA/RACDB/PASSWORD/pwdracdb.256.1120329127
Domain: 
Start options: open
Stop options: immediate
Database role: PRIMARY
Management policy: AUTOMATIC
Server pools: 
Disk Groups: DATA,FRA
Mount point paths: 
Services: 
Type: RAC
Start concurrency: 
Stop concurrency: 
OSDBA group: dba
OSOPER group: oper
Database instances: racdb1,racdb2
Configured nodes: node1,node2
CSS critical: no
CPU count: 0
Memory target: 0
Maximum memory: 0
Default network number for database services: 
Database is administrator managed
[grid@node1 ~]$ 

Connect to the database and check

[oracle@node1 ~]$ sqlplus / as sysdba

SQL*Plus: Release 19.0.0.0.0 - Production on Wed Nov 9 19:38:59 2022
Version 19.3.0.0.0

Copyright (c) 1982, 2019, Oracle.  All rights reserved.


Connected to:
Oracle Database 19c Enterprise Edition Release 19.0.0.0.0 - Production
Version 19.3.0.0.0

SQL> select instance_name,status from gv$Instance;

INSTANCE_NAME    STATUS
---------------- ------------
racdb1           OPEN
racdb2           OPEN

SQL> 
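
A few more routine post-install checks, run as the grid user (standard srvctl/crsctl commands):

srvctl status scan_listener    # all three SCAN listeners should be running
srvctl status nodeapps         # per-node VIP, network and ONS status
crsctl check cluster -all      # CRS/CSS/EVM health on every node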
