数据库版本 19.3.0.0.0
集群软件BASE目录 /u01/app/grid
集群软件HOME目录 /u01/app/19.0.0/grid
数据库软件BASE目录 /u01/app/oracle
数据库软件HOME目录 /u01/app/oracle/product/19.3.0/db
添加两条私有网卡
# Create the OS groups required by Oracle Grid Infrastructure and Database.
# GIDs follow the Oracle-recommended 5432x numbering; values must match on
# every cluster node so file ownership is consistent across RAC.
groupadd -g 54321 oinstall
groupadd -g 54322 dba
groupadd -g 54325 asmadmin
groupadd -g 54323 asmdba
groupadd -g 54324 asmoper
groupadd -g 54327 backupdba
groupadd -g 54329 kmdba
groupadd -g 54326 oper
groupadd -g 54328 dgdba
groupadd -g 54330 racdba
# oracle owns the database software, grid owns the clusterware/ASM stack.
# UIDs (5000/5001) must also be identical on all nodes.
useradd -u 5000 -g oinstall -G oinstall,dba,oper,backupdba,dgdba,racdba,kmdba,asmdba -m -d /home/oracle oracle
useradd -u 5001 -g oinstall -G oinstall,dba,racdba,asmadmin,asmdba,asmoper -m -d /home/grid grid
# Both passwd commands prompt interactively for the new password.
passwd oracle
passwd grid
以 grid 用户身份修改 grid 用户环境变量:
cd
vi .bash_profile 编辑添加如下内容
# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
# User specific environment and startup programs
PATH=$PATH:$HOME/.local/bin:$HOME/bin
export PATH
# Grid Infrastructure environment (paths match the install plan above).
export ORACLE_BASE=/u01/app/grid
# No space is allowed after '=': the original 'ORACLE_HOME= /u01/...' left
# ORACLE_HOME empty and tried to execute the path as a command.
export ORACLE_HOME=/u01/app/19.0.0/grid
export ORACLE_SID=+ASM1
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
export PATH=$ORACLE_HOME/bin:$ORACLE_HOME/OPatch:$PATH:.
# Only touch the terminal when stdin is a tty; an unconditional 'stty' in a
# login profile breaks non-interactive logins such as scp/sftp.
[ -t 0 ] && stty erase ^H
umask 022
以 oracle 用户身份修改 oracle 用户环境变量:
cd ~
vi .bash_profile 编辑添加如下内容
# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
# User specific environment and startup programs
PATH=$PATH:$HOME/.local/bin:$HOME/bin
# Oracle Database environment. No space after '=' (the original
# 'ORACLE_BASE= /u01/...' left the variable empty and ran the path as a
# command). ORACLE_HOME is aligned with the DB HOME from the install plan:
# /u01/app/oracle/product/19.3.0/db (the original said 19.3/db).
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=/u01/app/oracle/product/19.3.0/db
export PATH=$ORACLE_HOME/bin:$PATH:$ORACLE_HOME/OPatch:$PATH:.
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:$ORACLE_HOME/network/lib:/lib:/usr/lib
export ORACLE_SID=test1
export NLS_LANG=american_america.UTF8
# Guard stty so non-interactive logins (scp/sftp) are not broken.
[ -t 0 ] && stty erase ^H
umask 022
# Create the Grid and Database directory trees. The DB HOME path is
# /u01/app/oracle/product/19.3.0/db, matching the install plan and the
# oracle user's ORACLE_HOME (the original created /u01/app/oracle/product/19/db,
# which nothing else in this document references).
mkdir -p /u01/app/19.0.0/grid
mkdir -p /u01/app/grid
mkdir -p /u01/app/oracle
mkdir -p /u01/app/oracle/product/19.3.0/db
# grid owns /u01 as a whole first, then oracle takes over its own base.
chown -R grid:oinstall /u01
chown -R oracle:oinstall /u01/app/oracle
chmod -R 775 /u01/
配置hosts文件
修改用户资源限制
# vi /etc/security/limits.conf 添加如下内容
# Append Oracle-recommended resource limits for the grid and oracle users.
# memlock is in KB and must be large enough to cover the HugePages/SGA
# allocation. Unquoted EOF is safe here: the content contains no $ expansions.
cat >> /etc/security/limits.conf <<EOF
grid soft nproc 4096
grid hard nproc 16384
grid soft nofile 2047
grid hard nofile 65536
grid soft stack 10240
grid hard stack 32768
oracle soft nproc 4096
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536
oracle soft stack 10240
oracle hard stack 32768
grid soft memlock 1073741824
grid hard memlock 1073741824
oracle soft memlock 1073741824
oracle hard memlock 1073741824
EOF
修改profile 文件
vi /etc/profile 添加如下内容
# Append per-user ulimit settings to /etc/profile.
# The here-doc delimiter MUST be quoted ('EOF'): with the original unquoted
# EOF, $USER and $SHELL were expanded by the shell running this cat command
# (i.e. to root's values at write time), so the written if-test could never
# match the oracle/grid users at login.
cat >> /etc/profile <<'EOF'
if [ "$USER" = "oracle" ] || [ "$USER" = "grid" ]; then
  if [ "$SHELL" = "/bin/ksh" ]; then
    ulimit -p 16384
    ulimit -n 65536
  else
    ulimit -u 16384 -n 65536
  fi
  umask 022
fi
EOF
配置kernel参数
# Append kernel parameters for Oracle RAC to /etc/sysctl.conf.
# Fixes vs. the original: the here-doc was never terminated (missing EOF),
# and sysctl.conf only accepts '#'/';' comments — the '--'/'---' inline
# annotations would have made those lines invalid.
cat >> /etc/sysctl.conf << EOF
kernel.sem = 250 32000 100 128
fs.file-max = 6815744
fs.aio-max-nr = 1048576
vm.dirty_background_ratio = 3
vm.dirty_ratio = 20
vm.dirty_expire_centisecs = 500
vm.dirty_writeback_centisecs = 100
net.core.rmem_default = 262144
net.core.rmem_max = 10485760
net.core.wmem_default = 262144
net.core.wmem_max = 10485760
net.ipv4.ip_local_port_range = 9000 65500
vm.min_free_kbytes = 524288
# kernel.shmall / kernel.shmmax / kernel.shmmni need no tuning on RHEL 7.4+;
# 'sysctl -a | grep shm' already shows the defaults:
#   kernel.shmall = 18446744073692774399
#   kernel.shmmax = 18446744073692774399
#   kernel.shmmni = 4096
# Private interconnect NICs (rp_filter=2 = loose mode, required for RAC HAIP)
net.ipv4.conf.ens19f0.rp_filter = 2
net.ipv4.conf.ens20f0.rp_filter = 2
# Public network (rp_filter=1 = strict mode)
net.ipv4.conf.bond0.rp_filter = 1
# IP fragment reassembly buffers: 15M low / 16M high
net.ipv4.ipfrag_low_thresh = 15728640
net.ipv4.ipfrag_high_thresh = 16777216
EOF
这个如果没设置的话,在dbca的时候会报以下错误: 只能起一个节点,不能起两个节点
No connectivity to other instances in the cluster during startup. Hence, LMON is terminating the instance. Please check the LMON trace file for details. Also, please check the network logs of this instance along with clusterwide network health for problems and then re-start this instance.
sysctl -p
检查大页(HugePages)配置是否生效
[root@ ]# cat /proc/meminfo | grep Hug
AnonHugePages: 0 kB
HugePages_Total: 412678
HugePages_Free: 1272
HugePages_Rsvd: 0
HugePages_Surp: 0
Hugepagesize: 2048 kB
其中,HugePages_Total非0说明已启用大页,大页总量需要 > SGA
systemctl status firewalld
systemctl is-enabled firewalld
systemctl stop firewalld
systemctl disable firewalld
[root@xxxx mapper]# cat /etc/selinux/config
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# enforcing - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of enforcing.
# disabled - No SELinux policy is loaded.
SELINUX=disabled
# SELINUXTYPE= can take one of three two values:
# targeted - Targeted processes are protected,
# minimum - Modification of targeted policy. Only selected processes are protected.
# mls - Multi Level Security protection.
SELINUXTYPE=targeted
关闭 Transparent HugePages
检查状态
#cat /sys/kernel/mm/transparent_hugepage/enabled
[always] madvise never
# grep AnonHugePages /proc/meminfo
AnonHugePages: 0
# cat /sys/kernel/mm/transparent_hugepage/enabled
[always] madvise never
# cat /sys/kernel/mm/transparent_hugepage/defrag
[always] madvise never
上述即代表关闭
关闭Transparent HugePages,临时生效
#echo never >/sys/kernel/mm/transparent_hugepage/enabled
关闭Transparent HugePages,永久生效
1.
Add the following lines in /etc/rc.local and reboot the server (this still can be done on Redhat 7 although rc.local is being deprecated):
if test -f /sys/kernel/mm/transparent_hugepage/enabled; then
echo never > /sys/kernel/mm/transparent_hugepage/enabled
fi
if test -f /sys/kernel/mm/transparent_hugepage/defrag; then
echo never > /sys/kernel/mm/transparent_hugepage/defrag
fi
2 将“transparent_hugepages = never” 添加到/ etc / default / grub的GRUB_CMDLINE_LINUX条目中,如下所示:
# /etc/default/grub
GRUB_TIMEOUT=5
GRUB_DISTRIBUTOR="$(sed 's, release .*$,,g' /etc/system-release)"
GRUB_DEFAULT=saved
GRUB_DISABLE_SUBMENU=true
GRUB_TERMINAL_OUTPUT="console"
GRUB_CMDLINE_LINUX="crashkernel=auto rd.lvm.lv=vg_rgdh18xcdb1/lv_root rd.lvm.lv=vg_rgdh18xcdb1/lv_swap transparent_hugepage=never hugepages=412678 elevator=noop"
GRUB_DISABLE_RECOVERY="true"
2. 备份GRUB配置文件并生成新文件
# cp -pv /boot/grub2/grub.cfg /boot/grub2/grub.cfg-bkp
# grub2-mkconfig -o /boot/grub2/grub.cfg
3. Reboot the server
# reboot
4. 验证
# cat /sys/kernel/mm/transparent_hugepage/enabled
always madvise [never]
透明HugePages在运行时由内核中的khugepaged线程动态设置,而常规HugePages必须在启动时预先分 配由于已知Transparent HugePages会导致RAC意外的节点重新启动和性能问题,
禁用avahi服务
systemctl stop avahi-daemon
systemctl disable avahi-daemon
systemctl status avahi-daemon
禁用RemoveIPC
vi /etc/systemd/logind.conf 编辑并添加如下内容
RemoveIPC=no
重启systemd-logind服务或重启主机
systemctl daemon-reload
systemctl restart systemd-logind
安装RPM 包
binutils-2.23.52.0.1-12.el7 (x86_64)
mos 文档号 Doc ID 1587357.1
compat-libcap1-1.10-3.el7 (x86_64)
compat-libstdc++-33-3.2.3-71.el7 (i686)
compat-libstdc++-33-3.2.3-71.el7 (x86_64)
glibc-2.17-36.el7 (i686)
glibc-2.17-36.el7 (x86_64)
glibc-devel-2.17-36.el7 (i686)
glibc-devel-2.17-36.el7 (x86_64)
ksh
libaio-0.3.109-9.el7 (i686)
libaio-0.3.109-9.el7 (x86_64)
libaio-devel-0.3.109-9.el7 (i686)
libaio-devel-0.3.109-9.el7 (x86_64)
libX11-1.6.0-2.1.el7 (i686)
libX11-1.6.0-2.1.el7 (x86_64)
libXau-1.0.8-2.1.el7 (i686)
libXau-1.0.8-2.1.el7 (x86_64)
libXi-1.7.2-1.el7 (i686)
libXi-1.7.2-1.el7 (x86_64)
libXtst-1.2.2-1.el7 (i686)
libXtst-1.2.2-1.el7 (x86_64)
libgcc-4.8.2-3.el7 (i686)
libgcc-4.8.2-3.el7 (x86_64)
libstdc++-4.8.2-3.el7 (i686)
libstdc++-4.8.2-3.el7 (x86_64)
libstdc++-devel-4.8.2-3.el7 (i686)
libstdc++-devel-4.8.2-3.el7 (x86_64)
libxcb-1.9-5.el7 (i686)
libxcb-1.9-5.el7 (x86_64)
make-3.82-19.el7 (x86_64)
nfs-utils-1.3.0-0.21.el7.x86_64 (for Oracle ACFS)
net-tools-2.0-0.17.20131004git.el7 (x86_64) (for Oracle RAC and Oracle Clusterware)
smartmontools-6.2-4.el7 (x86_64)
sysstat-10.1.5-1.el7 (x86_64)
Ensure that OpenSSH is installed on your servers. OpenSSH is the required SSH software.
bc
unixODBC-2.3.1 or later
配置NTP slewing option
vi /etc/sysconfig/ntp 编辑并添加如下内容
#NTPD_OPTIONS="-g -u ntp:ntp"
NTPD_OPTIONS="-x -g -u ntp:ntp"
重启NTP 服务,检查slewing option 是否生效
rcntpd restart
ps -ef | grep ntp ,检查输出结果是否含有"-x"
配置SSH等效性
也可以在安装的时候配置
如果没有要先解压grid的安装介质到grid_home目录下
配置udev 磁盘绑定
根据此篇官方文档提供的方法进行配置
How to set udev rules in OL7 related to ASM on multipath disks (Doc ID 2101679.1)
Step 1: Determine the UUID of the multipath disk meant for Oracle ASM use.
udevadm info --query=all --name=/dev/mapper/mpathq | grep -i DM_UUID
udevadm info --query=all --name=/dev/mapper/mpathp | grep -i DM_UUID
udevadm info --query=all --name=/dev/mapper/mpatha | grep -i DM_UUID
udevadm info --query=all --name=/dev/mapper/mpathi | grep -i DM_UUID
udevadm info --query=all --name=/dev/mapper/mpathj | grep -i DM_UUID
Step 2: Create a udev rule file /etc/udev/rules.d/96-asmmultipath.rules
# touch /etc/udev/rules.d/96-asmmultipath.rules
Step 3: Add below udev rule for a multipath device using it's DM_UUID value under the file /etc/udev/rules.d/96-asmmultipath.rules
# vi /etc/udev/rules.d/96-asmmultipath.rules添加以下内容
ACTION=="add|change", ENV{DM_UUID}=="mpath-xxxxxxxxxxxxxxxxxxxx", SYMLINK+="asmdb_xxxxx", GROUP="asmadmin", OWNER="grid", MODE="0660"
ACTION=="add|change", ENV{DM_UUID}=="mpath-xxxxxxxxxxxxxxxxxxxx", SYMLINK+="asmdb_xxxxx", GROUP="asmadmin", OWNER="grid", MODE="0660"
ACTION=="add|change", ENV{DM_UUID}=="mpath-xxxxxxxxxxxxxxxxxxxx", SYMLINK+="asmdb_xxxxx", GROUP="asmadmin", OWNER="grid", MODE="0660"
ACTION=="add|change", ENV{DM_UUID}=="mpath-xxxxxxxxxxxxxxxxxxxx", SYMLINK+="asmdb_xxxxx", GROUP="asmadmin", OWNER="grid", MODE="0660"
ACTION=="add|change", ENV{DM_UUID}=="mpath-xxxxxxxxxxxxxxxxxxxx", SYMLINK+="asmdb_xxxxx", GROUP="asmadmin", OWNER="grid", MODE="0660"
ACTION=="add|change", ENV{DM_UUID}=="mpath-xxxxxxxxxxxxxxxxxxxx", SYMLINK+="asmdb_xxxxx", GROUP="asmadmin", OWNER="grid", MODE="0660"
ACTION=="add|change", ENV{DM_UUID}=="mpath-xxxxxxxxxxxxxxxxxxxx", SYMLINK+="asmdb_xxxxx", GROUP="asmadmin", OWNER="grid", MODE="0660"
Step 4: To add or load Udev rules using the below commands.
# /sbin/udevadm control --reload-rules
# /sbin/udevadm trigger --type=devices --action=change
Step 5: Check file permissions on the disk paths.
# ls -l /dev/asm*
# ls -l /dev/dm*
….
lrwxrwxrwx 1 root root 5 Mar 12 14:45 /dev/asmdisk2 -> dm-15
brw-rw---- 1 grid asmadmin 253, 15 Mar 12 14:45 /dev/dm-15
….
检测磁盘是否可访问(注意:下面的 dd if=/dev/zero 写测试会清除目标磁盘开头的数据,只能在确认未被使用的全新磁盘上执行)
# su - grid
sg_inq /dev/dm-20
sg_inq /dev/dm-11
dd if=/dev/dm-20 of=/tmp/1.txt bs=1M count=100
dd if=/dev/dm-11 of=/tmp/1.txt bs=1M count=100
sg_inq /dev/dm-20
dd if=/dev/dm-20 of=/tmp/1.txt bs=1M count=100
dd if=/dev/zero of=/dev/dm-20 bs=1M count=100
dd if=/dev/zero of=/dev/dm-11 bs=1M count=100
dd if=/dev/zero of=/dev/dm-17 bs=1M count=100
dd if=/dev/zero of=/dev/dm-22 bs=1M count=100
dd if=/dev/zero of=/dev/dm-14 bs=1M count=100
dd if=/dev/zero of=/dev/dm-8 bs=1M count=100
dd if=/dev/zero of=/dev/dm-21 bs=1M count=100
dd if=/dev/zero of=/dev/dm-9 bs=1M count=100
dd if=/dev/zero of=/dev/dm-10 bs=1M count=100
dd if=/dev/zero of=/dev/dm-19 bs=1M count=100
dd if=/dev/zero of=/dev/dm-12 bs=1M count=100
dd if=/dev/zero of=/dev/dm-18 bs=1M count=100
dd if=/dev/zero of=/dev/dm-16 bs=1M count=100
dd if=/dev/zero of=/dev/dm-15 bs=1M count=100
dd if=/dev/zero of=/dev/dm-7 bs=1M count=100
dd if=/dev/zero of=/dev/dm-23 bs=1M count=100
解压GI安装包
su - grid
$unzip /software/LINUX.X64_193000_grid_home.zip -d $GI_HOME
//注意:只需要把GRID 介质解压到某一个节点$GI_HOME目录中即可,另一个节点在安装脚本执行后会自动copy
2.24进行CVU检测
在所有节点安装
cd /u01/app/19.0.0/grid/cv/rpm
cp cvuqdisk-1.0.10-1.rpm /tmp
scp cvuqdisk-1.0.10-1.rpm kdrpt02:/tmp
cd /tmp
rpm -ivh cvuqdisk-1.0.10-1.rpm
进行检测:
/u01/app/19.0.0/grid/runcluvfy.sh stage -pre crsinst -n node1,node2 -verbose
2.25重启操作系统
安装软件以及补丁
asm 实例parameter 如下:
alter system set memory_max_target=0 scope=spfile sid='*';
alter system set memory_target=0 scope=spfile sid='*';
alter system set sga_target=4G scope=spfile sid='*';
alter system set sga_max_size=4G scope=spfile sid='*';
alter system set pga_aggregate_target=512M scope=spfile sid='*';
alter system set large_pool_size=256M scope=spfile sid='*';
alter system set processes=1000 scope=spfile sid='*';
db实例
alter system set "_index_partition_large_extents"=TRUE scope=spfile sid='*';
alter system set "_memory_imm_mode_without_autosga"=FALSE scope=spfile sid='*';
alter system set "_optimizer_ads_use_result_cache"=FALSE scope=spfile sid='*';
alter system set "_optimizer_extended_cursor_sharing"=NONE scope=spfile sid='*';
alter system set "_optimizer_extended_cursor_sharing_rel"=NONE scope=spfile sid='*';
alter system set "_optimizer_null_accepting_semijoin"=FALSE scope=spfile sid='*';
alter system set "_optimizer_partial_join_eval"=FALSE scope=spfile sid='*';
alter system set "_optimizer_reduce_groupby_key"=FALSE scope=spfile sid='*';
alter system set "_partition_large_extents"=FALSE scope=spfile sid='*';
alter system set "_clusterwide_global_transactions"=FALSE scope=spfile sid='*';
alter system set db_files=2048 scope=spfile sid='*';
alter system set event='28401 trace name context forever,level 1','10949 trace name context forever,level 1', scope=spfile sid='*';
alter system set "_use_adaptive_log_file_sync"=FALSE scope=spfile sid='*';
alter system set "_use_single_log_writer"=TRUE scope=spfile sid='*';
alter system set "_datafile_write_errors_crash_instance"=FALSE scope=spfile sid='*';
alter system set sga_target=800G scope=spfile sid='*';
alter system set sga_max_size=800G scope=spfile sid='*';
alter system set pga_aggregate_target=200G scope=spfile sid='*';
alter system set shared_pool_size=120G scope=spfile sid='*';
alter system set "_lm_tickets"=5000 scope=spfile sid='*';
alter system set "_gc_policy_time"=0 scope=spfile sid='*';
alter system set "_gc_undo_affinity"=FALSE scope=spfile sid='*';
alter system set gcs_server_processes=128 scope=spfile sid='*';
alter system set parallel_force_local=TRUE scope=spfile sid='*';
alter system set parallel_max_servers=240 scope=spfile sid='*';
alter system set open_cursors=1000 scope=spfile sid='*';
alter system set session_cached_cursors=100 scope=spfile sid='*';
alter system set open_links=10 scope=spfile sid='*';
alter system set open_links_per_instance=10 scope=spfile sid='*';
alter system set "_undo_autotune"=FALSE scope=spfile sid='*';
alter system set undo_retention=14400 scope=spfile sid='*';
alter system set audit_trail=NONE scope=spfile sid='*';
alter system set enable_ddl_logging=TRUE scope=spfile sid='*';
alter system set result_cache_max_size=0 scope=spfile sid='*';
alter system set large_pool_size=256M scope=spfile sid='*';
alter profile default limit password_life_time unlimited;
alter system set use_large_pages=TRUE scope=spfile sid='*';
alter database add supplemental log data(primary key,unique index) columns;
alter database add supplemental log data;
alter database force logging;
alter system set enable_goldengate_replication=true scope=both sid='*';
alter system set streams_pool_size=20480M scope=spfile sid='*';