数据库规划
1、操作系统:Oracle Enterprise Linux 7.6,内存8G
2、集群软件:Grid Infrastructure 19.3.0
3、数据库软件:Database 19.3.0
4、数据库名:cwdb
5、实例名:cwdb1/cwdb2
6、数据块大小:8192
7、数据库字符集:UTF8
网络规划
主机名 |
IP |
IP类型 |
网关 |
网卡 |
cwdb01 |
192.168.6.101 |
public ip |
192.168.6.1 |
eth0 |
cwdb02 |
192.168.6.102 |
public ip |
eth0 |
|
cwdb01-priv |
10.0.0.101 |
private ip |
10.0.0.1 |
eth1 |
cwdb02-priv |
10.0.0.102 |
private ip |
eth1 |
|
cwdb01-vip |
192.168.6.103 |
virtual ip |
192.168.6.1 |
eth2 |
cwdb02-vip |
192.168.6.104 |
virtual ip |
eth2 |
|
cwdb-scan |
192.168.6.105 |
scan ip |
192.168.6.1 |
存储规划
磁盘组 |
OS |
ORACLE |
+DGOCR |
+DGDATA |
+DGFRA |
存储类型 |
文件系统 |
文件系统 |
ASM |
ASM |
ASM |
磁盘组容量 |
100G |
200G |
9G |
8G |
4G |
磁盘数量 |
1 |
1 |
3 |
2 |
1 |
每个磁盘大小 |
100G |
200G |
3G |
4G |
4G |
关闭防火墙
# Stop and disable firewalld and NetworkManager on both nodes
# (both are commonly turned off before Grid Infrastructure installation).
for svc in firewalld.service NetworkManager.service; do
  systemctl stop "$svc"
  systemctl disable "$svc"
done
禁用selinux
# Permanently disable SELinux. The original used 'sed -n', which only prints
# the changed text to stdout and never modifies the file; '-i' edits
# /etc/sysconfig/selinux in place.
sed -i '/SELINUX=enforcing/c SELINUX=disabled' /etc/sysconfig/selinux
# Also drop to permissive mode for the running system (ignored if already off).
setenforce 0
修改hosts配置文件
# Append cluster name resolution to /etc/hosts (run on both nodes).
# NOTE: the SCAN hostname must match the plan ('cwdb-scan'); the original
# wrote 'cluster-scan', which would not resolve the planned SCAN name.
cat <<EOF >> /etc/hosts
#public ip
192.168.6.101 cwdb01
192.168.6.102 cwdb02
#private ip
10.0.0.101 cwdb01-priv
10.0.0.102 cwdb02-priv
#virtual ip
192.168.6.103 cwdb01-vip
192.168.6.104 cwdb02-vip
#scan ip
192.168.6.105 cwdb-scan
EOF
创建组、用户、目录
1、创建组
# Create the OS groups required by Grid Infrastructure and the database,
# with fixed GIDs so both nodes stay consistent.
while read -r gid grp; do
  groupadd -g "$gid" "$grp"
done <<'EOF'
54321 oinstall
54322 dba
54323 oper
54324 asmdba
54325 asmoper
54326 asmadmin
54327 backupdba
54328 dgdba
54329 kmdba
EOF
2、创建用户oracle和grid
# oracle: database software owner; grid: Grid Infrastructure owner.
# Fixed UIDs keep both nodes consistent; both passwords are set to "oracle".
useradd -u 54321 -g oinstall \
  -G dba,oper,asmdba,backupdba,dgdba,kmdba oracle
printf '%s\n' oracle | passwd --stdin oracle
useradd -u 54322 -g oinstall \
  -G dba,asmdba,asmoper,asmadmin grid
printf '%s\n' oracle | passwd --stdin grid
3、创建目录
# Create the Grid and Oracle installation directories (OFA layout).
for dir in \
  /u01/app/grid \
  /u01/app/19.3.0/grid \
  /u01/app/oracle \
  /u01/app/oracle/product/19.3.0/dbhome_1; do
  mkdir -p "$dir"
done
# grid owns the whole /u01 tree; oracle then takes over its own base.
chown -R grid:oinstall /u01
chown -R oracle:oinstall /u01/app/oracle
chmod -R 775 /u01
配置yum源
1、挂载光盘
# Mount the installation DVD and move the stock repo files aside so that
# only the local DVD repo (created next) is consulted by yum.
mount /dev/sr0 /mnt
cd /etc/yum.repos.d/
mv * /root
2、创建yum源
# Create the DVD-based repo definition (cwd is /etc/yum.repos.d from the
# previous step). A single here-doc with '>' (truncate) creates the file
# deterministically; the original 'vim' step plus '>>' append was redundant
# and would duplicate the section on re-runs.
cat <<EOF > yum.repo
[yum]
name=yum
baseurl=file:///mnt
gpgcheck=0
enabled=1
EOF
3、清理缓存
yum clean all
yum makecache
yum repolist
4、安装测试
yum install xorg-x11-apps.x86_64 -y
5、执行xclock如果出现时钟说明安装成功
修改资源限制参数
# Resource limits required by the Grid/DB installers: process count, open
# files and stack size for both owners; memlock for the oracle user so the
# SGA can use locked (huge) pages. Appended to limits.conf on both nodes.
cat<<EOF>>/etc/security/limits.conf
grid soft nproc 16384
grid hard nproc 16384
grid soft nofile 65536
grid hard nofile 65536
grid soft stack 32768
grid hard stack 32768
oracle soft nproc 16384
oracle hard nproc 16384
oracle soft nofile 65536
oracle hard nofile 65536
oracle soft stack 32768
oracle hard stack 32768
oracle soft memlock 6000000
oracle hard memlock 6000000
EOF
# Display the current session's limits for verification.
ulimit -a
控制给用户分配的资源
# Make PAM apply limits.conf to login sessions.
echo "session required pam_limits.so" >> /etc/pam.d/login
修改内核参数
# Kernel parameters required for Oracle Grid/DB.
# FIX: net.core.wmem_max corrected from 1048586 (typo) to the documented
# value 1048576.
# shmall (pages) * 4096 = ~5 GB, matching shmmax; nr_hugepages sized for
# the planned SGA.
cat<<EOF>>/etc/sysctl.conf
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.ipv4.conf.all.rp_filter = 2
net.ipv4.conf.default.rp_filter = 2
net.ipv4.ip_forward = 1
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
kernel.panic_on_oops = 1
kernel.shmmax = 5033164800
kernel.shmall = 1228800
kernel.shmmni = 4096
vm.nr_hugepages = 2500
EOF
让参数生效
# Load the new kernel parameters into the running kernel.
sysctl -p
nproc参数
# Raise the default per-user process cap. Every *.conf file under
# /etc/security/limits.d/ is read by pam_limits, so the 90- prefix
# works on EL7 as well (EL7's stock file is 20-nproc.conf).
echo "* soft nproc 16384">>/etc/security/limits.d/90-nproc.conf
关闭透明大页
开启大内存后,需要关闭透明大页
# Disable transparent hugepages at every boot; explicit hugepages are used
# instead (vm.nr_hugepages above). Comments kept outside the here-doc so
# rc.local receives exactly the original lines.
cat<<EOF>>/etc/rc.d/rc.local
if test -f /sys/kernel/mm/transparent_hugepage/enabled; then
echo never > /sys/kernel/mm/transparent_hugepage/enabled
fi
if test -f /sys/kernel/mm/transparent_hugepage/defrag; then
echo never > /sys/kernel/mm/transparent_hugepage/defrag
fi
EOF
# rc.local is only executed at boot when it is executable.
chmod +x /etc/rc.d/rc.local
关闭numa功能
# Disable NUMA via a kernel boot parameter (numactl installed for inspection).
yum install numactl -y
# Edit /etc/default/grub and add 'numa=off' to the kernel command line:
vim /etc/default/grub
GRUB_CMDLINE_LINUX="rhgb quiet numa=off"
重新编译
# Regenerate the grub configuration so numa=off takes effect on next boot.
grub2-mkconfig -o /etc/grub2.cfg
共享内存段/dev/shm
# Resize /dev/shm to 8G (matches planned memory).
# NOTE(review): a remount is not persistent — add the size to /etc/fstab
# or it is lost at reboot; confirm.
mount -o remount,size=8g /dev/shm/
修改时区
# Set the system timezone to Asia/Shanghai and show the hardware clock.
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
hwclock
配置NOZEROCONF
防止169.254.0.0/255.255.0.0自动加入到路由表(Doc ID 1161144.1)
# Append NOZEROCONF=yes so the 169.254.0.0/16 zeroconf route is not added
# (Doc ID 1161144.1). One idempotent append replaces the original's
# duplicated manual-edit + here-doc steps (which wrote the line twice).
grep -q '^NOZEROCONF=yes' /etc/sysconfig/network || cat<<EOF>>/etc/sysconfig/network
NOZEROCONF=yes
EOF
修改nsswitch.conf
NIS可能会引起scan域名解析异常
vim /etc/nsswitch.conf
hosts: files dns myhostname
改为
hosts: files dns myhostname nis
关闭avahi-daemon守护进程
# Stop and disable avahi mDNS; it can interfere with cluster name resolution.
systemctl stop avahi-daemon.socket avahi-daemon.service
systemctl disable avahi-daemon.socket avahi-daemon.service
禁用NTP
# Disable OS time sync so Oracle CTSS can manage cluster time in active mode.
# NOTE(review): on EL7 the default time client is chronyd — likely also need
# 'systemctl stop/disable chronyd'; confirm on the target hosts.
systemctl stop ntpdate
systemctl disable ntpdate
配置grid用户环境变量
# Switch to the grid user and append the following to ~/.bash_profile.
su - grid
vim .bash_profile
PS1="[`whoami`@`hostname`:"'$PWD]$ '
export PS1
umask 022
#alias sqlplus="rlwrap sqlplus"
export TMP=/tmp
export LANG=en_US.UTF8
export TMPDIR=$TMP
export TZ=Asia/Shanghai
# RAC: the ASM instance SID is node-specific — +ASM1 on cwdb01, +ASM2 on
# cwdb02. The original '+ASM' is the single-instance name and would not
# match the ASM instance started on either RAC node.
ORACLE_SID=+ASM1; export ORACLE_SID
ORACLE_TERM=xterm; export ORACLE_TERM
ORACLE_BASE=/u01/app/grid; export ORACLE_BASE
ORACLE_HOME=/u01/app/19.3.0/grid; export ORACLE_HOME
NLS_DATE_FORMAT="yyyy-mm-dd HH24:MI:SS"; export NLS_DATE_FORMAT
PATH=$PATH:$HOME/bin:$ORACLE_HOME/bin; export PATH
THREADS_FLAG=native; export THREADS_FLAG
# Raise shell limits at login for the software owners.
if [ $USER = "oracle" ] || [ $USER = "grid" ]; then
if [ $SHELL = "/bin/ksh" ]; then
ulimit -p 16384
ulimit -n 65536
else
ulimit -u 16384 -n 65536
fi
umask 022
fi
配置oracle用户环境变量
# Switch to the oracle user and append the following to ~/.bash_profile.
su - oracle
vim .bash_profile
PS1="[`whoami`@`hostname`:"'$PWD]$ '
#alias sqlplus="rlwrap sqlplus"
#alias rman="rlwrap rman"
export PS1
export TMP=/tmp
export LANG=en_US.UTF8
export TMPDIR=$TMP
export TZ=Asia/Shanghai
export ORACLE_UNQNAME=cwdb
# RAC: per the plan the instance names are cwdb1/cwdb2, so the SID on node
# cwdb01 is cwdb1 (use cwdb2 on cwdb02). The original 'cwdb' is the DB name,
# not an instance name, and would not match the local instance.
ORACLE_SID=cwdb1; export ORACLE_SID
ORACLE_BASE=/u01/app/oracle; export ORACLE_BASE
ORACLE_HOME=$ORACLE_BASE/product/19.3.0/dbhome_1; export ORACLE_HOME
ORACLE_TERM=xterm; export ORACLE_TERM
NLS_DATE_FORMAT="yyyy-mm-dd HH24:MI:SS"; export NLS_DATE_FORMAT
NLS_LANG=AMERICAN_AMERICA.UTF8;export NLS_LANG
PATH=$PATH:$HOME/bin:$ORACLE_HOME/bin; export PATH
THREADS_FLAG=native; export THREADS_FLAG
# Raise shell limits at login for the software owners.
if [ $USER = "oracle" ] || [ $USER = "grid" ]; then
if [ $SHELL = "/bin/ksh" ]; then
ulimit -p 16384
ulimit -n 65536
else
ulimit -u 16384 -n 65536
fi
umask 022
fi
添加磁盘
两台虚拟机都要添加以下配置信息
diskLib.dataCacheMaxSize = "0"
diskLib.dataCacheMaxReadAheadSize = "0"
diskLib.DataCacheMinReadAheadSize = "0"
diskLib.dataCachePageSize = "4096"
diskLib.maxUnsyncedWrites = "0"
##参数必须设为TRUE,否则UUID获取失败
disk.EnableUUID="TRUE"
disk.locking = "FALSE"
####注意是scsi0不是scsi1,否则ASM肯定有问题
scsi0.sharedBus = "virtual"
scsi1节点2执行root脚本报错,找不到共享磁盘dgocr
判断scsi0还是scsi1
通过创建的磁盘虚拟设备节点来确定
磁盘分区规划
1、节点cwdb01
[root@cwdb01 ~]# fdisk -l | grep /dev/sd | grep "3221 MB"
Disk /dev/sdd: 3221 MB, 3221225472 bytes, 6291456 sectors
Disk /dev/sdc: 3221 MB, 3221225472 bytes, 6291456 sectors
Disk /dev/sde: 3221 MB, 3221225472 bytes, 6291456 sectors
[root@cwdb01 ~]# fdisk -l | grep /dev/sd | grep "6442 MB"
Disk /dev/sdf: 6442 MB, 6442450944 bytes, 12582912 sectors
[root@cwdb01 ~]# fdisk -l | grep /dev/sd | grep "4294 MB"
Disk /dev/sdg: 4294 MB, 4294967296 bytes, 8388608 sectors
[root@cwdb01 ~]# fdisk -l | grep /dev/sd | grep "5368 MB"
Disk /dev/sdh: 5368 MB, 5368709120 bytes, 10485760 sectors
2、节点cwdb02
[root@cwdb02 ~]# fdisk -l | grep /dev/sd | grep "3221 MB"
Disk /dev/sdc: 3221 MB, 3221225472 bytes, 6291456 sectors
Disk /dev/sdd: 3221 MB, 3221225472 bytes, 6291456 sectors
Disk /dev/sde: 3221 MB, 3221225472 bytes, 6291456 sectors
[root@cwdb02 ~]# fdisk -l | grep /dev/sd | grep "6442 MB"
Disk /dev/sdf: 6442 MB, 6442450944 bytes, 12582912 sectors
[root@cwdb02 ~]# fdisk -l | grep /dev/sd | grep "4294 MB"
Disk /dev/sdg: 4294 MB, 4294967296 bytes, 8388608 sectors
[root@cwdb02 ~]# fdisk -l | grep /dev/sd | grep "5368 MB"
Disk /dev/sdh: 5368 MB, 5368709120 bytes, 10485760 sectors
配置udev
1、查看配置文件
[root@cwdb01 ~]# ll -h /etc/scsi_id.config
ls: cannot access /etc/scsi_id.config: No such file or directory
2、创建配置文件
[root@cwdb01 ~]# echo "options=--whitelisted --replace-whitespace">>/etc/scsi_id.config
[root@cwdb02 ~]# echo "options=--whitelisted --replace-whitespace">>/etc/scsi_id.config
3、获取UUID
[root@cwdb01 ~]# for i in c d e;
> do
> echo "sd$i" "`/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/sd$i`";
> done
sdc 36000c295a1cb4a415ff58c9aafb4b819
sdd 36000c2953fcc4a1939d9f3e5799e54f3
sde 36000c29f394a3c1202c9e3706fc77444
[root@cwdb02 ~]# for i in c d e;
> do
> echo "sd$i" "`/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/sd$i`";
> done
sdc 36000c295a1cb4a415ff58c9aafb4b819
sdd 36000c2953fcc4a1939d9f3e5799e54f3
sde 36000c29f394a3c1202c9e3706fc77444
4、生成udev路径
[root@cwdb01 ~]# for i in c d e;
> do
> echo "KERNEL==\"sd?\",SUBSYSTEM==\"block\", PROGRAM==\"/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/\$name\",RESULT==\"`/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/sd$i`\", SYMLINK+=\"asm-disk$i\",OWNER=\"grid\", GROUP=\"asmadmin\",MODE=\"0660\""
> done
KERNEL=="sd?",SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name",RESULT=="36000c295a1cb4a415ff58c9aafb4b819", SYMLINK+="asm-diskc",OWNER="grid", GROUP="asmadmin",MODE="0660"
KERNEL=="sd?",SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name",RESULT=="36000c2953fcc4a1939d9f3e5799e54f3", SYMLINK+="asm-diskd",OWNER="grid", GROUP="asmadmin",MODE="0660"
KERNEL=="sd?",SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name",RESULT=="36000c29f394a3c1202c9e3706fc77444", SYMLINK+="asm-diske",OWNER="grid", GROUP="asmadmin",MODE="0660"
[root@cwdb02 ~]# for i in c d e;
> do
> echo "KERNEL==\"sd?\",SUBSYSTEM==\"block\", PROGRAM==\"/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/\$name\",RESULT==\"`/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/sd$i`\", SYMLINK+=\"asm-disk$i\",OWNER=\"grid\", GROUP=\"asmadmin\",MODE=\"0660\""
> done
KERNEL=="sd?",SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name",RESULT=="36000c295a1cb4a415ff58c9aafb4b819", SYMLINK+="asm-diskc",OWNER="grid", GROUP="asmadmin",MODE="0660"
KERNEL=="sd?",SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name",RESULT=="36000c2953fcc4a1939d9f3e5799e54f3", SYMLINK+="asm-diskd",OWNER="grid", GROUP="asmadmin",MODE="0660"
KERNEL=="sd?",SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name",RESULT=="36000c29f394a3c1202c9e3706fc77444", SYMLINK+="asm-diske",OWNER="grid", GROUP="asmadmin",MODE="0660"
[root@cwdb01 ~]# for i in f g h;
> do
> echo "KERNEL==\"sd?\",SUBSYSTEM==\"block\", PROGRAM==\"/usr/lib/udev/scsi_id