近期针对客户10.1.0.4的RAC平台做了个脚本升级到10.1.0.5
RAC环境:solaris 两个节点:clnode1 clnode2
步骤如下:
1. upgrade oracle software
1.1 shutdown database (run on clnode1)
shutdown instance on all nodes:
$ORACLE_HOME/bin/srvctl stop database -d db_name
1.2 shutdown all processes, listener and CSS, as root (run on clnode1)
#/etc/init.d/init.cssd stop
1.3 upgrade crs software and database software (run on clnode1) (note: need to make rac_crs.rsp and rac_db.rsp)
$./runInstaller -silent -responseFile rac_crs.rsp
$./runInstaller -silent -responseFile rac_db.rsp
1.4 run root.sh on all nodes
$ORACLE_HOME/root.sh
2. upgrade oracle database 10.1.0.4 to 10.1.0.5
2.1 run scripts on clnode1
SQL> ALTER SYSTEM SET CLUSTER_DATABASE=FALSE SCOPE=spfile;
SQL> SHUTDOWN IMMEDIATE
SQL> STARTUP UPGRADE
SQL> SPOOL patch.log
SQL> @?/rdbms/admin/catpatch.sql
SQL> SPOOL OFF
SQL>@?/rdbms/admin/utlrp.sql
SQL>ALTER SYSTEM SET CLUSTER_DATABASE=TRUE SCOPE=spfile;
2.2 run changePerm.sh on clnode1 and clnode2
$ORACLE_HOME/install/changePerm.sh
可执行ksh脚本如下:
#!/usr/bin/ksh
#
# Silent upgrade of a two-node (clnode1/clnode2) Oracle 10g RAC from
# 10.1.0.4 to 10.1.0.5: stop the stack, back up the software homes,
# apply the patch set, run catpatch.sql and restart all services.
# Must be run as root on clnode1.

# Oracle database and CRS installation homes.
export ORACLE_HOME=/opt/oracle/server/10.1
export CRS_HOME=/opt/oracle/crs/10.1

# Absolute directory this script was started from (patch media and
# response files are expected underneath it).
SOURCE_DIR=`dirname $0`
SOURCE_DIR=`cd $SOURCE_DIR && pwd`
export SOURCE_DIR

# Where tar backups of the Oracle/CRS homes are written.
BACKUP_DIR=/global/profiler5/activity/backup
export BACKUP_DIR

# Install log directory and the consolidated history log.
INSTALL_LOG_DIR=/var/tmp/prof5_install
export INSTALL_LOG_DIR
HISTORY_LOG=$INSTALL_LOG_DIR/install_history.log
export HISTORY_LOG

test ! -d "$INSTALL_LOG_DIR" && mkdir -p "$INSTALL_LOG_DIR" && chown root:other "$INSTALL_LOG_DIR" && chmod 777 "$INSTALL_LOG_DIR"
test -f "$HISTORY_LOG" && rm -f "$HISTORY_LOG"
touch "$HISTORY_LOG"
chown root:other "$HISTORY_LOG"
chmod 777 "$HISTORY_LOG"

# Copy all screen output to install_history.log file.
# Thus all output can be found also from log file: start a ksh
# co-process running tee and point stdout at it via 1>&p.
tee -a $HISTORY_LOG >/dev/tty |&
exec 1>&p
# Redirect errout to stdout so errors are captured in the log too.
exec 2>&1

HOSTNAME=`uname -n`
if [ "$HOSTNAME" = "" ];then
# BUG FIX: the inner double quotes around uname -n terminated the
# string prematurely; use single quotes inside the message instead.
echo "ERROR: Could not resolve hostname with command 'uname -n'...please restart the setup"
exit 1
fi
###############################################################################
#
# Functions
#
###############################################################################
#
# Test RSH connection in clustered installations.
# Returns 0 when clnode2 answers an rsh command; otherwise prints an
# error and exits the whole script with status 1.
#
test_rsh_connection() {
# BUG FIX: the trailing '\c' (ksh echo: suppress newline) was garbled
# to a bare 'c' in the published copy.
echo "Testing RSH connection...\c"
# Only the exit status matters; discard the listing output.
rsh clnode2 "ls -la" > /dev/null
if [ $? -eq 0 ]; then
echo "Done"
return 0
else
echo "ERROR: RSH connection to clnode2 is not working."
echo "ERROR: Please check /.rhosts and /etc/inet/inetd.conf files from clnode2."
exit 1
fi
}
#
# Set CLUSTER & CLUSTER_NODE_ID variables.
# Uses Sun Cluster's clinfo/scha_cluster_get to detect a clustered host.
# The upgrade only supports a two-node cluster and must start on node 1.
#
/usr/sbin/clinfo > /dev/null 2>&1
if [ $? -eq 0 ]; then
CLUSTER=true
PATH=$PATH:/usr/cluster/bin
export PATH
# NOTE: the old '> /dev/null' after these assignments was a no-op
# (nothing is written to stdout by an assignment) and was removed.
CLUSTER_NODE_ID=`/usr/sbin/clinfo -n`
NODE_COUNT=`/usr/cluster/bin/scha_cluster_get -O ALL_NODEIDS | wc -l`
# BUG FIX: reject any cluster with more than two nodes; the previous
# test (-eq 4) only caught exactly four nodes while the error message
# forbids multinode installations in general.
if [ "$NODE_COUNT" -gt 2 ]; then
echo "ERROR: This installation does not support installations in multinode."
exit 1
elif [ "$CLUSTER_NODE_ID" -ne 1 ]; then
echo "ERROR: Installation must be started in node 1"
exit 1
fi
echo "Clustered Solaris detected with node id $CLUSTER_NODE_ID"
test_rsh_connection
else
echo "Non clustered Solaris detected"
fi
###############################################################################
#
# Oracle shutdown
#
###############################################################################
# Stop the database, the node applications on both nodes, and finally
# the CRS daemons, so the software homes can be patched offline.
echo "Shutting down all oracle instances"
# BUG FIXES below: "Stoppint" typos corrected to "Stopping" and the
# garbled trailing 'c' restored to ksh echo's '\c' (suppress newline).
echo "Stopping database...\c"
su - oracle -c "srvctl stop database -d nap3"
echo "Done."
echo "Stopping nodeapps...\c"
su - oracle -c "srvctl stop nodeapps -n clnode1"
echo ", clnode1 shutdown finished\c"
su - oracle -c "srvctl stop nodeapps -n clnode2"
echo ", clnode2 shutdown finished."
echo "Stopping crs daemon..."
/etc/init.d/init.crs stop
rsh clnode2 "/etc/init.d/init.crs stop"
echo "Shutdown completed."
###############################################################################
#
# Backup Oracle Software
#
###############################################################################
# Tar up the CRS and Oracle homes on both nodes so the patch can be
# rolled back if needed; the tar file listings go to the history log.
test ! -d "$BACKUP_DIR" && mkdir -p "$BACKUP_DIR"
rsh clnode2 "test ! -d $BACKUP_DIR && mkdir -p $BACKUP_DIR"
# BUG FIX throughout: the garbled trailing 'c' in these prompts is
# restored to ksh echo's '\c' (suppress newline).
echo "Backup crs software on clnode1...\c"
tar -cvf $BACKUP_DIR/crs1.tar $CRS_HOME >>$HISTORY_LOG
echo "Done."
echo "Backup crs software on clnode2...\c"
rsh clnode2 "tar -cvf $BACKUP_DIR/crs2.tar $CRS_HOME" >>$HISTORY_LOG
echo "Done."
echo "Backup oracle software on clnode1...\c"
tar -cvf $BACKUP_DIR/server1.tar $ORACLE_HOME >>$HISTORY_LOG
echo "Done."
echo "Backup oracle software on clnode2...\c"
rsh clnode2 "tar -cvf $BACKUP_DIR/server2.tar $ORACLE_HOME" >>$HISTORY_LOG
echo "Done."
#End of Backup Oracle Software
###############################################################################
#
# Oracle upgrade to 10.1.0.5.0
#
###############################################################################
# Unpack the patch set, run the silent installer against the CRS home
# and then the database home, and run root.sh on each node.
echo "Unpacking Oracle 10.1.0.5.0 patch....\c"
su - oracle -c "cd /tmp; unzip -o $SOURCE_DIR/patchset/p4505133_10105_SOLARIS64.zip" >> $HISTORY_LOG
su - oracle -c "cd /tmp/Disk1/;./runInstaller -silent -responseFile $SOURCE_DIR/response/rac_crs.rsp"| tee -a /tmp/oracle10.1.0.5.log
echo "Starting to patch Oracle database home"
su - oracle -c "cd /tmp/Disk1/;./runInstaller -silent -responseFile $SOURCE_DIR/response/rac_db.rsp"| tee -a /tmp/oracle10.1.0.5.log
echo "Running db root.sh on clnode1"
# root.sh asks up to three yes/no questions; answer Y to each via a
# here-document (the published copy's '<' / '@EOF' was a mangled
# '<<EOF' here-doc — BUG FIX).
$ORACLE_HOME/root.sh <<EOF
Y
Y
Y
EOF
echo "Running db root.sh on clnode2"
# The here-document is embedded inside the quoted command so that the
# remote shell on clnode2 feeds the answers to root.sh there.
rsh clnode2 "$ORACLE_HOME/root.sh <<EOF
Y
Y
Y
EOF"
echo "Done"
# Verify if Oracle is updated successfully: the version is the 3rd
# field of the non-empty line printed by 'sqlplus -v'.
ORA_VER=`su - oracle -c "sqlplus -v"|grep .|awk '{print $3}'`
if [ "$ORA_VER" != "10.1.0.5.0" ];then
echo "Oracle upgrade failed! Transaction abort !"
exit 1
else
# BUG FIX: leading 'n' was a garbled '\n' (ksh echo newline escape).
echo "\nOracle database software updated successfully! Need executing sqls..."
fi
#Start Upgraded Oracle 10.1.0.5
###############################################################################
#
# Post-patch dictionary upgrade (catpatch.sql) and full service restart
#
###############################################################################
# Start CRS on clnode1 and bring up a single instance so catpatch.sql
# can run with CLUSTER_DATABASE=FALSE, as the patch set README requires.
/etc/init.d/init.crs start
echo "Sleeping 300 seconds to wait crs startup"
sleep 300
su - oracle -c "srvctl start nodeapps -n clnode1"
su - oracle -c "srvctl start instance -d nap3 -i nap3x1"
# Run the data-dictionary patch script in UPGRADE mode, recompile
# invalid objects with utlrp.sql, then restore CLUSTER_DATABASE=TRUE.
# (BUG FIX: the '<' / '@EOF' in the published copy was a mangled
# '<<EOF' here-document.)
su - oracle -c "sqlplus /nolog" <<EOF
CONNECT / AS SYSDBA
ALTER SYSTEM SET CLUSTER_DATABASE=FALSE SCOPE=spfile;
SHUTDOWN IMMEDIATE
STARTUP UPGRADE
SPOOL patch.log
@$ORACLE_HOME/rdbms/admin/catpatch.sql
SPOOL OFF
SHUTDOWN IMMEDIATE
STARTUP
@$ORACLE_HOME/rdbms/admin/utlrp.sql
ALTER SYSTEM SET CLUSTER_DATABASE=TRUE SCOPE=spfile;
SHUTDOWN IMMEDIATE
STARTUP
EOF
# Running changePerm.sh script on an Oracle database server home,
# answering 'y' to its confirmation prompt on both nodes.
su - oracle -c "cd $ORACLE_HOME/install;./changePerm.sh <<EOF
y
EOF"
# BUG FIX: the inner double quotes must be escaped so the whole su
# command survives the rsh quoting (they were broken nested quotes
# in the published copy).
rsh clnode2 "su - oracle -c \"cd $ORACLE_HOME/install;./changePerm.sh <<EOF
y
EOF\""
#Start All Oracle Service
su - oracle -c "srvctl start instance -d nap3 -i nap3x1"
rsh clnode2 "/etc/init.d/init.crs start"
echo "Waiting 300 seconds CRS startup..."
sleep 300
su - oracle -c "srvctl start nodeapps -n clnode2"
su - oracle -c "srvctl start instance -d nap3 -i nap3x2"
echo "Oracle Update Finished."
来自 “ ITPUB博客 ” ,链接:http://blog.itpub.net/3898/viewspace-1007919/,如需转载,请注明出处,否则将追究法律责任。
转载于:http://blog.itpub.net/3898/viewspace-1007919/