Oracle 10g RAC: Commonly Used Commands

--View node information
[oracle@rac1 ~]$ cd $CRS_HOME
[oracle@rac1 crs]$ pwd
/opt/ora10g/product/crs
[oracle@rac1 crs]$ cd bin/
[oracle@rac1 bin]$ ./olsnodes -n -p -i
rac1    1       rac1-priv       rac1-vip
rac2    2       rac2-priv       rac2-vip
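
--A quick reachability check over the addresses olsnodes reports (a sketch, assuming $CRS_HOME/bin is on the PATH and the names resolve on this node):
#!/bin/bash
# Sketch: ping each node's private and VIP name as listed by olsnodes.
# Note: a VIP only answers while its ora.<node>.vip resource is ONLINE.
olsnodes -n -p -i | while read node num priv vip; do
    for target in "$priv" "$vip"; do
        if ping -c 1 -W 2 "$target" > /dev/null 2>&1; then
            echo "$node: $target reachable"
        else
            echo "$node: $target NOT reachable"
        fi
    done
done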

--List the network interfaces
[oracle@rac1 ~]$ oifcfg iflist
eth0  192.168.137.0
eth1  192.168.136.0

--View the attributes of each interface
[oracle@rac1 ~]$ oifcfg getif
eth0  192.168.137.0  global  public
eth1  192.168.136.0  global  cluster_interconnect

[oracle@rac1 ~]$ oifcfg getif -global rac2
eth0  192.168.137.0  global  public
eth1  192.168.136.0  global  cluster_interconnect

[oracle@rac1 ~]$ oifcfg getif -node rac2

[oracle@rac1 ~]$ oifcfg getif -type cluster_interconnect
eth1  192.168.136.0  global  cluster_interconnect

--Delete the network interface definitions
[oracle@rac1 ~]$ oifcfg delif -global
[oracle@rac1 ~]$ oifcfg getif -global

--Re-add the network interface definitions
[oracle@rac1 ~]$ oifcfg setif -global eth0/192.168.137.0:public
[oracle@rac1 ~]$ oifcfg setif -global eth1/192.168.136.0:cluster_interconnect
[oracle@rac1 ~]$ oifcfg getif -global
eth0  192.168.137.0  global  public
eth1  192.168.136.0  global  cluster_interconnect
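
--delif -global removes every interface definition at once, so it is safer to capture the current oifcfg getif output first and re-apply it from the saved file (a sketch; /tmp/oifcfg.save is an illustrative path):
# Sketch: save the current definitions before 'oifcfg delif -global' ...
oifcfg getif > /tmp/oifcfg.save

# ... and re-apply them afterwards, field by field (interface, subnet, scope, type).
while read ifname subnet scope iftype; do
    [ -n "$ifname" ] && oifcfg setif -"$scope" "$ifname/$subnet:$iftype"
done < /tmp/oifcfg.save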

--Check CRS
[oracle@rac1 ~]$ crsctl check crs
CSS appears healthy
CRS appears healthy
EVM appears healthy

--Check the individual daemons
[oracle@rac1 ~]$ crsctl check cssd
CSS appears healthy
[oracle@rac1 ~]$ crsctl check crsd
CRS appears healthy
[oracle@rac1 ~]$ crsctl check evmd
EVM appears healthy
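
--The daemon checks can be wrapped in one small status script, e.g. for cron or a monitoring agent (a sketch, assuming $CRS_HOME/bin is on the oracle user's PATH):
#!/bin/bash
# Sketch: report each Clusterware daemon and exit non-zero if any is not healthy.
rc=0
for d in cssd crsd evmd; do
    out=$(crsctl check $d 2>&1)
    echo "$d: $out"
    echo "$out" | grep -qi "appears healthy" || rc=1
done
exit $rc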

--Disable automatic CRS startup at boot
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl disable crs
--Enable automatic CRS startup at boot
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl enable crs

--Stop CRS
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl stop crs
Stopping resources.
Successfully stopped CRS resources
Stopping CSSD.
Shutting down CSS daemon.
Shutdown request successfully issued.
--Start CRS
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl start crs
Attempting to start CRS stack
The CRS stack will be started shortly
[oracle@rac1 ~]$ crsctl check crs
Failure 1 contacting CSS daemon
Cannot communicate with CRS
Cannot communicate with EVM
[oracle@rac1 ~]$ crsctl check crs
CSS appears healthy
CRS appears healthy
EVM appears healthy
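
--crsctl start crs returns before the stack is actually up, which is why the first check above fails; a small polling loop avoids guessing (a sketch):
#!/bin/bash
# Sketch: wait up to 10 minutes for the CRS stack to report healthy after 'crsctl start crs'.
for i in $(seq 1 60); do
    if crsctl check crs 2>&1 | grep -qi "CRS appears healthy"; then
        echo "CRS stack is up"
        exit 0
    fi
    sleep 10
done
echo "CRS stack did not come up within 10 minutes" >&2
exit 1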

--View the voting disk location
[oracle@rac1 ~]$ crsctl query css votedisk
 0.     0    /dev/raw/raw2

located 1 votedisk(s).

--View the cluster heartbeat timeouts
--View the disk heartbeat timeout (disktimeout)
[oracle@rac1 ~]$ crsctl get css disktimeout
unrecognized parameter disktimeout specified.
--View the network heartbeat timeout (misscount, in seconds)
[oracle@rac1 ~]$ crsctl get css misscount
60
--To change it: crsctl set css misscount 100

--List the modules of each daemon
[oracle@rac1 ~]$ crsctl lsmodules crs
The following are the CRS modules ::
    CRSUI
    CRSCOMM
    CRSRTI
    CRSMAIN
    CRSPLACE
    CRSAPP
    CRSRES
    CRSCOMM
    CRSOCR
    CRSTIMER
    CRSEVT
    CRSD
    CLUCLS
    CSSCLNT
    COMMCRS
    COMMNS
[oracle@rac1 ~]$ crsctl lsmodules css
The following are the CSS modules ::
    CSSD
    COMMCRS
    COMMNS
[oracle@rac1 ~]$ crsctl lsmodules evm
The following are the EVM modules ::
   EVMD
   EVMDMAIN
   EVMCOMM
   EVMEVT
   EVMAPP
   EVMAGENT
   CRSOCR
   CLUCLS
   CSSCLNT
   COMMCRS
   COMMNS
   
--Add voting disks (typically two are added, for an odd total of three)
    --Check the current voting disk location
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl query css votedisk
    0.     0    /dev/raw/raw2

    located 1 votedisk(s).
    --Stop CRS on all nodes
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl stop crs
    Stopping resources.
    Successfully stopped CRS resources
    Stopping CSSD.
    Shutting down CSS daemon.
    Shutdown request successfully issued.
    --Add the voting disks
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl add css votedisk /dev/raw/raw3
    Cluster is not in a ready state for online disk addition
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl add css votedisk /dev/raw/raw3 -force
    Now formatting voting disk: /dev/raw/raw3
    successful addition of votedisk /dev/raw/raw3.
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl add css votedisk /dev/raw/raw4 -force
    Now formatting voting disk: /dev/raw/raw4
    successful addition of votedisk /dev/raw/raw4.

    --Confirm the result
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl query css votedisk
    0.     0    /dev/raw/raw2
    1.     0    /dev/raw/raw3
    2.     0    /dev/raw/raw4
   
    located 3 votedisk(s).
    --Start CRS
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl start crs
    Attempting to start CRS stack
    The CRS stack will be started shortly
    --Check the result
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl check crs
    Failure 1 contacting CSS daemon
    Cannot communicate with CRS
    Cannot communicate with EVM
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl check crs
    CSS appears healthy
    CRS appears healthy
    EVM appears healthy
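
    --Voting disks have no automatic backup in 10g; a dd copy of each raw device is the usual approach.
    --A sketch, run as root (the backup directory is illustrative):
    #!/bin/bash
    # Sketch: copy every configured voting disk to a backup file with dd.
    # Assumes $CRS_HOME/bin is on PATH (or use the full path as in the examples above).
    BACKUP_DIR=/backup/votedisk                    # illustrative path
    mkdir -p "$BACKUP_DIR"
    crsctl query css votedisk | awk '{print $NF}' | grep '^/' | while read vd; do
        dd if="$vd" of="$BACKUP_DIR/$(basename $vd).bak" bs=1M
    done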

   
   
--View the OCR configuration file location
[oracle@rac2 oracle]$ pwd
/etc/oracle
[oracle@rac2 oracle]$ more ocr.loc
ocrconfig_loc=/dev/raw/raw1
local_only=FALSE
   
--View the automatic OCR backups
[oracle@rac1 ~]$ ocrconfig -showbackup

rac1     2012/09/07 14:20:08     /opt/ora10g/product/crs/cdata/crs

rac1     2012/09/06 18:53:34     /opt/ora10g/product/crs/cdata/crs

rac1     2012/09/06 14:53:33     /opt/ora10g/product/crs/cdata/crs

rac1     2012/09/05 15:24:48     /opt/ora10g/product/crs/cdata/crs

rac1     2012/09/05 15:24:48     /opt/ora10g/product/crs/cdata/crs
[root@rac1 crs]# pwd
/opt/ora10g/product/crs/cdata/crs
[root@rac1 crs]# ls -ltr
total 23568
-rw-r--r-- 1 root root 4018176 Sep  5 15:24 week.ocr
-rw-r--r-- 1 root root 4018176 Sep  5 15:24 day.ocr
-rw-r--r-- 1 root root 4018176 Sep  6 14:53 backup02.ocr
-rw-r--r-- 1 root root 4018176 Sep  6 14:53 day_.ocr
-rw-r--r-- 1 root root 4018176 Sep  6 18:53 backup01.ocr
-rw-r--r-- 1 root root 4018176 Sep  7 14:20 backup00.ocr
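
--The automatic backups above all live under $CRS_HOME/cdata/crs on a single node, so it is worth also taking a logical export and copying the newest backup somewhere safe. A sketch, run as root (the destination path is illustrative):
#!/bin/bash
# Sketch: take a logical OCR export and copy the newest automatic backup off the node.
CRS_HOME=/opt/ora10g/product/crs
DEST=/backup/ocr                                   # illustrative path
STAMP=$(date +%Y%m%d_%H%M%S)
mkdir -p "$DEST"
$CRS_HOME/bin/ocrconfig -export "$DEST/ocr_export_$STAMP.exp"
cp -p "$CRS_HOME/cdata/crs/backup00.ocr" "$DEST/backup00_$STAMP.ocr"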

--Check OCR consistency
[oracle@rac1 ~]$ ocrcheck
Status of Oracle Cluster Registry is as follows :
         Version                  :          2
         Total space (kbytes)     :     196504
         Used space (kbytes)      :       3808
         Available space (kbytes) :     192696
         ID                       : 1464966774
         Device/File Name         : /dev/raw/raw1
                                    Device/File integrity check succeeded

                                    Device/File not configured

         Cluster registry integrity check succeeded
         
--OCR backup and restore example
    --Check the OCR configuration file location
    [oracle@rac2 oracle]$ pwd
    /etc/oracle
    [oracle@rac2 oracle]$ more ocr.loc
    ocrconfig_loc=/dev/raw/raw1
    local_only=FALSE
    --Stop CRS on all nodes
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl stop crs
    Stopping resources.
    Successfully stopped CRS resources
    Stopping CSSD.
    Shutting down CSS daemon.
    Shutdown request successfully issued.
    --Export the OCR
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrconfig -export /tmp/ocr.exp
    --Restart CRS
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl start crs
    Attempting to start CRS stack
    The CRS stack will be started shortly
    --Check CRS
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl check crs
    CSS appears healthy
    CRS appears healthy
    EVM appears healthy
    --Corrupt the OCR (for testing only)
    [root@rac1 ~]# dd if=/dev/zero of=/dev/raw/raw1 bs=2048 count=1024000
    dd: writing `/dev/raw/raw1': No space left on device
    98297+0 records in
    98296+0 records out
    201310208 bytes (201 MB) copied, 332.386 seconds, 606 kB/s
    --Check consistency
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrcheck
    PROT-601: Failed to initialize ocrcheck
    --Restore the OCR from the export
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrconfig -import /tmp/ocr.exp
    --Check consistency again
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrcheck
    Status of Oracle Cluster Registry is as follows :
         Version                  :          2
         Total space (kbytes)     :     196504
         Used space (kbytes)      :       3816
         Available space (kbytes) :     192688
         ID                       : 1283814407
         Device/File Name         : /dev/raw/raw1
                                    Device/File integrity check succeeded

                                    Device/File not configured

         Cluster registry integrity check succeeded
    --Start CRS
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl start crs
    Attempting to start CRS stack
    The CRS stack will be started shortly
    --Check CRS status
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl check crs
    CSS appears healthy
    CRS appears healthy
    EVM appears healthy
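
    --If no logical export exists, the automatic physical backups listed by ocrconfig -showbackup
    --can usually be restored instead with ocrconfig -restore (run as root, CRS stopped on all nodes).
    --A sketch using the newest backup shown earlier:
    /opt/ora10g/product/crs/bin/crsctl stop crs
    /opt/ora10g/product/crs/bin/ocrconfig -restore /opt/ora10g/product/crs/cdata/crs/backup00.ocr
    /opt/ora10g/product/crs/bin/ocrcheck
    /opt/ora10g/product/crs/bin/crsctl start crs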
         
--Relocate the OCR files (at most two OCR copies are allowed: one primary and one mirror)
    --Check the OCR configuration file location
    [oracle@rac2 oracle]$ pwd
    /etc/oracle
    [oracle@rac2 oracle]$ more ocr.loc
    ocrconfig_loc=/dev/raw/raw1
    --Back up the OCR first
    --View the current OCR configuration
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrcheck
    Status of Oracle Cluster Registry is as follows :
         Version                  :          2
         Total space (kbytes)     :     196504
         Used space (kbytes)      :       3820
         Available space (kbytes) :     192684
         ID                       : 1283814407
         Device/File Name         : /dev/raw/raw1
                                    Device/File integrity check succeeded

                                    Device/File not configured

         Cluster registry integrity check succeeded
    --Add an OCR mirror
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrconfig -replace ocrmirror /dev/raw/raw3
    --Verify the mirror
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrcheck
    Status of Oracle Cluster Registry is as follows :
         Version                  :          2
         Total space (kbytes)     :     196504
         Used space (kbytes)      :       3820
         Available space (kbytes) :     192684
         ID                       : 1283814407
         Device/File Name         : /dev/raw/raw1
                                    Device/File integrity check succeeded
         Device/File Name         : /dev/raw/raw3
                                    Device/File integrity check succeeded

         Cluster registry integrity check succeeded

    --Move the primary OCR to a new device
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrconfig -replace ocr /dev/raw/raw4
    --View the OCR configuration
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrcheck
    Status of Oracle Cluster Registry is as follows :
         Version                  :          2
         Total space (kbytes)     :    4938664
         Used space (kbytes)      :       3820
         Available space (kbytes) :    4934844
         ID                       : 1283814407
         Device/File Name         : /dev/raw/raw4
                                    Device/File needs to be synchronized with the other device
         Device/File Name         : /dev/raw/raw3
                                    Device/File integrity check succeeded

         Cluster registry integrity check succeeded
    --Confirm that ocr.loc has been updated
    [root@rac1 ~]# cat /etc/oracle/ocr.loc
    #Device/file /dev/raw/raw1 getting replaced by device /dev/raw/raw4
    ocrconfig_loc=/dev/raw/raw4
    ocrmirrorconfig_loc=/dev/raw/raw3
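
    --ocr.loc must end up identical on every node; a quick cross-node check
    --(a sketch, assuming passwordless ssh between the nodes):
    for h in rac1 rac2; do
        echo "== $h =="
        ssh $h cat /etc/oracle/ocr.loc
    done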
   
   
   
--View resource status

[oracle@rac1 ~]$ crs_stat -t -v
Name           Type           R/RA   F/FT   Target    State     Host       
----------------------------------------------------------------------
ora....SM1.asm application    0/5    0/0    ONLINE    ONLINE    rac1       
ora....C1.lsnr application    0/5    0/0    ONLINE    ONLINE    rac1       
ora.rac1.gsd   application    0/5    0/0    ONLINE    ONLINE    rac1       
ora.rac1.ons   application    0/3    0/0    ONLINE    ONLINE    rac1       
ora.rac1.vip   application    0/0    0/0    ONLINE    ONLINE    rac1       
ora....SM2.asm application    0/5    0/0    ONLINE    ONLINE    rac2       
ora....C2.lsnr application    0/5    0/0    ONLINE    ONLINE    rac2       
ora.rac2.gsd   application    0/5    0/0    ONLINE    ONLINE    rac2       
ora.rac2.ons   application    0/3    0/0    ONLINE    ONLINE    rac2       
ora.rac2.vip   application    0/0    0/0    ONLINE    ONLINE    rac2       
ora.racdb.db   application    0/1    0/1    ONLINE    ONLINE    rac1       
ora....b1.inst application    0/5    0/0    ONLINE    ONLINE    rac1       
ora....b2.inst application    0/5    0/0    ONLINE    ONLINE    rac2    
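
--crs_stat without -t prints one NAME=/TARGET=/STATE= block per resource, which is easier to filter in scripts; a sketch that reports any resource not ONLINE:
crs_stat | awk -F= '
    /^NAME=/  { name = $2 }
    /^STATE=/ { if ($2 !~ /^ONLINE/) print name, "->", $2 }
'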

--View detailed information for one resource
[oracle@rac1 ~]$ crs_stat -p ora.rac2.vip
NAME=ora.rac2.vip
TYPE=application
ACTION_SCRIPT=/opt/ora10g/product/crs/bin/racgwrap
ACTIVE_PLACEMENT=1
AUTO_START=1
CHECK_INTERVAL=60
DESCRIPTION=CRS application for VIP on a node
FAILOVER_DELAY=0
FAILURE_INTERVAL=0
FAILURE_THRESHOLD=0
HOSTING_MEMBERS=rac2
OPTIONAL_RESOURCES=
PLACEMENT=favored
REQUIRED_RESOURCES=
RESTART_ATTEMPTS=0
SCRIPT_TIMEOUT=60
START_TIMEOUT=0
STOP_TIMEOUT=0
UPTIME_THRESHOLD=7d
USR_ORA_ALERT_NAME=
USR_ORA_CHECK_TIMEOUT=0
USR_ORA_CONNECT_STR=/ as sysdba
USR_ORA_DEBUG=0
USR_ORA_DISCONNECT=false
USR_ORA_FLAGS=
USR_ORA_IF=eth0
USR_ORA_INST_NOT_SHUTDOWN=
USR_ORA_LANG=
USR_ORA_NETMASK=255.255.255.0
USR_ORA_OPEN_MODE=
USR_ORA_OPI=false
USR_ORA_PFILE=
USR_ORA_PRECONNECT=none
USR_ORA_SRV=
USR_ORA_START_TIMEOUT=0
USR_ORA_STOP_MODE=immediate
USR_ORA_STOP_TIMEOUT=0
USR_ORA_VIP=192.168.137.154

--View resource ownership and permissions
[oracle@rac1 ~]$ crs_stat -ls
Name           Owner          Primary PrivGrp          Permission 
-----------------------------------------------------------------
ora....SM1.asm oracle         oinstall                 rwxrwxr--
ora....C1.lsnr oracle         oinstall                 rwxrwxr--
ora.rac1.gsd   oracle         oinstall                 rwxr-xr--
ora.rac1.ons   oracle         oinstall                 rwxr-xr--
ora.rac1.vip   root           oinstall                 rwxr-xr--
ora....SM2.asm oracle         oinstall                 rwxrwxr--
ora....C2.lsnr oracle         oinstall                 rwxrwxr--
ora.rac2.gsd   oracle         oinstall                 rwxr-xr--
ora.rac2.ons   oracle         oinstall                 rwxr-xr--
ora.rac2.vip   root           oinstall                 rwxr-xr--
ora.racdb.db   oracle         oinstall                 rwxrwxr--
ora....b1.inst oracle         oinstall                 rwxrwxr--
ora....b2.inst oracle         oinstall                 rwxrwxr--

--View the ONS configuration
[oracle@rac1 conf]$ pwd
/opt/ora10g/product/crs/opmn/conf
[oracle@rac1 conf]$ cat ons.config
localport=6113                 --local listener port (127.0.0.1)
remoteport=6200                --remote listener port (addresses other than 127.0.0.1)
loglevel=3
useocr=on
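
--remoteport (and useocr) must match on every node or the ONS daemons cannot reach each other; a quick comparison (a sketch, assuming passwordless ssh between the nodes):
for h in rac1 rac2; do
    echo "== $h =="
    ssh $h cat /opt/ora10g/product/crs/opmn/conf/ons.config
done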

--Check the ports
[oracle@rac1 conf]$ netstat -ano|grep 6113
tcp        0      0 127.0.0.1:6113              0.0.0.0:*                   LISTEN      off (0.00/0/0)
tcp        0      0 127.0.0.1:1146              127.0.0.1:6113              ESTABLISHED keepalive (6092.43/0/0)
tcp        0      0 127.0.0.1:6113              127.0.0.1:1154              ESTABLISHED off (0.00/0/0)
tcp        0      0 127.0.0.1:6113              127.0.0.1:1146              ESTABLISHED off (0.00/0/0)
tcp        0      0 127.0.0.1:1154              127.0.0.1:6113              ESTABLISHED keepalive (6209.37/0/0)
[oracle@rac1 conf]$ netstat -ano|grep 6200
tcp        0      0 0.0.0.0:6200                0.0.0.0:*                   LISTEN      off (0.00/0/0)
tcp        0      0 192.168.137.151:6200        192.168.137.152:18512       ESTABLISHED off (0.00/0/0)

--View the ONS processes
[oracle@rac1 ~]$ ps -ef | grep ons
root      3772     1  0 10:06 ?        00:00:00 sendmail: accepting connections
oracle    6647 13385  0 10:42 pts/0    00:00:00 grep ons
oracle   17466     1  0 10:20 ?        00:00:00 /opt/ora10g/product/crs/opmn/bin/ons -d
oracle   17468 17466  0 10:20 ?        00:00:00 /opt/ora10g/product/crs/opmn/bin/ons -d

--Check whether ONS is running
[oracle@rac1 ~]$ onsctl ping
Number of onsconfiguration retrieved, numcfg = 2
onscfg[0]
   {node = rac1, port = 6200}
Adding remote host rac1:6200
onscfg[1]
   {node = rac2, port = 6200}
Adding remote host rac2:6200
ons is running ...

--Stop and start ONS
[oracle@rac1 ~]$ onsctl stop
onsctl: shutting down ons daemon ...
Number of onsconfiguration retrieved, numcfg = 2
onscfg[0]
   {node = rac1, port = 6200}
Adding remote host rac1:6200
onscfg[1]
   {node = rac2, port = 6200}
Adding remote host rac2:6200
[oracle@rac1 ~]$ onsctl ping
Number of onsconfiguration retrieved, numcfg = 2
onscfg[0]
   {node = rac1, port = 6200}
Adding remote host rac1:6200
onscfg[1]
   {node = rac2, port = 6200}
Adding remote host rac2:6200
ons is not running ...
[oracle@rac1 ~]$ onsctl start
Number of onsconfiguration retrieved, numcfg = 2
onscfg[0]
   {node = rac1, port = 6200}
Adding remote host rac1:6200
onscfg[1]
   {node = rac2, port = 6200}
Adding remote host rac2:6200
Number of onsconfiguration retrieved, numcfg = 2
onscfg[0]
   {node = rac1, port = 6200}
Adding remote host rac1:6200
onscfg[1]
   {node = rac2, port = 6200}
Adding remote host rac2:6200
onsctl: ons started
[oracle@rac1 ~]$ onsctl ping
Number of onsconfiguration retrieved, numcfg = 2
onscfg[0]
   {node = rac1, port = 6200}
Adding remote host rac1:6200
onscfg[1]
   {node = rac2, port = 6200}
Adding remote host rac2:6200
ons is running ...

--View detailed ONS information
[oracle@rac1 ~]$ onsctl debug
Number of onsconfiguration retrieved, numcfg = 2
onscfg[0]
   {node = rac1, port = 6200}
Adding remote host rac1:6200
onscfg[1]
   {node = rac2, port = 6200}
Adding remote host rac2:6200
HTTP/1.1 200 OK
Content-Length: 1357
Content-Type: text/html
Response:


======== NS ========

Listeners:

 NAME    BIND ADDRESS   PORT   FLAGS   SOCKET
------- --------------- ----- -------- ------
Local   127.000.000.001  6113 00000142      7
Remote  192.168.137.151  6200 00000101      8
Request     No listener

Server connections:

    ID           IP        PORT    FLAGS    SENDQ     WORKER   BUSY  SUBS
---------- --------------- ----- -------- ---------- -------- ------ -----
         1 192.168.137.152  6200 00010005          0               1     0

Client connections:

    ID           IP        PORT    FLAGS    SENDQ     WORKER   BUSY  SUBS
---------- --------------- ----- -------- ---------- -------- ------ -----
         2 127.000.000.001  6113 0001001a          0               1     1
         3 127.000.000.001  6113 0001001a          0               1     0

Pending connections:

    ID           IP        PORT    FLAGS    SENDQ     WORKER   BUSY  SUBS
---------- --------------- ----- -------- ---------- -------- ------ -----
         0 127.000.000.001  6113 00020812          0               1     0

Worker Ticket: 1/1, Idle: 361

   THREAD   FLAGS
  -------- --------
  b7868b90 00000012
  b7067b90 00000012
  b66abb90 00000012

Resources:

  Notifications:
    Received: 0, in Receive Q: 0, Processed: 0, in Process Q: 0

  Pools:
    Message: 24/25 (1), Link: 25/25 (1), Subscription: 24/25 (1)

--View the databases registered in the OCR
[oracle@rac1 ~]$ srvctl config database
racdb
[oracle@rac1 ~]$ srvctl config database -d racdb
rac1 racdb1 /opt/ora10g/product/database
rac2 racdb2 /opt/ora10g/product/database
[oracle@rac1 ~]$ srvctl config database -d racdb -a
rac1 racdb1 /opt/ora10g/product/database
rac2 racdb2 /opt/ora10g/product/database
DB_NAME: racdb
ORACLE_HOME: /opt/ora10g/product/database
SPFILE: +DATA/racdb/spfileracdb.ora
DOMAIN: null
DB_ROLE: null
START_OPTIONS: null
POLICY:  AUTOMATIC
ENABLE FLAG: DB ENABLED
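
--To dump the registered configuration of every database in the OCR in one pass (a sketch):
for db in $(srvctl config database); do
    echo "== $db =="
    srvctl config database -d "$db" -a
done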

--View a node's nodeapps configuration
[oracle@rac1 ~]$ srvctl config nodeapps -n rac1
rac1 racdb1 /opt/ora10g/product/database

--View the VIP configuration
[oracle@rac1 ~]$ srvctl config nodeapps -n rac1 -a
VIP exists.: /rac1-vip/192.168.137.153/255.255.255.0/eth0

--View GSD
[oracle@rac1 ~]$ srvctl config nodeapps -n rac1 -g
GSD exists.

--View ONS
[oracle@rac1 ~]$ srvctl config nodeapps -n rac1 -s
ONS daemon exists.

--View the listener
[oracle@rac1 ~]$ srvctl config nodeapps -n rac1 -l
Listener exists.

--View the listener configuration
[oracle@rac1 ~]$ srvctl config listener -n rac1
rac1 LISTENER_RAC1

--View the ASM configuration
[oracle@rac1 ~]$ srvctl config asm -n rac2
+ASM2 /opt/ora10g/product/database

--Configure whether the database starts automatically with CRS (disable, then re-enable)
[oracle@rac1 ~]$ srvctl disable database -d racdb
[oracle@rac1 ~]$ srvctl config database -d racdb -a
rac1 racdb1 /opt/ora10g/product/database
rac2 racdb2 /opt/ora10g/product/database
DB_NAME: racdb
ORACLE_HOME: /opt/ora10g/product/database
SPFILE: +DATA/racdb/spfileracdb.ora
DOMAIN: null
DB_ROLE: null
START_OPTIONS: null
POLICY:  MANUAL
ENABLE FLAG: DB DISABLED, INST DISABLED ON racdb1 racdb2
[oracle@rac1 ~]$ srvctl enable database -d racdb
[oracle@rac1 ~]$ srvctl config database -d racdb -a
rac1 racdb1 /opt/ora10g/product/database
rac2 racdb2 /opt/ora10g/product/database
DB_NAME: racdb
ORACLE_HOME: /opt/ora10g/product/database
SPFILE: +DATA/racdb/spfileracdb.ora
DOMAIN: null
DB_ROLE: null
START_OPTIONS: null
POLICY:  AUTOMATIC
ENABLE FLAG: DB ENABLED

--View the current CRS software version
[root@rac2 ~]# /opt/ora10g/product/crs/bin/crsctl query crs softwareversion
CRS software version on node [rac2] is [10.2.0.5.0]

--Configure whether a single instance starts automatically with CRS (disable, then re-enable)
[oracle@rac1 ~]$ srvctl disable instance -d racdb -i racdb2
[oracle@rac1 ~]$ srvctl config database -d racdb -a
rac1 racdb1 /opt/ora10g/product/database
rac2 racdb2 /opt/ora10g/product/database
DB_NAME: racdb
ORACLE_HOME: /opt/ora10g/product/database
SPFILE: +DATA/racdb/spfileracdb.ora
DOMAIN: null
DB_ROLE: null
START_OPTIONS: null
POLICY:  AUTOMATIC
ENABLE FLAG: DB ENABLED, INST DISABLED ON racdb2
[oracle@rac1 ~]$ srvctl enable instance -d racdb -i racdb2
[oracle@rac1 ~]$ srvctl config database -d racdb -a
rac1 racdb1 /opt/ora10g/product/database
rac2 racdb2 /opt/ora10g/product/database
DB_NAME: racdb
ORACLE_HOME: /opt/ora10g/product/database
SPFILE: +DATA/racdb/spfileracdb.ora
DOMAIN: null
DB_ROLE: null
START_OPTIONS: null
POLICY:  AUTOMATIC
ENABLE FLAG: DB ENABLED

--Operate on a single instance (stop and start, optionally with startup/shutdown options)
[oracle@rac1 ~]$ srvctl stop instance -d racdb -i racdb2
[oracle@rac1 ~]$ srvctl status database -d racdb
Instance racdb1 is running on node rac1
Instance racdb2 is not running on node rac2
[oracle@rac1 ~]$ srvctl start instance -d racdb -i racdb2
[oracle@rac1 ~]$ srvctl status database -d racdb
Instance racdb1 is running on node rac1
Instance racdb2 is running on node rac2   

[oracle@rac1 ~]$ srvctl stop instance -d racdb -i racdb2 -o immediate
[oracle@rac1 ~]$ srvctl status database -d racdb
Instance racdb1 is running on node rac1
Instance racdb2 is not running on node rac2
[oracle@rac1 ~]$ srvctl start instance -d racdb -i racdb2 -o nomount
[oracle@rac1 ~]$ srvctl start instance -d racdb -i racdb2 -o mount
[oracle@rac1 ~]$ srvctl start instance -d racdb -i racdb2 -o open
[oracle@rac1 ~]$ srvctl status database -d racdb
Instance racdb1 is running on node rac1
Instance racdb2 is running on node rac2
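
--These per-instance commands combine into a simple rolling restart, taking one instance down at a time so the database stays available (a sketch using this cluster's instance names):
#!/bin/bash
# Sketch: restart the racdb instances one at a time.
DB=racdb
for inst in racdb1 racdb2; do
    echo "Restarting instance $inst ..."
    srvctl stop instance -d $DB -i $inst -o immediate
    srvctl start instance -d $DB -i $inst
    srvctl status database -d $DB
done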

--Recover from OCR and voting disk corruption (no backup available)
    --Stop CRS on all nodes
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl stop crs
    Stopping resources.
    Successfully stopped CRS resources
    Stopping CSSD.
    Shutting down CSS daemon.
    Shutdown request successfully issued.
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl check crs
    Failure 1 contacting CSS daemon
    Cannot communicate with CRS
    Cannot communicate with EVM
    [root@rac2 ~]# /opt/ora10g/product/crs/bin/crsctl stop crs
    Stopping resources.
    Successfully stopped CRS resources
    Stopping CSSD.
    Shutting down CSS daemon.
    Shutdown request successfully issued.
    [root@rac2 ~]# /opt/ora10g/product/crs/bin/crsctl check crs
    Failure 1 contacting CSS daemon
    Cannot communicate with CRS
    Cannot communicate with EVM
   
    --Run the rootdelete.sh script on every node
    [root@rac1 ~]# /opt/ora10g/product/crs/install/rootdelete.sh
    Shutting down Oracle Cluster Ready Services (CRS):
    Stopping resources.
    Error while stopping resources. Possible cause: CRSD is down.
    Stopping CSSD.
    Unable to communicate with the CSS daemon.
    Shutdown has begun. The daemons should exit soon.
    Checking to see if Oracle CRS stack is down...
    Oracle CRS stack is not running.
    Oracle CRS stack is down now.
    Removing script for Oracle Cluster Ready services
    Updating ocr file for downgrade
    Cleaning up SCR settings in '/etc/oracle/scls_scr'
    [root@rac2 ~]# /opt/ora10g/product/crs/install/rootdelete.sh
    Shutting down Oracle Cluster Ready Services (CRS):
    Stopping resources.
    Error while stopping resources. Possible cause: CRSD is down.
    Stopping CSSD.
    Unable to communicate with the CSS daemon.
    Shutdown has begun. The daemons should exit soon.
    Checking to see if Oracle CRS stack is down...
    Oracle CRS stack is not running.
    Oracle CRS stack is down now.
    Removing script for Oracle Cluster Ready services
    Updating ocr file for downgrade
    Cleaning up SCR settings in '/etc/oracle/scls_scr'
   
    --Run rootdeinstall.sh on any one node
    [root@rac1 ~]# /opt/ora10g/product/crs/install/rootdeinstall.sh
   
    Removing contents from OCR device
    2560+0 records in
    2560+0 records out
    10485760 bytes (10 MB) copied, 7.24653 seconds, 1.4 MB/s
   
    --Run root.sh on the same node used in the previous step
    [root@rac1 ~]# /opt/ora10g/product/crs/root.sh
    WARNING: directory '/opt/ora10g/product' is not owned by root
    WARNING: directory '/opt/ora10g' is not owned by root
    Checking to see if Oracle CRS stack is already configured
   
    Setting the permissions on OCR backup directory
    Setting up NS directories
    Oracle Cluster Registry configuration upgraded successfully
    WARNING: directory '/opt/ora10g/product' is not owned by root
    WARNING: directory '/opt/ora10g' is not owned by root
    assigning default hostname rac1 for node 1.
    assigning default hostname rac2 for node 2.
    Successfully accumulated necessary OCR keys.
    Using ports: CSS=49895 CRS=49896 EVMC=49898 and EVMR=49897.
    node :
    node 1: rac1 rac1-priv rac1
    node 2: rac2 rac2-priv rac2
    Creating OCR keys for user 'root', privgrp 'root'..
    Operation successful.
    Now formatting voting device: /dev/raw/raw2
    Format of 1 voting devices complete.
    Startup will be queued to init within 90 seconds.
    Adding daemons to inittab
    Expecting the CRS daemons to be up within 600 seconds.
    CSS is active on these nodes.
            rac1
    CSS is inactive on these nodes.
            rac2
    Local node checking complete.
    Run root.sh on remaining nodes to start CRS daemons.
   
    --Run root.sh on the remaining node(s)
    [root@rac2 ~]# /opt/ora10g/product/crs/root.sh
    WARNING: directory '/opt/ora10g/product' is not owned by root
    WARNING: directory '/opt/ora10g' is not owned by root
    Checking to see if Oracle CRS stack is already configured
   
    Setting the permissions on OCR backup directory
    Setting up NS directories
    Oracle Cluster Registry configuration upgraded successfully
    WARNING: directory '/opt/ora10g/product' is not owned by root
    WARNING: directory '/opt/ora10g' is not owned by root
    clscfg: EXISTING configuration version 3 detected.
    clscfg: version 3 is 10G Release 2.
    assigning default hostname rac1 for node 1.
    assigning default hostname rac2 for node 2.
    Successfully accumulated necessary OCR keys.
    Using ports: CSS=49895 CRS=49896 EVMC=49898 and EVMR=49897.
    node :
    node 1: rac1 rac1-priv rac1
    node 2: rac2 rac2-priv rac2
    clscfg: Arguments check out successfully.
   
    NO KEYS WERE WRITTEN. Supply -force parameter to override.
    -force is destructive and will destroy any previous cluster
    configuration.
    Oracle Cluster Registry for cluster has already been initialized
    Startup will be queued to init within 90 seconds.
    Adding daemons to inittab
    Expecting the CRS daemons to be up within 600 seconds.
    CSS is active on these nodes.
            rac1
            rac2
    CSS is active on all nodes.
    Waiting for the Oracle CRSD and EVMD to start
    Waiting for the Oracle CRSD and EVMD to start
    Waiting for the Oracle CRSD and EVMD to start
    Waiting for the Oracle CRSD and EVMD to start
    Waiting for the Oracle CRSD and EVMD to start
    Waiting for the Oracle CRSD and EVMD to start
    Waiting for the Oracle CRSD and EVMD to start
    Waiting for the Oracle CRSD and EVMD to start
    Waiting for the Oracle CRSD and EVMD to start
    Waiting for the Oracle CRSD and EVMD to start
    Oracle CRS stack installed and running under init(1M)
    Running vipca(silent) for configuring nodeapps
    Error 0(Native: listNetInterfaces:[3])
    [Error 0(Native: listNetInterfaces:[3])]
   
    --If vipca fails on the other node with the error above, configure the interfaces manually as follows
    [root@rac2 ~]# /opt/ora10g/product/crs/bin/oifcfg iflist
    eth0  192.168.137.0
    eth1  192.168.136.0
    [root@rac2 ~]# /opt/ora10g/product/crs/bin/oifcfg setif -global eth0/192.168.137.0:public
    [root@rac2 ~]# /opt/ora10g/product/crs/bin/oifcfg setif -global eth1/192.168.136.0:cluster_interconnect
    [root@rac2 ~]# /opt/ora10g/product/crs/bin/oifcfg getif
    eth0  192.168.137.0  global  public
    eth1  192.168.136.0  global  cluster_interconnect
   
    --Check CRS
    [oracle@rac1 ~]$ crsctl check crs
    CSS appears healthy
    CRS appears healthy
    EVM appears healthy
    [oracle@rac1 ~]$ crs_stat -t -v
    CRS-0202: No resources are registered.
   
    --Configure the VIPs (run vipca as root)
    [oracle@rac1 ~]$ su -
    Password:
    [root@rac1 ~]# /opt/ora10g/product/crs/bin/vipca
    [root@rac1 ~]# su - oracle
    [oracle@rac1 ~]$ crs_stat -t
    Name           Type           Target    State     Host       
    ------------------------------------------------------------
    ora.rac1.gsd   application    ONLINE    ONLINE    rac1       
    ora.rac1.ons   application    ONLINE    ONLINE    rac1       
    ora.rac1.vip   application    ONLINE    ONLINE    rac1       
    ora.rac2.gsd   application    ONLINE    ONLINE    rac2       
    ora.rac2.ons   application    ONLINE    ONLINE    rac2       
    ora.rac2.vip   application    ONLINE    ONLINE    rac2       
   
    --Re-register the ASM, database, and instance resources
    [oracle@rac1 ~]$ srvctl add asm -n rac1 -i +ASM1 -o /opt/ora10g/product/database
    [oracle@rac1 ~]$ srvctl add asm -n rac2 -i +ASM2 -o /opt/ora10g/product/database
    [oracle@rac1 ~]$ srvctl start asm -n rac1
    [oracle@rac1 ~]$ srvctl start asm -n rac2
    [oracle@rac1 ~]$ srvctl add database -d racdb -o /opt/ora10g/product/database
    [oracle@rac1 ~]$ srvctl add instance -d racdb -i racdb1 -n rac1
    [oracle@rac1 ~]$ srvctl add instance -d racdb -i racdb2 -n rac2
    [oracle@rac1 ~]$ srvctl modify instance -d racdb -i racdb1 -s +ASM1
    [oracle@rac1 ~]$ srvctl modify instance -d racdb -i racdb2 -s +ASM2
    [oracle@rac1 ~]$ srvctl start database -d racdb
   
    --Reconfigure the listeners with netca (delete them first, then recreate)
    Note: user equivalence (passwordless SSH between the nodes) must be working first
    [oracle@rac1 ~]$ netca
   
    --View resource status
    [oracle@rac1 ~]$ crs_stat -t -v
    Name           Type           R/RA   F/FT   Target    State     Host       
    ----------------------------------------------------------------------
    ora....SM1.asm application    0/5    0/0    ONLINE    ONLINE    rac1       
    ora....C1.lsnr application    0/5    0/0    ONLINE    ONLINE    rac1       
    ora.rac1.gsd   application    0/5    0/0    ONLINE    ONLINE    rac1       
    ora.rac1.ons   application    0/3    0/0    ONLINE    ONLINE    rac1       
    ora.rac1.vip   application    0/0    0/0    ONLINE    ONLINE    rac1       
    ora....SM2.asm application    0/5    0/0    ONLINE    ONLINE    rac2       
    ora....C2.lsnr application    0/5    0/0    ONLINE    ONLINE    rac2       
    ora.rac2.gsd   application    0/5    0/0    ONLINE    ONLINE    rac2       
    ora.rac2.ons   application    0/3    0/0    ONLINE    ONLINE    rac2       
    ora.rac2.vip   application    0/0    0/0    ONLINE    ONLINE    rac2       
    ora.racdb.db   application    0/0    0/1    ONLINE    ONLINE    rac1       
    ora....b1.inst application    0/5    0/0    ONLINE    ONLINE    rac1       
    ora....b2.inst application    0/5    0/0    ONLINE    ONLINE    rac2    
   
 

Source: ITPUB blog, http://blog.itpub.net/26143577/viewspace-744482/ (please credit the source when reposting).
