oracle10g rac常用命令整理
--檢視節點資訊
[oracle@rac1 ~]$ cd $CRS_HOME
[oracle@rac1 crs]$ pwd
/opt/ora10g/product/crs
[oracle@rac1 crs]$ cd bin/
[oracle@rac1 bin]$ ./olsnodes -n -p -i
rac1 1 rac1-priv rac1-vip
rac2 2 rac2-priv rac2-vip
--顯示網口列表
[oracle@rac1 ~]$ oifcfg iflist
eth0 192.168.137.0
eth1 192.168.136.0
--檢視每個網路卡的屬性
[oracle@rac1 ~]$ oifcfg getif
eth0 192.168.137.0 global public
eth1 192.168.136.0 global cluster_interconnect
[oracle@rac1 ~]$ oifcfg getif -global rac2
eth0 192.168.137.0 global public
eth1 192.168.136.0 global cluster_interconnect
[oracle@rac1 ~]$ oifcfg getif -node rac2
[oracle@rac1 ~]$ oifcfg getif -type cluster_interconnect
eth1 192.168.136.0 global cluster_interconnect
--刪除網路介面
[oracle@rac1 ~]$ oifcfg delif -global
[oracle@rac1 ~]$ oifcfg getif -global
--增加網路介面
[oracle@rac1 ~]$ oifcfg setif -global eth0/192.168.137.0:public
[oracle@rac1 ~]$ oifcfg setif -global eth1/192.168.136.0:cluster_interconnect
[oracle@rac1 ~]$ oifcfg getif -global
eth0 192.168.137.0 global public
eth1 192.168.136.0 global cluster_interconnect
--檢查CRS
[oracle@rac1 ~]$ crsctl check crs
CSS appears healthy
CRS appears healthy
EVM appears healthy
--檢查服務
[oracle@rac1 ~]$ crsctl check cssd
CSS appears healthy
[oracle@rac1 ~]$ crsctl check crsd
CRS appears healthy
[oracle@rac1 ~]$ crsctl check evmd
EVM appears healthy
--關閉CRS自動重啟
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl disable crs
--開啟CRS自動重啟
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl enable crs
--關閉CRS
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl stop crs
Stopping resources.
Successfully stopped CRS resources
Stopping CSSD.
Shutting down CSS daemon.
Shutdown request successfully issued.
--開啟CRS
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl start crs
Attempting to start CRS stack
The CRS stack will be started shortly
[oracle@rac1 ~]$ crsctl check crs
Failure 1 contacting CSS daemon
Cannot communicate with CRS
Cannot communicate with EVM
[oracle@rac1 ~]$ crsctl check crs
CSS appears healthy
CRS appears healthy
EVM appears healthy
--檢視vote disk位置
[oracle@rac1 ~]$ crsctl query css votedisk
0. 0 /dev/raw/raw2
located 1 votedisk(s).
--檢視節點間延遲時間
--檢視disk heartbeat
[oracle@rac1 ~]$ crsctl get css disktimeout
unrecognized parameter disktimeout specified.
--檢視network heartbeat
[oracle@rac1 ~]$ crsctl get css misscount
60
--crsctl set css misscount 100
--檢視各個服務模組
[oracle@rac1 ~]$ crsctl lsmodules crs
The following are the CRS modules ::
CRSUI
CRSCOMM
CRSRTI
CRSMAIN
CRSPLACE
CRSAPP
CRSRES
CRSCOMM
CRSOCR
CRSTIMER
CRSEVT
CRSD
CLUCLS
CSSCLNT
COMMCRS
COMMNS
[oracle@rac1 ~]$ crsctl lsmodules css
The following are the CSS modules ::
CSSD
COMMCRS
COMMNS
[oracle@rac1 ~]$ crsctl lsmodules evm
The following are the EVM modules ::
EVMD
EVMDMAIN
EVMCOMM
EVMEVT
EVMAPP
EVMAGENT
CRSOCR
CLUCLS
CSSCLNT
COMMCRS
COMMNS
--新增votedisk(一般新增2個)
--檢視votedisk位置
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl query css votedisk
0. 0 /dev/raw/raw2
located 1 votedisk(s).
--停止所有CRS
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl stop crs
Stopping resources.
Successfully stopped CRS resources
Stopping CSSD.
Shutting down CSS daemon.
Shutdown request successfully issued.
--新增votedisk
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl add css votedisk /dev/raw/raw3
Cluster is not in a ready state for online disk addition
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl add css votedisk /dev/raw/raw3 -force
Now formatting voting disk: /dev/raw/raw3
successful addition of votedisk /dev/raw/raw3.
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl add css votedisk /dev/raw/raw4 -force
Now formatting voting disk: /dev/raw/raw4
successful addition of votedisk /dev/raw/raw4.
--確認新增後結果
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl query css votedisk
0. 0 /dev/raw/raw2
1. 0 /dev/raw/raw3
2. 0 /dev/raw/raw4
located 3 votedisk(s).
--開啟CRS
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl start crs
Attempting to start CRS stack
The CRS stack will be started shortly
--檢視結果
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl check crs
Failure 1 contacting CSS daemon
Cannot communicate with CRS
Cannot communicate with EVM
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl check crs
CSS appears healthy
CRS appears healthy
EVM appears healthy
--檢視OCR配置檔案位置
[oracle@rac2 oracle]$ pwd
/etc/oracle
[oracle@rac2 oracle]$ more ocr.loc
ocrconfig_loc=/dev/raw/raw1
local_only=FALSE
--檢視OCR備份
[oracle@rac1 ~]$ ocrconfig -showbackup
rac1 2012/09/07 14:20:08 /opt/ora10g/product/crs/cdata/crs
rac1 2012/09/06 18:53:34 /opt/ora10g/product/crs/cdata/crs
rac1 2012/09/06 14:53:33 /opt/ora10g/product/crs/cdata/crs
rac1 2012/09/05 15:24:48 /opt/ora10g/product/crs/cdata/crs
rac1 2012/09/05 15:24:48 /opt/ora10g/product/crs/cdata/crs
[root@rac1 crs]# pwd
/opt/ora10g/product/crs/cdata/crs
[root@rac1 crs]# ls -ltr
total 23568
-rw-r--r-- 1 root root 4018176 Sep 5 15:24 week.ocr
-rw-r--r-- 1 root root 4018176 Sep 5 15:24 day.ocr
-rw-r--r-- 1 root root 4018176 Sep 6 14:53 backup02.ocr
-rw-r--r-- 1 root root 4018176 Sep 6 14:53 day_.ocr
-rw-r--r-- 1 root root 4018176 Sep 6 18:53 backup01.ocr
-rw-r--r-- 1 root root 4018176 Sep 7 14:20 backup00.ocr
--檢查OCR一致性
[oracle@rac1 ~]$ ocrcheck
Status of Oracle Cluster Registry is as follows :
Version : 2
Total space (kbytes) : 196504
Used space (kbytes) : 3808
Available space (kbytes) : 192696
ID : 1464966774
Device/File Name : /dev/raw/raw1
Device/File integrity check succeeded
Device/File not configured
Cluster registry integrity check succeeded
--OCR備份恢復例項
--檢視OCR配置檔案位置
[oracle@rac2 oracle]$ pwd
/etc/oracle
[oracle@rac2 oracle]$ more ocr.loc
ocrconfig_loc=/dev/raw/raw1
local_only=FALSE
--停止所有CRS
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl stop crs
Stopping resources.
Successfully stopped CRS resources
Stopping CSSD.
Shutting down CSS daemon.
Shutdown request successfully issued.
--匯出OCR
[root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrconfig -export /tmp/ocr.exp
--重啟CRS
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl start crs
Attempting to start CRS stack
The CRS stack will be started shortly
--檢查CRS
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl check crs
CSS appears healthy
CRS appears healthy
EVM appears healthy
--破壞OCR
[root@rac1 ~]# dd if=/dev/zero of=/dev/raw/raw1 bs=2048 count=1024000
dd: writing `/dev/raw/raw1': No space left on device
98297+0 records in
98296+0 records out
201310208 bytes (201 MB) copied, 332.386 seconds, 606 kB/s
--檢查一致性
[root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrcheck
PROT-601: Failed to initialize ocrcheck
--恢復備份的OCR
[root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrconfig -import /tmp/ocr.exp
--檢查一致性
[root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrcheck
Status of Oracle Cluster Registry is as follows :
Version : 2
Total space (kbytes) : 196504
Used space (kbytes) : 3816
Available space (kbytes) : 192688
ID : 1283814407
Device/File Name : /dev/raw/raw1
Device/File integrity check succeeded
Device/File not configured
Cluster registry integrity check succeeded
--啟動CRS
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl start crs
Attempting to start CRS stack
The CRS stack will be started shortly
--檢查CRS狀態
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl check crs
CSS appears healthy
CRS appears healthy
EVM appears healthy
--移動OCR檔案(OCR只能<=2個,一主一映象)
--檢視OCR配置檔案位置
[oracle@rac2 oracle]$ pwd
/etc/oracle
[oracle@rac2 oracle]$ more ocr.loc
ocrconfig_loc=/dev/raw/raw1
--備份OCR
--檢視當前OCR配置
[root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrcheck
Status of Oracle Cluster Registry is as follows :
Version : 2
Total space (kbytes) : 196504
Used space (kbytes) : 3820
Available space (kbytes) : 192684
ID : 1283814407
Device/File Name : /dev/raw/raw1
Device/File integrity check succeeded
Device/File not configured
Cluster registry integrity check succeeded
--映象當前OCR
[root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrconfig -replace ocrmirror /dev/raw/raw3
--檢視映象結果
[root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrcheck
Status of Oracle Cluster Registry is as follows :
Version : 2
Total space (kbytes) : 196504
Used space (kbytes) : 3820
Available space (kbytes) : 192684
ID : 1283814407
Device/File Name : /dev/raw/raw1
Device/File integrity check succeeded
Device/File Name : /dev/raw/raw3
Device/File integrity check succeeded
Cluster registry integrity check succeeded
--改變primary ocr檔案位置
[root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrconfig -replace ocr /dev/raw/raw4
--檢視OCR配置資訊
[root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrcheck
Status of Oracle Cluster Registry is as follows :
Version : 2
Total space (kbytes) : 4938664
Used space (kbytes) : 3820
Available space (kbytes) : 4934844
ID : 1283814407
Device/File Name : /dev/raw/raw4
Device/File needs to be synchronized with the other device
Device/File Name : /dev/raw/raw3
Device/File integrity check succeeded
Cluster registry integrity check succeeded
--檢視OCR配置檔案內容是否更新
[root@rac1 ~]# cat /etc/oracle/ocr.loc
#Device/file /dev/raw/raw1 getting replaced by device /dev/raw/raw4
ocrconfig_loc=/dev/raw/raw4
ocrmirrorconfig_loc=/dev/raw/raw3
--檢視資源狀態
[oracle@rac1 ~]$ crs_stat -t -v
Name Type R/RA F/FT Target State Host
----------------------------------------------------------------------
ora....SM1.asm application 0/5 0/0 ONLINE ONLINE rac1
ora....C1.lsnr application 0/5 0/0 ONLINE ONLINE rac1
ora.rac1.gsd application 0/5 0/0 ONLINE ONLINE rac1
ora.rac1.ons application 0/3 0/0 ONLINE ONLINE rac1
ora.rac1.vip application 0/0 0/0 ONLINE ONLINE rac1
ora....SM2.asm application 0/5 0/0 ONLINE ONLINE rac2
ora....C2.lsnr application 0/5 0/0 ONLINE ONLINE rac2
ora.rac2.gsd application 0/5 0/0 ONLINE ONLINE rac2
ora.rac2.ons application 0/3 0/0 ONLINE ONLINE rac2
ora.rac2.vip application 0/0 0/0 ONLINE ONLINE rac2
ora.racdb.db application 0/1 0/1 ONLINE ONLINE rac1
ora....b1.inst application 0/5 0/0 ONLINE ONLINE rac1
ora....b2.inst application 0/5 0/0 ONLINE ONLINE rac2
--檢視某資源詳細資訊
[oracle@rac1 ~]$ crs_stat -p ora.rac2.vip
NAME=ora.rac2.vip
TYPE=application
ACTION_SCRIPT=/opt/ora10g/product/crs/bin/racgwrap
ACTIVE_PLACEMENT=1
AUTO_START=1
CHECK_INTERVAL=60
DESCRIPTION=CRS application for VIP on a node
FAILOVER_DELAY=0
FAILURE_INTERVAL=0
FAILURE_THRESHOLD=0
HOSTING_MEMBERS=rac2
OPTIONAL_RESOURCES=
PLACEMENT=favored
REQUIRED_RESOURCES=
RESTART_ATTEMPTS=0
SCRIPT_TIMEOUT=60
START_TIMEOUT=0
STOP_TIMEOUT=0
UPTIME_THRESHOLD=7d
USR_ORA_ALERT_NAME=
USR_ORA_CHECK_TIMEOUT=0
USR_ORA_CONNECT_STR=/ as sysdba
USR_ORA_DEBUG=0
USR_ORA_DISCONNECT=false
USR_ORA_FLAGS=
USR_ORA_IF=eth0
USR_ORA_INST_NOT_SHUTDOWN=
USR_ORA_LANG=
USR_ORA_NETMASK=255.255.255.0
USR_ORA_OPEN_MODE=
USR_ORA_OPI=false
USR_ORA_PFILE=
USR_ORA_PRECONNECT=none
USR_ORA_SRV=
USR_ORA_START_TIMEOUT=0
USR_ORA_STOP_MODE=immediate
USR_ORA_STOP_TIMEOUT=0
USR_ORA_VIP=192.168.137.154
--檢視資源許可權資訊
[oracle@rac1 ~]$ crs_stat -ls
Name Owner Primary PrivGrp Permission
-----------------------------------------------------------------
ora....SM1.asm oracle oinstall rwxrwxr--
ora....C1.lsnr oracle oinstall rwxrwxr--
ora.rac1.gsd oracle oinstall rwxr-xr--
ora.rac1.ons oracle oinstall rwxr-xr--
ora.rac1.vip root oinstall rwxr-xr--
ora....SM2.asm oracle oinstall rwxrwxr--
ora....C2.lsnr oracle oinstall rwxrwxr--
ora.rac2.gsd oracle oinstall rwxr-xr--
ora.rac2.ons oracle oinstall rwxr-xr--
ora.rac2.vip root oinstall rwxr-xr--
ora.racdb.db oracle oinstall rwxrwxr--
ora....b1.inst oracle oinstall rwxrwxr--
ora....b2.inst oracle oinstall rwxrwxr--
--檢視ONS配置資訊
[oracle@rac1 conf]$ pwd
/opt/ora10g/product/crs/opmn/conf
[oracle@rac1 conf]$ cat ons.config
localport=6113 --本地(127.0.0.1)監聽埠
remoteport=6200 --遠端(除127.0.0.1)監聽埠
loglevel=3
useocr=on
--檢視埠情況
[oracle@rac1 conf]$ netstat -ano|grep 6113
tcp 0 0 127.0.0.1:6113 0.0.0.0:* LISTEN off (0.00/0/0)
tcp 0 0 127.0.0.1:1146 127.0.0.1:6113 ESTABLISHED keepalive (6092.43/0/0)
tcp 0 0 127.0.0.1:6113 127.0.0.1:1154 ESTABLISHED off (0.00/0/0)
tcp 0 0 127.0.0.1:6113 127.0.0.1:1146 ESTABLISHED off (0.00/0/0)
tcp 0 0 127.0.0.1:1154 127.0.0.1:6113 ESTABLISHED keepalive (6209.37/0/0)
[oracle@rac1 conf]$ netstat -ano|grep 6200
tcp 0 0 0.0.0.0:6200 0.0.0.0:* LISTEN off (0.00/0/0)
tcp 0 0 192.168.137.151:6200 192.168.137.152:18512 ESTABLISHED off (0.00/0/0)
--檢視ONS程式
[oracle@rac1 ~]$ ps -ef | grep ons
root 3772 1 0 10:06 ? 00:00:00 sendmail: accepting connections
oracle 6647 13385 0 10:42 pts/0 00:00:00 grep ons
oracle 17466 1 0 10:20 ? 00:00:00 /opt/ora10g/product/crs/opmn/bin/ons -d
oracle 17468 17466 0 10:20 ? 00:00:00 /opt/ora10g/product/crs/opmn/bin/ons -d
--檢視ONS執行狀態
[oracle@rac1 ~]$ onsctl ping
Number of onsconfiguration retrieved, numcfg = 2
onscfg[0]
{node = rac1, port = 6200}
Adding remote host rac1:6200
onscfg[1]
{node = rac2, port = 6200}
Adding remote host rac2:6200
ons is running ...
--ONS的啟動和停止
[oracle@rac1 ~]$ onsctl stop
onsctl: shutting down ons daemon ...
Number of onsconfiguration retrieved, numcfg = 2
onscfg[0]
{node = rac1, port = 6200}
Adding remote host rac1:6200
onscfg[1]
{node = rac2, port = 6200}
Adding remote host rac2:6200
[oracle@rac1 ~]$ onsctl ping
Number of onsconfiguration retrieved, numcfg = 2
onscfg[0]
{node = rac1, port = 6200}
Adding remote host rac1:6200
onscfg[1]
{node = rac2, port = 6200}
Adding remote host rac2:6200
ons is not running ...
[oracle@rac1 ~]$ onsctl start
Number of onsconfiguration retrieved, numcfg = 2
onscfg[0]
{node = rac1, port = 6200}
Adding remote host rac1:6200
onscfg[1]
{node = rac2, port = 6200}
Adding remote host rac2:6200
Number of onsconfiguration retrieved, numcfg = 2
onscfg[0]
{node = rac1, port = 6200}
Adding remote host rac1:6200
onscfg[1]
{node = rac2, port = 6200}
Adding remote host rac2:6200
onsctl: ons started
[oracle@rac1 ~]$ onsctl ping
Number of onsconfiguration retrieved, numcfg = 2
onscfg[0]
{node = rac1, port = 6200}
Adding remote host rac1:6200
onscfg[1]
{node = rac2, port = 6200}
Adding remote host rac2:6200
ons is running ...
--檢視ONS詳細資訊
[oracle@rac1 ~]$ onsctl debug
Number of onsconfiguration retrieved, numcfg = 2
onscfg[0]
{node = rac1, port = 6200}
Adding remote host rac1:6200
onscfg[1]
{node = rac2, port = 6200}
Adding remote host rac2:6200
HTTP/1.1 200 OK
Content-Length: 1357
Content-Type: text/html
Response:
======== NS ========
Listeners:
NAME BIND ADDRESS PORT FLAGS SOCKET
------- --------------- ----- -------- ------
Local 127.000.000.001 6113 00000142 7
Remote 192.168.137.151 6200 00000101 8
Request No listener
Server connections:
ID IP PORT FLAGS SENDQ WORKER BUSY SUBS
---------- --------------- ----- -------- ---------- -------- ------ -----
1 192.168.137.152 6200 00010005 0 1 0
Client connections:
ID IP PORT FLAGS SENDQ WORKER BUSY SUBS
---------- --------------- ----- -------- ---------- -------- ------ -----
2 127.000.000.001 6113 0001001a 0 1 1
3 127.000.000.001 6113 0001001a 0 1 0
Pending connections:
ID IP PORT FLAGS SENDQ WORKER BUSY SUBS
---------- --------------- ----- -------- ---------- -------- ------ -----
0 127.000.000.001 6113 00020812 0 1 0
Worker Ticket: 1/1, Idle: 361
THREAD FLAGS
-------- --------
b7868b90 00000012
b7067b90 00000012
b66abb90 00000012
Resources:
Notifications:
Received: 0, in Receive Q: 0, Processed: 0, in Process Q: 0
Pools:
Message: 24/25 (1), Link: 25/25 (1), Subscription: 24/25 (1)
--檢視OCR中註冊的資料庫資訊
[oracle@rac1 ~]$ srvctl config database
racdb
[oracle@rac1 ~]$ srvctl config database -d racdb
rac1 racdb1 /opt/ora10g/product/database
rac2 racdb2 /opt/ora10g/product/database
[oracle@rac1 ~]$ srvctl config database -d racdb -a
rac1 racdb1 /opt/ora10g/product/database
rac2 racdb2 /opt/ora10g/product/database
DB_NAME: racdb
ORACLE_HOME: /opt/ora10g/product/database
SPFILE: +DATA/racdb/spfileracdb.ora
DOMAIN: null
DB_ROLE: null
START_OPTIONS: null
POLICY: AUTOMATIC
ENABLE FLAG: DB ENABLED
--檢視某節點資訊
[oracle@rac1 ~]$ srvctl config nodeapps -n rac1
rac1 racdb1 /opt/ora10g/product/database
--檢視VIP資訊
[oracle@rac1 ~]$ srvctl config nodeapps -n rac1 -a
VIP exists.: /rac1-vip/192.168.137.153/255.255.255.0/eth0
--檢視GSD
[oracle@rac1 ~]$ srvctl config nodeapps -n rac1 -g
GSD exists.
--檢視ONS
[oracle@rac1 ~]$ srvctl config nodeapps -n rac1 -s
ONS daemon exists.
--檢視監聽
[oracle@rac1 ~]$ srvctl config nodeapps -n rac1 -l
Listener exists.
--檢視監聽
[oracle@rac1 ~]$ srvctl config listener -n rac1
rac1 LISTENER_RAC1
--檢視ASM
[oracle@rac1 ~]$ srvctl config asm -n rac2
+ASM2 /opt/ora10g/product/database
--配置資料庫隨CRS自動啟動
[oracle@rac1 ~]$ srvctl disable database -d racdb
[oracle@rac1 ~]$ srvctl config database -d racdb -a
rac1 racdb1 /opt/ora10g/product/database
rac2 racdb2 /opt/ora10g/product/database
DB_NAME: racdb
ORACLE_HOME: /opt/ora10g/product/database
SPFILE: +DATA/racdb/spfileracdb.ora
DOMAIN: null
DB_ROLE: null
START_OPTIONS: null
POLICY: MANUAL
ENABLE FLAG: DB DISABLED, INST DISABLED ON racdb1 racdb2
[oracle@rac1 ~]$ srvctl enable database -d racdb
[oracle@rac1 ~]$ srvctl config database -d racdb -a
rac1 racdb1 /opt/ora10g/product/database
rac2 racdb2 /opt/ora10g/product/database
DB_NAME: racdb
ORACLE_HOME: /opt/ora10g/product/database
SPFILE: +DATA/racdb/spfileracdb.ora
DOMAIN: null
DB_ROLE: null
START_OPTIONS: null
POLICY: AUTOMATIC
ENABLE FLAG: DB ENABLED
--檢視CRS當前版本
[root@rac2 ~]# /opt/ora10g/product/crs/bin/crsctl query crs softwareversion
CRS software version on node [rac2] is [10.2.0.5.0]
--配置某個例項隨CRS重啟
[oracle@rac1 ~]$ srvctl disable instance -d racdb -i racdb2
[oracle@rac1 ~]$ srvctl config database -d racdb -a
rac1 racdb1 /opt/ora10g/product/database
rac2 racdb2 /opt/ora10g/product/database
DB_NAME: racdb
ORACLE_HOME: /opt/ora10g/product/database
SPFILE: +DATA/racdb/spfileracdb.ora
DOMAIN: null
DB_ROLE: null
START_OPTIONS: null
POLICY: AUTOMATIC
ENABLE FLAG: DB ENABLED, INST DISABLED ON racdb2
[oracle@rac1 ~]$ srvctl enable instance -d racdb -i racdb2
[oracle@rac1 ~]$ srvctl config database -d racdb -a
rac1 racdb1 /opt/ora10g/product/database
rac2 racdb2 /opt/ora10g/product/database
DB_NAME: racdb
ORACLE_HOME: /opt/ora10g/product/database
SPFILE: +DATA/racdb/spfileracdb.ora
DOMAIN: null
DB_ROLE: null
START_OPTIONS: null
POLICY: AUTOMATIC
ENABLE FLAG: DB ENABLED
--操作某一例項(停止,啟動,)
[oracle@rac1 ~]$ srvctl stop instance -d racdb -i racdb2
[oracle@rac1 ~]$ srvctl status database -d racdb
Instance racdb1 is running on node rac1
Instance racdb2 is not running on node rac2
[oracle@rac1 ~]$ srvctl start instance -d racdb -i racdb2
[oracle@rac1 ~]$ srvctl status database -d racdb
Instance racdb1 is running on node rac1
Instance racdb2 is running on node rac2
[oracle@rac1 ~]$ srvctl stop instance -d racdb -i racdb2 -o immediate
[oracle@rac1 ~]$ srvctl status database -d racdb
Instance racdb1 is running on node rac1
Instance racdb2 is not running on node rac2
[oracle@rac1 ~]$ srvctl start instance -d racdb -i racdb2 -o nomount
[oracle@rac1 ~]$ srvctl start instance -d racdb -i racdb2 -o mount
[oracle@rac1 ~]$ srvctl start instance -d racdb -i racdb2 -o open
[oracle@rac1 ~]$ srvctl status database -d racdb
Instance racdb1 is running on node rac1
Instance racdb2 is running on node rac2
--OCR和Votedisk損壞恢復(無備份)
--停止所有節點的CRS
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl stop crs
Stopping resources.
Successfully stopped CRS resources
Stopping CSSD.
Shutting down CSS daemon.
Shutdown request successfully issued.
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl check crs
Failure 1 contacting CSS daemon
Cannot communicate with CRS
Cannot communicate with EVM
[root@rac2 ~]# /opt/ora10g/product/crs/bin/crsctl stop crs
Stopping resources.
Successfully stopped CRS resources
Stopping CSSD.
Shutting down CSS daemon.
Shutdown request successfully issued.
[root@rac2 ~]# /opt/ora10g/product/crs/bin/crsctl check crs
Failure 1 contacting CSS daemon
Cannot communicate with CRS
Cannot communicate with EVM
--執行刪除指令碼
[root@rac1 ~]# /opt/ora10g/product/crs/install/rootdelete.sh
Shutting down Oracle Cluster Ready Services (CRS):
Stopping resources.
Error while stopping resources. Possible cause: CRSD is down.
Stopping CSSD.
Unable to communicate with the CSS daemon.
Shutdown has begun. The daemons should exit soon.
Checking to see if Oracle CRS stack is down...
Oracle CRS stack is not running.
Oracle CRS stack is down now.
Removing script for Oracle Cluster Ready services
Updating ocr file for downgrade
Cleaning up SCR settings in '/etc/oracle/scls_scr'
[root@rac2 ~]# /opt/ora10g/product/crs/install/rootdelete.sh
Shutting down Oracle Cluster Ready Services (CRS):
Stopping resources.
Error while stopping resources. Possible cause: CRSD is down.
Stopping CSSD.
Unable to communicate with the CSS daemon.
Shutdown has begun. The daemons should exit soon.
Checking to see if Oracle CRS stack is down...
Oracle CRS stack is not running.
Oracle CRS stack is down now.
Removing script for Oracle Cluster Ready services
Updating ocr file for downgrade
Cleaning up SCR settings in '/etc/oracle/scls_scr'
--任意節點執行以下指令碼
[root@rac1 ~]# /opt/ora10g/product/crs/install/rootdeinstall.sh
Removing contents from OCR device
2560+0 records in
2560+0 records out
10485760 bytes (10 MB) copied, 7.24653 seconds, 1.4 MB/s
--在上步的同一節點執行root指令碼
[root@rac1 ~]# /opt/ora10g/product/crs/root.sh
WARNING: directory '/opt/ora10g/product' is not owned by root
WARNING: directory '/opt/ora10g' is not owned by root
Checking to see if Oracle CRS stack is already configured
Setting the permissions on OCR backup directory
Setting up NS directories
Oracle Cluster Registry configuration upgraded successfully
WARNING: directory '/opt/ora10g/product' is not owned by root
WARNING: directory '/opt/ora10g' is not owned by root
assigning default hostname rac1 for node 1.
assigning default hostname rac2 for node 2.
Successfully accumulated necessary OCR keys.
Using ports: CSS=49895 CRS=49896 EVMC=49898 and EVMR=49897.
node:
node 1: rac1 rac1-priv rac1
node 2: rac2 rac2-priv rac2
Creating OCR keys for user 'root', privgrp 'root'..
Operation successful.
Now formatting voting device: /dev/raw/raw2
Format of 1 voting devices complete.
Startup will be queued to init within 90 seconds.
Adding daemons to inittab
Expecting the CRS daemons to be up within 600 seconds.
CSS is active on these nodes.
rac1
CSS is inactive on these nodes.
rac2
Local node checking complete.
Run root.sh on remaining nodes to start CRS daemons.
--在其他節點執行root指令碼
[root@rac2 ~]# /opt/ora10g/product/crs/root.sh
WARNING: directory '/opt/ora10g/product' is not owned by root
WARNING: directory '/opt/ora10g' is not owned by root
Checking to see if Oracle CRS stack is already configured
Setting the permissions on OCR backup directory
Setting up NS directories
Oracle Cluster Registry configuration upgraded successfully
WARNING: directory '/opt/ora10g/product' is not owned by root
WARNING: directory '/opt/ora10g' is not owned by root
clscfg: EXISTING configuration version 3 detected.
clscfg: version 3 is 10G Release 2.
assigning default hostname rac1 for node 1.
assigning default hostname rac2 for node 2.
Successfully accumulated necessary OCR keys.
Using ports: CSS=49895 CRS=49896 EVMC=49898 and EVMR=49897.
node:
node 1: rac1 rac1-priv rac1
node 2: rac2 rac2-priv rac2
clscfg: Arguments check out successfully.
NO KEYS WERE WRITTEN. Supply -force parameter to override.
-force is destructive and will destroy any previous cluster
configuration.
Oracle Cluster Registry for cluster has already been initialized
Startup will be queued to init within 90 seconds.
Adding daemons to inittab
Expecting the CRS daemons to be up within 600 seconds.
CSS is active on these nodes.
rac1
rac2
CSS is active on all nodes.
Waiting for the Oracle CRSD and EVMD to start
Waiting for the Oracle CRSD and EVMD to start
Waiting for the Oracle CRSD and EVMD to start
Waiting for the Oracle CRSD and EVMD to start
Waiting for the Oracle CRSD and EVMD to start
Waiting for the Oracle CRSD and EVMD to start
Waiting for the Oracle CRSD and EVMD to start
Waiting for the Oracle CRSD and EVMD to start
Waiting for the Oracle CRSD and EVMD to start
Waiting for the Oracle CRSD and EVMD to start
Oracle CRS stack installed and running under init(1M)
Running vipca(silent) for configuring nodeapps
Error 0(Native: listNetInterfaces:[3])
[Error 0(Native: listNetInterfaces:[3])]
--若在其他節點出錯,則進行以下操作
[root@rac2 ~]# /opt/ora10g/product/crs/bin/oifcfg iflist
eth0 192.168.137.0
eth1 192.168.136.0
[root@rac2 ~]# /opt/ora10g/product/crs/bin/oifcfg setif -global eth0/192.168.137.0:public
[root@rac2 ~]# /opt/ora10g/product/crs/bin/oifcfg setif -global eth1/192.168.136.0:cluster_interconnect
[root@rac2 ~]# /opt/ora10g/product/crs/bin/oifcfg getif
eth0 192.168.137.0 global public
eth1 192.168.136.0 global cluster_interconnect
--檢查CRS
[oracle@rac1 ~]$ crsctl check crs
CSS appears healthy
CRS appears healthy
EVM appears healthy
[oracle@rac1 ~]$ crs_stat -t -v
CRS-0202: No resources are registered.
--配置VIP
[oracle@rac1 ~]$ su -
Password:
[root@rac1 ~]# /opt/ora10g/product/crs/bin/vipca
[root@rac1 ~]# su - oracle
[oracle@rac1 ~]$ crs_stat -t
Name Type Target State Host
------------------------------------------------------------
ora.rac1.gsd application ONLINE ONLINE rac1
ora.rac1.ons application ONLINE ONLINE rac1
ora.rac1.vip application ONLINE ONLINE rac1
ora.rac2.gsd application ONLINE ONLINE rac2
ora.rac2.ons application ONLINE ONLINE rac2
ora.rac2.vip application ONLINE ONLINE rac2
--註冊相關服務
[oracle@rac1 ~]$ srvctl add asm -n rac1 -i +ASM1 -o /opt/ora10g/product/database
[oracle@rac1 ~]$ srvctl add asm -n rac2 -i +ASM2 -o /opt/ora10g/product/database
[oracle@rac1 ~]$ srvctl start asm -n rac1
[oracle@rac1 ~]$ srvctl start asm -n rac2
[oracle@rac1 ~]$ srvctl add database -d racdb -o /opt/ora10g/product/database
[oracle@rac1 ~]$ srvctl add instance -d racdb -i racdb1 -n rac1
[oracle@rac1 ~]$ srvctl add instance -d racdb -i racdb2 -n rac2
[oracle@rac1 ~]$ srvctl modify instance -d racdb -i racdb1 -s +ASM1
[oracle@rac1 ~]$ srvctl modify instance -d racdb -i racdb2 -s +ASM2
[oracle@rac1 ~]$ srvctl start database -d racdb
--配置listener(先刪除,再重建)
注:需要注意使用者對等性
[oracle@rac1 ~]$ netca
--檢視資源狀態
[oracle@rac1 ~]$ crs_stat -t -v
Name Type R/RA F/FT Target State Host
----------------------------------------------------------------------
ora....SM1.asm application 0/5 0/0 ONLINE ONLINE rac1
ora....C1.lsnr application 0/5 0/0 ONLINE ONLINE rac1
ora.rac1.gsd application 0/5 0/0 ONLINE ONLINE rac1
ora.rac1.ons application 0/3 0/0 ONLINE ONLINE rac1
ora.rac1.vip application 0/0 0/0 ONLINE ONLINE rac1
ora....SM2.asm application 0/5 0/0 ONLINE ONLINE rac2
ora....C2.lsnr application 0/5 0/0 ONLINE ONLINE rac2
ora.rac2.gsd application 0/5 0/0 ONLINE ONLINE rac2
ora.rac2.ons application 0/3 0/0 ONLINE ONLINE rac2
ora.rac2.vip application 0/0 0/0 ONLINE ONLINE rac2
ora.racdb.db application 0/0 0/1 ONLINE ONLINE rac1
ora....b1.inst application 0/5 0/0 ONLINE ONLINE rac1
ora....b2.inst application 0/5 0/0 ONLINE ONLINE rac2
[oracle@rac1 ~]$ cd $CRS_HOME
[oracle@rac1 crs]$ pwd
/opt/ora10g/product/crs
[oracle@rac1 crs]$ cd bin/
[oracle@rac1 bin]$ ./olsnodes -n -p -i
rac1 1 rac1-priv rac1-vip
rac2 2 rac2-priv rac2-vip
--顯示網口列表
[oracle@rac1 ~]$ oifcfg iflist
eth0 192.168.137.0
eth1 192.168.136.0
--檢視每個網路卡的屬性
[oracle@rac1 ~]$ oifcfg getif
eth0 192.168.137.0 global public
eth1 192.168.136.0 global cluster_interconnect
[oracle@rac1 ~]$ oifcfg getif -global rac2
eth0 192.168.137.0 global public
eth1 192.168.136.0 global cluster_interconnect
[oracle@rac1 ~]$ oifcfg getif -node rac2
[oracle@rac1 ~]$ oifcfg getif -type cluster_interconnect
eth1 192.168.136.0 global cluster_interconnect
--刪除網路介面
[oracle@rac1 ~]$ oifcfg delif -global
[oracle@rac1 ~]$ oifcfg getif -global
--增加網路介面
[oracle@rac1 ~]$ oifcfg setif -global eth0/192.168.137.0:public
[oracle@rac1 ~]$ oifcfg setif -global eth1/192.168.136.0:cluster_interconnect
[oracle@rac1 ~]$ oifcfg getif -global
eth0 192.168.137.0 global public
eth1 192.168.136.0 global cluster_interconnect
--檢查CRS
[oracle@rac1 ~]$ crsctl check crs
CSS appears healthy
CRS appears healthy
EVM appears healthy
--檢查服務
[oracle@rac1 ~]$ crsctl check cssd
CSS appears healthy
[oracle@rac1 ~]$ crsctl check crsd
CRS appears healthy
[oracle@rac1 ~]$ crsctl check evmd
EVM appears healthy
--關閉CRS自動重啟
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl disable crs
--開啟CRS自動重啟
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl enable crs
--關閉CRS
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl stop crs
Stopping resources.
Successfully stopped CRS resources
Stopping CSSD.
Shutting down CSS daemon.
Shutdown request successfully issued.
--開啟CRS
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl start crs
Attempting to start CRS stack
The CRS stack will be started shortly
[oracle@rac1 ~]$ crsctl check crs
Failure 1 contacting CSS daemon
Cannot communicate with CRS
Cannot communicate with EVM
[oracle@rac1 ~]$ crsctl check crs
CSS appears healthy
CRS appears healthy
EVM appears healthy
--檢視vote disk位置
[oracle@rac1 ~]$ crsctl query css votedisk
0. 0 /dev/raw/raw2
located 1 votedisk(s).
--檢視節點間延遲時間
--檢視disk heartbeat
[oracle@rac1 ~]$ crsctl get css disktimeout
unrecognized parameter disktimeout specified.
--檢視network heartbeat
[oracle@rac1 ~]$ crsctl get css misscount
60
--crsctl set css misscount 100
--檢視各個服務模組
[oracle@rac1 ~]$ crsctl lsmodules crs
The following are the CRS modules ::
CRSUI
CRSCOMM
CRSRTI
CRSMAIN
CRSPLACE
CRSAPP
CRSRES
CRSCOMM
CRSOCR
CRSTIMER
CRSEVT
CRSD
CLUCLS
CSSCLNT
COMMCRS
COMMNS
[oracle@rac1 ~]$ crsctl lsmodules css
The following are the CSS modules ::
CSSD
COMMCRS
COMMNS
[oracle@rac1 ~]$ crsctl lsmodules evm
The following are the EVM modules ::
EVMD
EVMDMAIN
EVMCOMM
EVMEVT
EVMAPP
EVMAGENT
CRSOCR
CLUCLS
CSSCLNT
COMMCRS
COMMNS
--新增votedisk(一般新增2個)
--檢視votedisk位置
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl query css votedisk
0. 0 /dev/raw/raw2
located 1 votedisk(s).
--停止所有CRS
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl stop crs
Stopping resources.
Successfully stopped CRS resources
Stopping CSSD.
Shutting down CSS daemon.
Shutdown request successfully issued.
--新增votedisk
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl add css votedisk /dev/raw/raw3
Cluster is not in a ready state for online disk addition
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl add css votedisk /dev/raw/raw3 -force
Now formatting voting disk: /dev/raw/raw3
successful addition of votedisk /dev/raw/raw3.
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl add css votedisk /dev/raw/raw4 -force
Now formatting voting disk: /dev/raw/raw4
successful addition of votedisk /dev/raw/raw4.
--確認新增後結果
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl query css votedisk
0. 0 /dev/raw/raw2
1. 0 /dev/raw/raw3
2. 0 /dev/raw/raw4
located 3 votedisk(s).
--開啟CRS
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl start crs
Attempting to start CRS stack
The CRS stack will be started shortly
--檢視結果
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl check crs
Failure 1 contacting CSS daemon
Cannot communicate with CRS
Cannot communicate with EVM
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl check crs
CSS appears healthy
CRS appears healthy
EVM appears healthy
--檢視OCR配置檔案位置
[oracle@rac2 oracle]$ pwd
/etc/oracle
[oracle@rac2 oracle]$ more ocr.loc
ocrconfig_loc=/dev/raw/raw1
local_only=FALSE
--檢視OCR備份
[oracle@rac1 ~]$ ocrconfig -showbackup
rac1 2012/09/07 14:20:08 /opt/ora10g/product/crs/cdata/crs
rac1 2012/09/06 18:53:34 /opt/ora10g/product/crs/cdata/crs
rac1 2012/09/06 14:53:33 /opt/ora10g/product/crs/cdata/crs
rac1 2012/09/05 15:24:48 /opt/ora10g/product/crs/cdata/crs
rac1 2012/09/05 15:24:48 /opt/ora10g/product/crs/cdata/crs
[root@rac1 crs]# pwd
/opt/ora10g/product/crs/cdata/crs
[root@rac1 crs]# ls -ltr
total 23568
-rw-r--r-- 1 root root 4018176 Sep 5 15:24 week.ocr
-rw-r--r-- 1 root root 4018176 Sep 5 15:24 day.ocr
-rw-r--r-- 1 root root 4018176 Sep 6 14:53 backup02.ocr
-rw-r--r-- 1 root root 4018176 Sep 6 14:53 day_.ocr
-rw-r--r-- 1 root root 4018176 Sep 6 18:53 backup01.ocr
-rw-r--r-- 1 root root 4018176 Sep 7 14:20 backup00.ocr
--檢查OCR一致性
[oracle@rac1 ~]$ ocrcheck
Status of Oracle Cluster Registry is as follows :
Version : 2
Total space (kbytes) : 196504
Used space (kbytes) : 3808
Available space (kbytes) : 192696
ID : 1464966774
Device/File Name : /dev/raw/raw1
Device/File integrity check succeeded
Device/File not configured
Cluster registry integrity check succeeded
--OCR備份恢復例項
--檢視OCR配置檔案位置
[oracle@rac2 oracle]$ pwd
/etc/oracle
[oracle@rac2 oracle]$ more ocr.loc
ocrconfig_loc=/dev/raw/raw1
local_only=FALSE
--停止所有CRS
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl stop crs
Stopping resources.
Successfully stopped CRS resources
Stopping CSSD.
Shutting down CSS daemon.
Shutdown request successfully issued.
--匯出OCR
[root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrconfig -export /tmp/ocr.exp
--重啟CRS
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl start crs
Attempting to start CRS stack
The CRS stack will be started shortly
--檢查CRS
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl check crs
CSS appears healthy
CRS appears healthy
EVM appears healthy
--破壞OCR
[root@rac1 ~]# dd if=/dev/zero of=/dev/raw/raw1 bs=2048 count=1024000
dd: writing `/dev/raw/raw1': No space left on device
98297+0 records in
98296+0 records out
201310208 bytes (201 MB) copied, 332.386 seconds, 606 kB/s
--檢查一致性
[root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrcheck
PROT-601: Failed to initialize ocrcheck
--恢復備份的OCR
[root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrconfig -import /tmp/ocr.exp
--檢查一致性
[root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrcheck
Status of Oracle Cluster Registry is as follows :
Version : 2
Total space (kbytes) : 196504
Used space (kbytes) : 3816
Available space (kbytes) : 192688
ID : 1283814407
Device/File Name : /dev/raw/raw1
Device/File integrity check succeeded
Device/File not configured
Cluster registry integrity check succeeded
--啟動CRS
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl start crs
Attempting to start CRS stack
The CRS stack will be started shortly
--檢查CRS狀態
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl check crs
CSS appears healthy
CRS appears healthy
EVM appears healthy
--移動OCR檔案(OCR只能<=2個,一主一映象)
--檢視OCR配置檔案位置
[oracle@rac2 oracle]$ pwd
/etc/oracle
[oracle@rac2 oracle]$ more ocr.loc
ocrconfig_loc=/dev/raw/raw1
--備份OCR
--檢視當前OCR配置
[root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrcheck
Status of Oracle Cluster Registry is as follows :
Version : 2
Total space (kbytes) : 196504
Used space (kbytes) : 3820
Available space (kbytes) : 192684
ID : 1283814407
Device/File Name : /dev/raw/raw1
Device/File integrity check succeeded
Device/File not configured
Cluster registry integrity check succeeded
--映象當前OCR
[root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrconfig -replace ocrmirror /dev/raw/raw3
--檢視映象結果
[root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrcheck
Status of Oracle Cluster Registry is as follows :
Version : 2
Total space (kbytes) : 196504
Used space (kbytes) : 3820
Available space (kbytes) : 192684
ID : 1283814407
Device/File Name : /dev/raw/raw1
Device/File integrity check succeeded
Device/File Name : /dev/raw/raw3
Device/File integrity check succeeded
Cluster registry integrity check succeeded
--改變primary ocr檔案位置
[root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrconfig -replace ocr /dev/raw/raw4
--檢視OCR配置資訊
[root@rac1 ~]# /opt/ora10g/product/crs/bin/ocrcheck
Status of Oracle Cluster Registry is as follows :
Version : 2
Total space (kbytes) : 4938664
Used space (kbytes) : 3820
Available space (kbytes) : 4934844
ID : 1283814407
Device/File Name : /dev/raw/raw4
Device/File needs to be synchronized with the other device
Device/File Name : /dev/raw/raw3
Device/File integrity check succeeded
Cluster registry integrity check succeeded
--檢視OCR配置檔案內容是否更新
[root@rac1 ~]# cat /etc/oracle/ocr.loc
#Device/file /dev/raw/raw1 getting replaced by device /dev/raw/raw4
ocrconfig_loc=/dev/raw/raw4
ocrmirrorconfig_loc=/dev/raw/raw3
--檢視資源狀態
[oracle@rac1 ~]$ crs_stat -t -v
Name Type R/RA F/FT Target State Host
----------------------------------------------------------------------
ora....SM1.asm application 0/5 0/0 ONLINE ONLINE rac1
ora....C1.lsnr application 0/5 0/0 ONLINE ONLINE rac1
ora.rac1.gsd application 0/5 0/0 ONLINE ONLINE rac1
ora.rac1.ons application 0/3 0/0 ONLINE ONLINE rac1
ora.rac1.vip application 0/0 0/0 ONLINE ONLINE rac1
ora....SM2.asm application 0/5 0/0 ONLINE ONLINE rac2
ora....C2.lsnr application 0/5 0/0 ONLINE ONLINE rac2
ora.rac2.gsd application 0/5 0/0 ONLINE ONLINE rac2
ora.rac2.ons application 0/3 0/0 ONLINE ONLINE rac2
ora.rac2.vip application 0/0 0/0 ONLINE ONLINE rac2
ora.racdb.db application 0/1 0/1 ONLINE ONLINE rac1
ora....b1.inst application 0/5 0/0 ONLINE ONLINE rac1
ora....b2.inst application 0/5 0/0 ONLINE ONLINE rac2
--檢視某資源詳細資訊
[oracle@rac1 ~]$ crs_stat -p ora.rac2.vip
NAME=ora.rac2.vip
TYPE=application
ACTION_SCRIPT=/opt/ora10g/product/crs/bin/racgwrap
ACTIVE_PLACEMENT=1
AUTO_START=1
CHECK_INTERVAL=60
DESCRIPTION=CRS application for VIP on a node
FAILOVER_DELAY=0
FAILURE_INTERVAL=0
FAILURE_THRESHOLD=0
HOSTING_MEMBERS=rac2
OPTIONAL_RESOURCES=
PLACEMENT=favored
REQUIRED_RESOURCES=
RESTART_ATTEMPTS=0
SCRIPT_TIMEOUT=60
START_TIMEOUT=0
STOP_TIMEOUT=0
UPTIME_THRESHOLD=7d
USR_ORA_ALERT_NAME=
USR_ORA_CHECK_TIMEOUT=0
USR_ORA_CONNECT_STR=/ as sysdba
USR_ORA_DEBUG=0
USR_ORA_DISCONNECT=false
USR_ORA_FLAGS=
USR_ORA_IF=eth0
USR_ORA_INST_NOT_SHUTDOWN=
USR_ORA_LANG=
USR_ORA_NETMASK=255.255.255.0
USR_ORA_OPEN_MODE=
USR_ORA_OPI=false
USR_ORA_PFILE=
USR_ORA_PRECONNECT=none
USR_ORA_SRV=
USR_ORA_START_TIMEOUT=0
USR_ORA_STOP_MODE=immediate
USR_ORA_STOP_TIMEOUT=0
USR_ORA_VIP=192.168.137.154
--檢視資源許可權資訊
[oracle@rac1 ~]$ crs_stat -ls
Name Owner Primary PrivGrp Permission
-----------------------------------------------------------------
ora....SM1.asm oracle oinstall rwxrwxr--
ora....C1.lsnr oracle oinstall rwxrwxr--
ora.rac1.gsd oracle oinstall rwxr-xr--
ora.rac1.ons oracle oinstall rwxr-xr--
ora.rac1.vip root oinstall rwxr-xr--
ora....SM2.asm oracle oinstall rwxrwxr--
ora....C2.lsnr oracle oinstall rwxrwxr--
ora.rac2.gsd oracle oinstall rwxr-xr--
ora.rac2.ons oracle oinstall rwxr-xr--
ora.rac2.vip root oinstall rwxr-xr--
ora.racdb.db oracle oinstall rwxrwxr--
ora....b1.inst oracle oinstall rwxrwxr--
ora....b2.inst oracle oinstall rwxrwxr--
--檢視ONS配置資訊
[oracle@rac1 conf]$ pwd
/opt/ora10g/product/crs/opmn/conf
[oracle@rac1 conf]$ cat ons.config
localport=6113 --本地(127.0.0.1)監聽埠
remoteport=6200 --遠端(除127.0.0.1)監聽埠
loglevel=3
useocr=on
--檢視埠情況
[oracle@rac1 conf]$ netstat -ano|grep 6113
tcp 0 0 127.0.0.1:6113 0.0.0.0:* LISTEN off (0.00/0/0)
tcp 0 0 127.0.0.1:1146 127.0.0.1:6113 ESTABLISHED keepalive (6092.43/0/0)
tcp 0 0 127.0.0.1:6113 127.0.0.1:1154 ESTABLISHED off (0.00/0/0)
tcp 0 0 127.0.0.1:6113 127.0.0.1:1146 ESTABLISHED off (0.00/0/0)
tcp 0 0 127.0.0.1:1154 127.0.0.1:6113 ESTABLISHED keepalive (6209.37/0/0)
[oracle@rac1 conf]$ netstat -ano|grep 6200
tcp 0 0 0.0.0.0:6200 0.0.0.0:* LISTEN off (0.00/0/0)
tcp 0 0 192.168.137.151:6200 192.168.137.152:18512 ESTABLISHED off (0.00/0/0)
--檢視ONS程式
[oracle@rac1 ~]$ ps -ef | grep ons
root 3772 1 0 10:06 ? 00:00:00 sendmail: accepting connections
oracle 6647 13385 0 10:42 pts/0 00:00:00 grep ons
oracle 17466 1 0 10:20 ? 00:00:00 /opt/ora10g/product/crs/opmn/bin/ons -d
oracle 17468 17466 0 10:20 ? 00:00:00 /opt/ora10g/product/crs/opmn/bin/ons -d
--檢視ONS執行狀態
[oracle@rac1 ~]$ onsctl ping
Number of onsconfiguration retrieved, numcfg = 2
onscfg[0]
{node = rac1, port = 6200}
Adding remote host rac1:6200
onscfg[1]
{node = rac2, port = 6200}
Adding remote host rac2:6200
ons is running ...
--ONS的啟動和停止
[oracle@rac1 ~]$ onsctl stop
onsctl: shutting down ons daemon ...
Number of onsconfiguration retrieved, numcfg = 2
onscfg[0]
{node = rac1, port = 6200}
Adding remote host rac1:6200
onscfg[1]
{node = rac2, port = 6200}
Adding remote host rac2:6200
[oracle@rac1 ~]$ onsctl ping
Number of onsconfiguration retrieved, numcfg = 2
onscfg[0]
{node = rac1, port = 6200}
Adding remote host rac1:6200
onscfg[1]
{node = rac2, port = 6200}
Adding remote host rac2:6200
ons is not running ...
[oracle@rac1 ~]$ onsctl start
Number of onsconfiguration retrieved, numcfg = 2
onscfg[0]
{node = rac1, port = 6200}
Adding remote host rac1:6200
onscfg[1]
{node = rac2, port = 6200}
Adding remote host rac2:6200
Number of onsconfiguration retrieved, numcfg = 2
onscfg[0]
{node = rac1, port = 6200}
Adding remote host rac1:6200
onscfg[1]
{node = rac2, port = 6200}
Adding remote host rac2:6200
onsctl: ons started
[oracle@rac1 ~]$ onsctl ping
Number of onsconfiguration retrieved, numcfg = 2
onscfg[0]
{node = rac1, port = 6200}
Adding remote host rac1:6200
onscfg[1]
{node = rac2, port = 6200}
Adding remote host rac2:6200
ons is running ...
--檢視ONS詳細資訊
[oracle@rac1 ~]$ onsctl debug
Number of onsconfiguration retrieved, numcfg = 2
onscfg[0]
{node = rac1, port = 6200}
Adding remote host rac1:6200
onscfg[1]
{node = rac2, port = 6200}
Adding remote host rac2:6200
HTTP/1.1 200 OK
Content-Length: 1357
Content-Type: text/html
Response:
======== NS ========
Listeners:
NAME BIND ADDRESS PORT FLAGS SOCKET
------- --------------- ----- -------- ------
Local 127.000.000.001 6113 00000142 7
Remote 192.168.137.151 6200 00000101 8
Request No listener
Server connections:
ID IP PORT FLAGS SENDQ WORKER BUSY SUBS
---------- --------------- ----- -------- ---------- -------- ------ -----
1 192.168.137.152 6200 00010005 0 1 0
Client connections:
ID IP PORT FLAGS SENDQ WORKER BUSY SUBS
---------- --------------- ----- -------- ---------- -------- ------ -----
2 127.000.000.001 6113 0001001a 0 1 1
3 127.000.000.001 6113 0001001a 0 1 0
Pending connections:
ID IP PORT FLAGS SENDQ WORKER BUSY SUBS
---------- --------------- ----- -------- ---------- -------- ------ -----
0 127.000.000.001 6113 00020812 0 1 0
Worker Ticket: 1/1, Idle: 361
THREAD FLAGS
-------- --------
b7868b90 00000012
b7067b90 00000012
b66abb90 00000012
Resources:
Notifications:
Received: 0, in Receive Q: 0, Processed: 0, in Process Q: 0
Pools:
Message: 24/25 (1), Link: 25/25 (1), Subscription: 24/25 (1)
--檢視OCR中註冊的資料庫資訊
[oracle@rac1 ~]$ srvctl config database
racdb
[oracle@rac1 ~]$ srvctl config database -d racdb
rac1 racdb1 /opt/ora10g/product/database
rac2 racdb2 /opt/ora10g/product/database
[oracle@rac1 ~]$ srvctl config database -d racdb -a
rac1 racdb1 /opt/ora10g/product/database
rac2 racdb2 /opt/ora10g/product/database
DB_NAME: racdb
ORACLE_HOME: /opt/ora10g/product/database
SPFILE: +DATA/racdb/spfileracdb.ora
DOMAIN: null
DB_ROLE: null
START_OPTIONS: null
POLICY: AUTOMATIC
ENABLE FLAG: DB ENABLED
--檢視某節點資訊
[oracle@rac1 ~]$ srvctl config nodeapps -n rac1
rac1 racdb1 /opt/ora10g/product/database
--檢視VIP資訊
[oracle@rac1 ~]$ srvctl config nodeapps -n rac1 -a
VIP exists.: /rac1-vip/192.168.137.153/255.255.255.0/eth0
--檢視GSD
[oracle@rac1 ~]$ srvctl config nodeapps -n rac1 -g
GSD exists.
--檢視ONS
[oracle@rac1 ~]$ srvctl config nodeapps -n rac1 -s
ONS daemon exists.
--檢視監聽
[oracle@rac1 ~]$ srvctl config nodeapps -n rac1 -l
Listener exists.
--檢視監聽
[oracle@rac1 ~]$ srvctl config listener -n rac1
rac1 LISTENER_RAC1
--檢視ASM
[oracle@rac1 ~]$ srvctl config asm -n rac2
+ASM2 /opt/ora10g/product/database
--配置資料庫是否隨CRS自動啟動(先disable示範,再enable恢復)
[oracle@rac1 ~]$ srvctl disable database -d racdb
[oracle@rac1 ~]$ srvctl config database -d racdb -a
rac1 racdb1 /opt/ora10g/product/database
rac2 racdb2 /opt/ora10g/product/database
DB_NAME: racdb
ORACLE_HOME: /opt/ora10g/product/database
SPFILE: +DATA/racdb/spfileracdb.ora
DOMAIN: null
DB_ROLE: null
START_OPTIONS: null
POLICY: MANUAL
ENABLE FLAG: DB DISABLED, INST DISABLED ON racdb1 racdb2
[oracle@rac1 ~]$ srvctl enable database -d racdb
[oracle@rac1 ~]$ srvctl config database -d racdb -a
rac1 racdb1 /opt/ora10g/product/database
rac2 racdb2 /opt/ora10g/product/database
DB_NAME: racdb
ORACLE_HOME: /opt/ora10g/product/database
SPFILE: +DATA/racdb/spfileracdb.ora
DOMAIN: null
DB_ROLE: null
START_OPTIONS: null
POLICY: AUTOMATIC
ENABLE FLAG: DB ENABLED
--檢視CRS當前版本
[root@rac2 ~]# /opt/ora10g/product/crs/bin/crsctl query crs softwareversion
CRS software version on node [rac2] is [10.2.0.5.0]
--配置某個例項是否隨CRS自動啟動(先disable示範,再enable恢復)
[oracle@rac1 ~]$ srvctl disable instance -d racdb -i racdb2
[oracle@rac1 ~]$ srvctl config database -d racdb -a
rac1 racdb1 /opt/ora10g/product/database
rac2 racdb2 /opt/ora10g/product/database
DB_NAME: racdb
ORACLE_HOME: /opt/ora10g/product/database
SPFILE: +DATA/racdb/spfileracdb.ora
DOMAIN: null
DB_ROLE: null
START_OPTIONS: null
POLICY: AUTOMATIC
ENABLE FLAG: DB ENABLED, INST DISABLED ON racdb2
[oracle@rac1 ~]$ srvctl enable instance -d racdb -i racdb2
[oracle@rac1 ~]$ srvctl config database -d racdb -a
rac1 racdb1 /opt/ora10g/product/database
rac2 racdb2 /opt/ora10g/product/database
DB_NAME: racdb
ORACLE_HOME: /opt/ora10g/product/database
SPFILE: +DATA/racdb/spfileracdb.ora
DOMAIN: null
DB_ROLE: null
START_OPTIONS: null
POLICY: AUTOMATIC
ENABLE FLAG: DB ENABLED
--操作某一例項(停止、啟動)
[oracle@rac1 ~]$ srvctl stop instance -d racdb -i racdb2
[oracle@rac1 ~]$ srvctl status database -d racdb
Instance racdb1 is running on node rac1
Instance racdb2 is not running on node rac2
[oracle@rac1 ~]$ srvctl start instance -d racdb -i racdb2
[oracle@rac1 ~]$ srvctl status database -d racdb
Instance racdb1 is running on node rac1
Instance racdb2 is running on node rac2
[oracle@rac1 ~]$ srvctl stop instance -d racdb -i racdb2 -o immediate
[oracle@rac1 ~]$ srvctl status database -d racdb
Instance racdb1 is running on node rac1
Instance racdb2 is not running on node rac2
[oracle@rac1 ~]$ srvctl start instance -d racdb -i racdb2 -o nomount
[oracle@rac1 ~]$ srvctl start instance -d racdb -i racdb2 -o mount
[oracle@rac1 ~]$ srvctl start instance -d racdb -i racdb2 -o open
[oracle@rac1 ~]$ srvctl status database -d racdb
Instance racdb1 is running on node rac1
Instance racdb2 is running on node rac2
--OCR和Votedisk損壞恢復(無備份)
--停止所有節點的CRS
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl stop crs
Stopping resources.
Successfully stopped CRS resources
Stopping CSSD.
Shutting down CSS daemon.
Shutdown request successfully issued.
[root@rac1 ~]# /opt/ora10g/product/crs/bin/crsctl check crs
Failure 1 contacting CSS daemon
Cannot communicate with CRS
Cannot communicate with EVM
[root@rac2 ~]# /opt/ora10g/product/crs/bin/crsctl stop crs
Stopping resources.
Successfully stopped CRS resources
Stopping CSSD.
Shutting down CSS daemon.
Shutdown request successfully issued.
[root@rac2 ~]# /opt/ora10g/product/crs/bin/crsctl check crs
Failure 1 contacting CSS daemon
Cannot communicate with CRS
Cannot communicate with EVM
--執行刪除指令碼
[root@rac1 ~]# /opt/ora10g/product/crs/install/rootdelete.sh
Shutting down Oracle Cluster Ready Services (CRS):
Stopping resources.
Error while stopping resources. Possible cause: CRSD is down.
Stopping CSSD.
Unable to communicate with the CSS daemon.
Shutdown has begun. The daemons should exit soon.
Checking to see if Oracle CRS stack is down...
Oracle CRS stack is not running.
Oracle CRS stack is down now.
Removing script for Oracle Cluster Ready services
Updating ocr file for downgrade
Cleaning up SCR settings in '/etc/oracle/scls_scr'
[root@rac2 ~]# /opt/ora10g/product/crs/install/rootdelete.sh
Shutting down Oracle Cluster Ready Services (CRS):
Stopping resources.
Error while stopping resources. Possible cause: CRSD is down.
Stopping CSSD.
Unable to communicate with the CSS daemon.
Shutdown has begun. The daemons should exit soon.
Checking to see if Oracle CRS stack is down...
Oracle CRS stack is not running.
Oracle CRS stack is down now.
Removing script for Oracle Cluster Ready services
Updating ocr file for downgrade
Cleaning up SCR settings in '/etc/oracle/scls_scr'
--任意節點執行以下指令碼
[root@rac1 ~]# /opt/ora10g/product/crs/install/rootdeinstall.sh
Removing contents from OCR device
2560+0 records in
2560+0 records out
10485760 bytes (10 MB) copied, 7.24653 seconds, 1.4 MB/s
--在上一步的同一節點執行root指令碼
[root@rac1 ~]# /opt/ora10g/product/crs/root.sh
WARNING: directory '/opt/ora10g/product' is not owned by root
WARNING: directory '/opt/ora10g' is not owned by root
Checking to see if Oracle CRS stack is already configured
Setting the permissions on OCR backup directory
Setting up NS directories
Oracle Cluster Registry configuration upgraded successfully
WARNING: directory '/opt/ora10g/product' is not owned by root
WARNING: directory '/opt/ora10g' is not owned by root
assigning default hostname rac1 for node 1.
assigning default hostname rac2 for node 2.
Successfully accumulated necessary OCR keys.
Using ports: CSS=49895 CRS=49896 EVMC=49898 and EVMR=49897.
node
node 1: rac1 rac1-priv rac1
node 2: rac2 rac2-priv rac2
Creating OCR keys for user 'root', privgrp 'root'..
Operation successful.
Now formatting voting device: /dev/raw/raw2
Format of 1 voting devices complete.
Startup will be queued to init within 90 seconds.
Adding daemons to inittab
Expecting the CRS daemons to be up within 600 seconds.
CSS is active on these nodes.
rac1
CSS is inactive on these nodes.
rac2
Local node checking complete.
Run root.sh on remaining nodes to start CRS daemons.
--在其他節點執行root指令碼
[root@rac2 ~]# /opt/ora10g/product/crs/root.sh
WARNING: directory '/opt/ora10g/product' is not owned by root
WARNING: directory '/opt/ora10g' is not owned by root
Checking to see if Oracle CRS stack is already configured
Setting the permissions on OCR backup directory
Setting up NS directories
Oracle Cluster Registry configuration upgraded successfully
WARNING: directory '/opt/ora10g/product' is not owned by root
WARNING: directory '/opt/ora10g' is not owned by root
clscfg: EXISTING configuration version 3 detected.
clscfg: version 3 is 10G Release 2.
assigning default hostname rac1 for node 1.
assigning default hostname rac2 for node 2.
Successfully accumulated necessary OCR keys.
Using ports: CSS=49895 CRS=49896 EVMC=49898 and EVMR=49897.
node
node 1: rac1 rac1-priv rac1
node 2: rac2 rac2-priv rac2
clscfg: Arguments check out successfully.
NO KEYS WERE WRITTEN. Supply -force parameter to override.
-force is destructive and will destroy any previous cluster
configuration.
Oracle Cluster Registry for cluster has already been initialized
Startup will be queued to init within 90 seconds.
Adding daemons to inittab
Expecting the CRS daemons to be up within 600 seconds.
CSS is active on these nodes.
rac1
rac2
CSS is active on all nodes.
Waiting for the Oracle CRSD and EVMD to start
Waiting for the Oracle CRSD and EVMD to start
Waiting for the Oracle CRSD and EVMD to start
Waiting for the Oracle CRSD and EVMD to start
Waiting for the Oracle CRSD and EVMD to start
Waiting for the Oracle CRSD and EVMD to start
Waiting for the Oracle CRSD and EVMD to start
Waiting for the Oracle CRSD and EVMD to start
Waiting for the Oracle CRSD and EVMD to start
Waiting for the Oracle CRSD and EVMD to start
Oracle CRS stack installed and running under init(1M)
Running vipca(silent) for configuring nodeapps
Error 0(Native: listNetInterfaces:[3])
[Error 0(Native: listNetInterfaces:[3])]
--若在其他節點出錯,則進行以下操作
[root@rac2 ~]# /opt/ora10g/product/crs/bin/oifcfg iflist
eth0 192.168.137.0
eth1 192.168.136.0
[root@rac2 ~]# /opt/ora10g/product/crs/bin/oifcfg setif -global eth0/192.168.137.0:public
[root@rac2 ~]# /opt/ora10g/product/crs/bin/oifcfg setif -global eth0/192.168.136.0:cluster_interconnect
--注(review):依上面iflist的輸出,192.168.136.0 網段對應的是 eth1,此處介面名疑應為 eth1/192.168.136.0:cluster_interconnect;oifcfg setif 不會校驗介面名,請自行確認
[root@rac2 ~]# /opt/ora10g/product/crs/bin/oifcfg getif
eth0 192.168.137.0 global public
eth0 192.168.136.0 global cluster_interconnect
--檢查CRS
[oracle@rac1 ~]$ crsctl check crs
CSS appears healthy
CRS appears healthy
EVM appears healthy
[oracle@rac1 ~]$ crs_stat -t -v
CRS-0202: No resources are registered.
--配置VIP
[oracle@rac1 ~]$ su -
Password:
[root@rac1 ~]# /opt/ora10g/product/crs/bin/vipca
[root@rac1 ~]# su - oracle
[oracle@rac1 ~]$ crs_stat -t
Name Type Target State Host
------------------------------------------------------------
ora.rac1.gsd application ONLINE ONLINE rac1
ora.rac1.ons application ONLINE ONLINE rac1
ora.rac1.vip application ONLINE ONLINE rac1
ora.rac2.gsd application ONLINE ONLINE rac2
ora.rac2.ons application ONLINE ONLINE rac2
ora.rac2.vip application ONLINE ONLINE rac2
--註冊相關服務
[oracle@rac1 ~]$ srvctl add asm -n rac1 -i +ASM1 -o /opt/ora10g/product/database
[oracle@rac1 ~]$ srvctl add asm -n rac2 -i +ASM2 -o /opt/ora10g/product/database
[oracle@rac1 ~]$ srvctl start asm -n rac1
[oracle@rac1 ~]$ srvctl start asm -n rac2
[oracle@rac1 ~]$ srvctl add database -d racdb -o /opt/ora10g/product/database
[oracle@rac1 ~]$ srvctl add instance -d racdb -i racdb1 -n rac1
[oracle@rac1 ~]$ srvctl add instance -d racdb -i racdb2 -n rac2
[oracle@rac1 ~]$ srvctl modify instance -d racdb -i racdb1 -s +ASM1
[oracle@rac1 ~]$ srvctl modify instance -d racdb -i racdb2 -s +ASM2
[oracle@rac1 ~]$ srvctl start database -d racdb
--配置listener(先刪除,再重建)
注:需要注意使用者對等性
[oracle@rac1 ~]$ netca
--檢視資源狀態
[oracle@rac1 ~]$ crs_stat -t -v
Name Type R/RA F/FT Target State Host
----------------------------------------------------------------------
ora....SM1.asm application 0/5 0/0 ONLINE ONLINE rac1
ora....C1.lsnr application 0/5 0/0 ONLINE ONLINE rac1
ora.rac1.gsd application 0/5 0/0 ONLINE ONLINE rac1
ora.rac1.ons application 0/3 0/0 ONLINE ONLINE rac1
ora.rac1.vip application 0/0 0/0 ONLINE ONLINE rac1
ora....SM2.asm application 0/5 0/0 ONLINE ONLINE rac2
ora....C2.lsnr application 0/5 0/0 ONLINE ONLINE rac2
ora.rac2.gsd application 0/5 0/0 ONLINE ONLINE rac2
ora.rac2.ons application 0/3 0/0 ONLINE ONLINE rac2
ora.rac2.vip application 0/0 0/0 ONLINE ONLINE rac2
ora.racdb.db application 0/0 0/1 ONLINE ONLINE rac1
ora....b1.inst application 0/5 0/0 ONLINE ONLINE rac1
ora....b2.inst application 0/5 0/0 ONLINE ONLINE rac2
來自 “ ITPUB部落格 ” ,連結:http://blog.itpub.net/26143577/viewspace-744482/,如需轉載,請註明出處,否則將追究法律責任。
相關文章
- rac常用命令
- about oracle10g racOracle
- Docker常用命令整理Docker
- git常用命令整理Git
- Git 常用命令整理Git
- about oracle10g rac(轉)Oracle
- Oracle10g RAC管理 - CRSOracle
- ORACLE10g修改RAC VIPOracle
- Oracle10g RAC配置standbyOracle
- 【RAC】Oracle10g RAC 節點重配的方式Oracle
- 網友整理的一份Oracle10g RAC for all-version Linux -- raw deviceOracleLinuxdev
- Linux常用命令整理Linux
- Linux——常用命令整理Linux
- (轉)整理CentOS常用命令CentOS
- Linux 常用命令整理Linux
- rac 常用命令和工具
- Oracle10g RAC 加節點Oracle
- WebMethod connect to Oracle10g RACWebOracle
- oracle10g rac for linux as 4.0OracleLinux
- 整理:RAC搭建過程
- 關於 RAC VIP (Oracle10G RAC) 的探討(zt)Oracle
- oracle10g RAC Default gateway is not defined (host=rac2) (vm)OracleGateway
- CentOS7.5常用命令整理CentOS
- Shell指令碼常用命令整理指令碼
- sqoop常用命令整理(一)OOP
- (小組)Git 常用命令整理Git
- oracle10g ASM+RAC安裝OracleASM
- Oracle10g RAC設定記錄Oracle
- VMware 搭建 Oracle10g RAC 筆記Oracle筆記
- Oracle10g VMWare RAC搭建圖解Oracle圖解
- oracle10g rac(rhel)_PROC-22Oracle
- Oracle10g RAC ASM磁碟組[zt]OracleASM
- 【RAC】Oracle10g rac新增刪除節點命令參考Oracle
- AIX操作相關常用命令整理AI
- git常見、常用命令列整理Git命令列
- RMAN常用命令——不斷整理中
- Oracle 11g RAC 常用命令Oracle
- 11gr2 rac常用命令