Oracle RAC環境新增新的network和listener(未完成)
作業系統版本:
資料庫版本:
環境說明:
過程步驟:
按照metalink文件 ID 1063571.1往叢集新增network的步驟,是會有問題的,具體過程如下:
新增的VIP(20.20.20.111)跑到節點2上去了,讓它跑回節點1:
檢視日誌並無明顯錯誤資訊。
-
[oracle@rac2 ~]$ uname -a
-
Linux rac2.example.com 2.6.32-431.el6.x86_64 #1 SMP Sun Nov 10 22:19:54 EST 2013 x86_64 x86_64 x86_64 GNU/Linux
-
[oracle@rac2 ~]$ lsb_release -a
-
LSB Version: :base-4.0-amd64:base-4.0-noarch:core-4.0-amd64:core-4.0-noarch:graphics-4.0-amd64:graphics-4.0-noarch:printing-4.0-amd64:printing-4.0-noarch
-
Distributor ID: RedHatEnterpriseServer
-
Description: Red Hat Enterprise Linux Server release 6.5 (Santiago)
-
Release: 6.5
- Codename: Santiago
資料庫版本:
-
SYS@proc2> select * from v$version where rownum=1;
-
-
BANNER
-
--------------------------------------------------------------------------------
- Oracle Database 11g Enterprise Edition Release 11.2.0.4.0 - 64bit Production
環境說明:
-
[root@rac2 ~]# ifconfig
-
eth0 Link encap:Ethernet HWaddr 00:0C:29:ED:B0:97
-
inet addr:192.168.28.200 Bcast:192.168.28.255 Mask:255.255.255.0
-
inet6 addr: fe80::20c:29ff:feed:b097/64 Scope:Link
-
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
-
RX packets:3154 errors:0 dropped:0 overruns:0 frame:0
-
TX packets:2360 errors:0 dropped:0 overruns:0 carrier:0
-
collisions:0 txqueuelen:1000
-
RX bytes:283889 (277.2 KiB) TX bytes:445528 (435.0 KiB)
-
-
eth0:1 Link encap:Ethernet HWaddr 00:0C:29:ED:B0:97
-
inet addr:192.168.28.222 Bcast:192.168.28.255 Mask:255.255.255.0
-
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
-
-
eth1 Link encap:Ethernet HWaddr 00:0C:29:ED:B0:A1
-
inet addr:10.0.0.200 Bcast:10.0.0.255 Mask:255.255.255.0
-
inet6 addr: fe80::20c:29ff:feed:b0a1/64 Scope:Link
-
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
-
RX packets:92201 errors:0 dropped:0 overruns:0 frame:0
-
TX packets:72680 errors:0 dropped:0 overruns:0 carrier:0
-
collisions:0 txqueuelen:1000
-
RX bytes:66845524 (63.7 MiB) TX bytes:42997425 (41.0 MiB)
-
-
eth1:1 Link encap:Ethernet HWaddr 00:0C:29:ED:B0:A1
-
inet addr:169.254.227.158 Bcast:169.254.255.255 Mask:255.255.0.0
- UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
-
-
##新新增的public網路卡資訊##
-
eth2 Link encap:Ethernet HWaddr 00:0C:29:ED:B0:AB
-
inet addr:20.20.20.200 Bcast:20.20.20.255 Mask:255.255.255.0
-
inet6 addr: fe80::20c:29ff:feed:b0ab/64 Scope:Link
-
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
-
RX packets:115 errors:0 dropped:0 overruns:0 frame:0
-
TX packets:154 errors:0 dropped:0 overruns:0 carrier:0
- collisions:0 txqueuelen:1000
-
RX bytes:10309 (10.0 KiB) TX bytes:16597 (16.2 KiB)
-
[root@rac2 ~]# cat /etc/hosts ##背景色標紅為新網路卡資訊以及預新增的vip資訊##
-
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
-
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
-
-
##public##
-
192.168.28.100 rac1.example.com rac1
-
192.168.28.200 rac2.example.com rac2
-
20.20.20.100 rac1_2
-
20.20.20.200 rac2_2
-
-
##private##
-
10.0.0.100 rac1-priv.example.com rac1-priv
-
10.0.0.200 rac2-priv.example.com rac2-priv
-
-
-
##vip##
-
192.168.28.111 rac1-vip.example.com rac1-vip
-
192.168.28.222 rac2-vip.example.com rac2-vip
-
20.20.20.111 rac1-vip2.example.com rac1-vip2
-
20.20.20.222 rac2-vip2.example.com rac2-vip2
-
-
-
##scan##
- 192.168.28.233 scan-ip
過程步驟:
按照metalink文件 ID 1063571.1往叢集新增network的步驟,是會有問題的,具體過程如下:
-
[root@rac2 ~]# srvctl add network -k 2 -S 20.20.20.0/255.255.255.0/eth2
-
[root@rac2 ~]# srvctl config network
-
Network exists: 1/192.168.28.0/255.255.255.0/eth0, type static
- Network exists: 2/20.20.20.0/255.255.255.0/eth2, type static
-
[root@rac2 ~]# srvctl status vip -n rac1
-
VIP 20.20.20.111 is enabled
-
VIP 20.20.20.111 is not running
-
VIP rac1-vip is enabled
-
VIP rac1-vip is running on node: rac1
-
[root@rac2 ~]# srvctl status vip -n rac2
-
VIP 20.20.20.222 is enabled
-
VIP 20.20.20.222 is not running
-
VIP rac2-vip is enabled
-
VIP rac2-vip is running on node: rac2
-
[root@rac2 ~]#
-
[root@rac2 ~]# srvctl start vip -n rac1
-
PRKO-2420 : VIP is already started on node(s): rac1
-
[root@rac2 ~]# srvctl start vip -i 20.20.20.111
-
PRKO-2420 : VIP is already started on node(s): rac2
-
[oracle@rac2 ~]$ ifconfig eth2:1
-
eth2:1 Link encap:Ethernet HWaddr 00:0C:29:ED:B0:AB
-
inet addr:20.20.20.111 Bcast:20.20.20.255 Mask:255.255.255.0
- UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
-
[root@rac2 admin]# crsctl stop res ora.rac1-vip2.vip
-
CRS-2673: Attempting to stop 'ora.rac1-vip2.vip' on 'rac2'
-
CRS-2677: Stop of 'ora.rac1-vip2.vip' on 'rac2' succeeded
-
[root@rac2 admin]# crsctl start res ora.rac1-vip2.vip -n rac1
-
CRS-2672: Attempting to start 'ora.net2.network' on 'rac1'
-
CRS-2676: Start of 'ora.net2.network' on 'rac1' succeeded
-
CRS-2672: Attempting to start 'ora.rac1-vip2.vip' on 'rac1'
- CRS-2676: Start of 'ora.rac1-vip2.vip' on 'rac1' succeeded
-
[root@rac2 admin]# srvctl start vip -n rac2
-
PRKO-2420 : VIP is already started on node(s): rac2
-
[root@rac2 admin]# ifconfig eth2:1
-
eth2:1 Link encap:Ethernet HWaddr 00:0C:29:ED:B0:AB
-
inet addr:20.20.20.222 Bcast:20.20.20.255 Mask:255.255.255.0
- UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
-
[root@rac2 admin]# srvctl config vip -n rac1
-
VIP exists: /20.20.20.111/20.20.20.111/20.20.20.0/255.255.255.0/eth2, hosting node rac1
-
VIP exists: /rac1-vip/192.168.28.111/192.168.28.0/255.255.255.0/eth0, hosting node rac1
-
[root@rac2 admin]# srvctl config vip -n rac2
-
VIP exists: /20.20.20.222/20.20.20.222/20.20.20.0/255.255.255.0/eth2, hosting node rac2
-
VIP exists: /rac2-vip/192.168.28.222/192.168.28.0/255.255.255.0/eth0, hosting node rac2
-
[root@rac2 admin]# srvctl status vip -n rac1
-
VIP 20.20.20.111 is enabled
-
VIP 20.20.20.111 is running on node: rac1
-
VIP rac1-vip is enabled
-
VIP rac1-vip is running on node: rac1
-
[root@rac2 admin]# srvctl status vip -n rac2
-
VIP 20.20.20.222 is enabled
-
VIP 20.20.20.222 is running on node: rac2
-
VIP rac2-vip is enabled
- VIP rac2-vip is running on node: rac2
-
[root@rac2 admin]# crsctl stat res -t
- ---省略部分內容---
-
--------------------------------------------------------------------------------
-
Cluster Resources
-
--------------------------------------------------------------------------------
-
ora.LISTENER_SCAN1.lsnr
-
1 ONLINE ONLINE rac2
-
ora.cvu
-
1 ONLINE ONLINE rac2
-
ora.oc4j
-
1 ONLINE ONLINE rac1
-
ora.proc.db
-
1 ONLINE ONLINE rac1 Open
-
2 ONLINE ONLINE rac2 Open
-
ora.rac1-vip2.vip
-
1 ONLINE INTERMEDIATE rac1 FAILED OVER
-
ora.rac1.vip
-
1 ONLINE ONLINE rac1
-
ora.rac2-vip2.vip
-
1 ONLINE INTERMEDIATE rac2 FAILED OVER
-
ora.rac2.vip
-
1 ONLINE ONLINE rac2
-
ora.scan1.vip
- 1 ONLINE ONLINE rac2
-
[root@rac2 admin]# srvctl stop vip -n rac1
-
PRCR-1014 : Failed to stop resource ora.rac1.vip
-
PRCR-1065 : Failed to stop resource ora.rac1.vip
-
CRS-2529: Unable to act on 'ora.rac1.vip' because that would require stopping or relocating 'ora.LISTENER.lsnr', but the force option was not specified
-
[root@rac2 admin]#
-
[root@rac2 admin]# srvctl stop vip -n rac1 -f
-
PRCC-1017 : 20.20.20.111 was already stopped on rac1
-
PRCR-1005 : Resource ora.rac1-vip2.vip is already stopped
-
[root@rac2 admin]# srvctl stop vip -n rac2 -f
-
[root@rac2 admin]#
-
[root@rac2 admin]# crsctl stop res ora.net2.network
-
CRS-2673: Attempting to stop 'ora.net2.network' on 'rac2'
-
CRS-2673: Attempting to stop 'ora.net2.network' on 'rac1'
-
CRS-2677: Stop of 'ora.net2.network' on 'rac1' succeeded
-
CRS-2677: Stop of 'ora.net2.network' on 'rac2' succeeded
-
[root@rac2 admin]# srvctl config vip -n rac1
-
VIP exists: /20.20.20.111/20.20.20.111/20.20.20.0/255.255.255.0/eth2, hosting node rac1
-
VIP exists: /rac1-vip/192.168.28.111/192.168.28.0/255.255.255.0/eth0, hosting node rac1
-
[root@rac2 admin]# srvctl remove vip -i 20.20.20.111
-
Please confirm that you intend to remove the VIPs 20.20.20.111 (y/[n]) y
-
[root@rac2 admin]# srvctl remove vip -i 20.20.20.222
-
Please confirm that you intend to remove the VIPs 20.20.20.222 (y/[n]) y
-
[root@rac2 admin]# srvctl remove network -k 2
-
PRCR-1001 : Resource ora.net2.network does not exist
- [root@rac2 admin]#
來自 “ ITPUB部落格 ” ,連結:http://blog.itpub.net/30174570/viewspace-2152633/,如需轉載,請註明出處,否則將追究法律責任。
相關文章
- Oracle 10.2.0.3 RAC環境新增新節點(一)Oracle
- Oracle 10.2.0.3 RAC環境新增新節點(六)Oracle
- Oracle 10.2.0.3 RAC環境新增新節點(五)Oracle
- Oracle 10.2.0.3 RAC環境新增新節點(四)Oracle
- Oracle 10.2.0.3 RAC環境新增新節點(三)Oracle
- Oracle 10.2.0.3 RAC環境新增新節點(二)Oracle
- 虛擬機器VMware下 Oracle RAC環境新增磁碟虛擬機Oracle
- Oracle10g RAC環境OCR的新增、刪除、備份Oracle
- Oracle10g RAC環境VoteDisk的新增、刪除、備份Oracle
- oracle rac 環境檢測Oracle
- Windows環境配置Oracle 11gR2 ListenerWindowsOracle
- unix/linux環境中Oracle 10G RAC OFF和RAC ONLinuxOracle 10g
- ORACLE RAC 環境下修改IPOracle
- Oracle RAC + Data Guard 環境搭建Oracle
- Oracle RAC 環境下的連線管理Oracle
- 【Oracle】 RAC 環境刪除oracle 之二Oracle
- 【Oracle】 RAC 環境刪除oracle 之一Oracle
- 【Mongodb】分片複製集環境新增新的分片MongoDB
- [Oracle] Oracle RAC中local_listener指定Oracle
- RAC環境中的應用程式部署——RAC部署和效能
- RAC和ASM環境下打patchASM
- ORACLE RAC環境下刪除節點Oracle
- Linux 新增環境變數和刪除環境變數Linux變數
- Oracle RAC環境下ASM磁碟組擴容OracleASM
- Oracle10g RAC ASM 環境日常管理OracleASM
- 使用 runcluvfy 校驗Oracle RAC安裝環境Oracle
- ORACLE RAC資料庫配置Dataguard環境(3)Oracle資料庫
- ORACLE RAC資料庫配置Dataguard環境(2)Oracle資料庫
- ORACLE RAC資料庫配置Dataguard環境(1)Oracle資料庫
- (轉)Oracle rac環境下清除asm例項OracleASM
- oracle 11G RAC的建立(VM虛擬環境)Oracle
- Oracle RAC環境下vip/public/private IP的區別Oracle
- 在aix oracle rac 環境下,增加lv的步驟AIOracle
- 【RAC】Oracle11g RAC新增新節點相關事項Oracle
- Oracle RAC 高可用性體系結構與叢集 單例項環境與 Oracle RAC 環境對比Oracle單例
- RAC環境中的資料庫部署技術——RAC部署和效能資料庫
- 介紹RAC環境中的應用程式部署——RAC部署和效能
- 【RAC】Oracle RAC叢集環境下日誌檔案結構Oracle