Oracle RAC環境新增新的network和listener(未完成)
作業系統版本:
資料庫版本:
環境說明:
過程步驟:
按照 MetaLink 文件 ID 1063571.1 向叢集新增 network 的步驟操作,是會有問題的,具體過程如下:
新增的 VIP 資源跑到節點2上去了,需要讓它跑回節點1:
檢視日誌並無明顯錯誤資訊。
-
[oracle@rac2 ~]$ uname -a
-
Linux rac2.example.com 2.6.32-431.el6.x86_64 #1 SMP Sun Nov 10 22:19:54 EST 2013 x86_64 x86_64 x86_64 GNU/Linux
-
[oracle@rac2 ~]$ lsb_release -a
-
LSB Version: :base-4.0-amd64:base-4.0-noarch:core-4.0-amd64:core-4.0-noarch:graphics-4.0-amd64:graphics-4.0-noarch:printing-4.0-amd64:printing-4.0-noarch
-
Distributor ID: RedHatEnterpriseServer
-
Description: Red Hat Enterprise Linux Server release 6.5 (Santiago)
-
Release: 6.5
- Codename: Santiago
資料庫版本:
-
SYS@proc2> select * from v$version where rownum=1;
-
-
BANNER
-
--------------------------------------------------------------------------------
- Oracle Database 11g Enterprise Edition Release 11.2.0.4.0 - 64bit Production
環境說明:
-
[root@rac2 ~]# ifconfig
-
eth0 Link encap:Ethernet HWaddr 00:0C:29:ED:B0:97
-
inet addr:192.168.28.200 Bcast:192.168.28.255 Mask:255.255.255.0
-
inet6 addr: fe80::20c:29ff:feed:b097/64 Scope:Link
-
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
-
RX packets:3154 errors:0 dropped:0 overruns:0 frame:0
-
TX packets:2360 errors:0 dropped:0 overruns:0 carrier:0
-
collisions:0 txqueuelen:1000
-
RX bytes:283889 (277.2 KiB) TX bytes:445528 (435.0 KiB)
-
-
eth0:1 Link encap:Ethernet HWaddr 00:0C:29:ED:B0:97
-
inet addr:192.168.28.222 Bcast:192.168.28.255 Mask:255.255.255.0
-
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
-
-
eth1 Link encap:Ethernet HWaddr 00:0C:29:ED:B0:A1
-
inet addr:10.0.0.200 Bcast:10.0.0.255 Mask:255.255.255.0
-
inet6 addr: fe80::20c:29ff:feed:b0a1/64 Scope:Link
-
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
-
RX packets:92201 errors:0 dropped:0 overruns:0 frame:0
-
TX packets:72680 errors:0 dropped:0 overruns:0 carrier:0
-
collisions:0 txqueuelen:1000
-
RX bytes:66845524 (63.7 MiB) TX bytes:42997425 (41.0 MiB)
-
-
eth1:1 Link encap:Ethernet HWaddr 00:0C:29:ED:B0:A1
-
inet addr:169.254.227.158 Bcast:169.254.255.255 Mask:255.255.0.0
- UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
-
-
##新新增的public網路卡資訊##
-
eth2 Link encap:Ethernet HWaddr 00:0C:29:ED:B0:AB
-
inet addr:20.20.20.200 Bcast:20.20.20.255 Mask:255.255.255.0
-
inet6 addr: fe80::20c:29ff:feed:b0ab/64 Scope:Link
-
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
-
RX packets:115 errors:0 dropped:0 overruns:0 frame:0
-
TX packets:154 errors:0 dropped:0 overruns:0 carrier:0
- collisions:0 txqueuelen:1000
-
RX bytes:10309 (10.0 KiB) TX bytes:16597 (16.2 KiB)
-
[root@rac2 ~]# cat /etc/hosts ##背景色標紅為新網路卡資訊以及預新增的vip資訊##
-
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
-
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
-
-
##public##
-
192.168.28.100 rac1.example.com rac1
-
192.168.28.200 rac2.example.com rac2
-
20.20.20.100 rac1_2
-
20.20.20.200 rac2_2
-
-
##private##
-
10.0.0.100 rac1-priv.example.com rac1-priv
-
10.0.0.200 rac2-priv.example.com rac2-priv
-
-
-
##vip##
-
192.168.28.111 rac1-vip.example.com rac1-vip
-
192.168.28.222 rac2-vip.example.com rac2-vip
-
20.20.20.111 rac1-vip2.example.com rac1-vip2
-
20.20.20.222 rac2-vip2.example.com rac2-vip2
-
-
-
##scan##
- 192.168.28.233 scan-ip
過程步驟:
按照 MetaLink 文件 ID 1063571.1 向叢集新增 network 的步驟操作,是會有問題的,具體過程如下:
-
[root@rac2 ~]# srvctl add network -k 2 -S 20.20.20.0/255.255.255.0/eth2
-
[root@rac2 ~]# srvctl config network
-
Network exists: 1/192.168.28.0/255.255.255.0/eth0, type static
- Network exists: 2/20.20.20.0/255.255.255.0/eth2, type static
-
[root@rac2 ~]# srvctl status vip -n rac1
-
VIP 20.20.20.111 is enabled
-
VIP 20.20.20.111 is not running
-
VIP rac1-vip is enabled
-
VIP rac1-vip is running on node: rac1
-
[root@rac2 ~]# srvctl status vip -n rac2
-
VIP 20.20.20.222 is enabled
-
VIP 20.20.20.222 is not running
-
VIP rac2-vip is enabled
-
VIP rac2-vip is running on node: rac2
-
[root@rac2 ~]#
-
[root@rac2 ~]# srvctl start vip -n rac1
-
PRKO-2420 : VIP is already started on node(s): rac1
-
[root@rac2 ~]# srvctl start vip -i 20.20.20.111
-
PRKO-2420 : VIP is already started on node(s): rac2
-
[oracle@rac2 ~]$ ifconfig eth2:1
-
eth2:1 Link encap:Ethernet HWaddr 00:0C:29:ED:B0:AB
-
inet addr:20.20.20.111 Bcast:20.20.20.255 Mask:255.255.255.0
- UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
-
[root@rac2 admin]# crsctl stop res ora.rac1-vip2.vip
-
CRS-2673: Attempting to stop 'ora.rac1-vip2.vip' on 'rac2'
-
CRS-2677: Stop of 'ora.rac1-vip2.vip' on 'rac2' succeeded
-
[root@rac2 admin]# crsctl start res ora.rac1-vip2.vip -n rac1
-
CRS-2672: Attempting to start 'ora.net2.network' on 'rac1'
-
CRS-2676: Start of 'ora.net2.network' on 'rac1' succeeded
-
CRS-2672: Attempting to start 'ora.rac1-vip2.vip' on 'rac1'
- CRS-2676: Start of 'ora.rac1-vip2.vip' on 'rac1' succeeded
-
[root@rac2 admin]# srvctl start vip -n rac2
-
PRKO-2420 : VIP is already started on node(s): rac2
-
[root@rac2 admin]# ifconfig eth2:1
-
eth2:1 Link encap:Ethernet HWaddr 00:0C:29:ED:B0:AB
-
inet addr:20.20.20.222 Bcast:20.20.20.255 Mask:255.255.255.0
- UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
-
[root@rac2 admin]# srvctl config vip -n rac1
-
VIP exists: /20.20.20.111/20.20.20.111/20.20.20.0/255.255.255.0/eth2, hosting node rac1
-
VIP exists: /rac1-vip/192.168.28.111/192.168.28.0/255.255.255.0/eth0, hosting node rac1
-
[root@rac2 admin]# srvctl config vip -n rac2
-
VIP exists: /20.20.20.222/20.20.20.222/20.20.20.0/255.255.255.0/eth2, hosting node rac2
-
VIP exists: /rac2-vip/192.168.28.222/192.168.28.0/255.255.255.0/eth0, hosting node rac2
-
[root@rac2 admin]# srvctl status vip -n rac1
-
VIP 20.20.20.111 is enabled
-
VIP 20.20.20.111 is running on node: rac1
-
VIP rac1-vip is enabled
-
VIP rac1-vip is running on node: rac1
-
[root@rac2 admin]# srvctl status vip -n rac2
-
VIP 20.20.20.222 is enabled
-
VIP 20.20.20.222 is running on node: rac2
-
VIP rac2-vip is enabled
- VIP rac2-vip is running on node: rac2
-
[root@rac2 admin]# crsctl stat res -t
- ---省略部分內容---
-
--------------------------------------------------------------------------------
-
Cluster Resources
-
--------------------------------------------------------------------------------
-
ora.LISTENER_SCAN1.lsnr
-
1 ONLINE ONLINE rac2
-
ora.cvu
-
1 ONLINE ONLINE rac2
-
ora.oc4j
-
1 ONLINE ONLINE rac1
-
ora.proc.db
-
1 ONLINE ONLINE rac1 Open
-
2 ONLINE ONLINE rac2 Open
-
ora.rac1-vip2.vip
-
1 ONLINE INTERMEDIATE rac1 FAILED OVER
-
ora.rac1.vip
-
1 ONLINE ONLINE rac1
-
ora.rac2-vip2.vip
-
1 ONLINE INTERMEDIATE rac2 FAILED OVER
-
ora.rac2.vip
-
1 ONLINE ONLINE rac2
-
ora.scan1.vip
- 1 ONLINE ONLINE rac2
-
[root@rac2 admin]# srvctl stop vip -n rac1
-
PRCR-1014 : Failed to stop resource ora.rac1.vip
-
PRCR-1065 : Failed to stop resource ora.rac1.vip
-
CRS-2529: Unable to act on 'ora.rac1.vip' because that would require stopping or relocating 'ora.LISTENER.lsnr', but the force option was not specified
-
[root@rac2 admin]#
-
[root@rac2 admin]# srvctl stop vip -n rac1 -f
-
PRCC-1017 : 20.20.20.111 was already stopped on rac1
-
PRCR-1005 : Resource ora.rac1-vip2.vip is already stopped
-
[root@rac2 admin]# srvctl stop vip -n rac2 -f
-
[root@rac2 admin]#
-
[root@rac2 admin]# crsctl stop res ora.net2.network
-
CRS-2673: Attempting to stop 'ora.net2.network' on 'rac2'
-
CRS-2673: Attempting to stop 'ora.net2.network' on 'rac1'
-
CRS-2677: Stop of 'ora.net2.network' on 'rac1' succeeded
-
CRS-2677: Stop of 'ora.net2.network' on 'rac2' succeeded
-
[root@rac2 admin]# srvctl config vip -n rac1
-
VIP exists: /20.20.20.111/20.20.20.111/20.20.20.0/255.255.255.0/eth2, hosting node rac1
-
VIP exists: /rac1-vip/192.168.28.111/192.168.28.0/255.255.255.0/eth0, hosting node rac1
-
[root@rac2 admin]# srvctl remove vip -i 20.20.20.111
-
Please confirm that you intend to remove the VIPs 20.20.20.111 (y/[n]) y
-
[root@rac2 admin]# srvctl remove vip -i 20.20.20.222
-
Please confirm that you intend to remove the VIPs 20.20.20.222 (y/[n]) y
-
[root@rac2 admin]# srvctl remove network -k 2
-
PRCR-1001 : Resource ora.net2.network does not exist
- [root@rac2 admin]#
來自 “ ITPUB部落格 ” ,連結:http://blog.itpub.net/30174570/viewspace-2152633/,如需轉載,請註明出處,否則將追究法律責任。
相關文章
- Oracle RAC 環境下的連線管理Oracle
- [20211013]Oracle 19c新特性Listener自動清理(Network Log File Segmentation).txtOracleSegmentation
- Oracle RAC環境下ASM磁碟組擴容OracleASM
- 【NETWORK】Oracle RAC 心跳地址配置說明Oracle
- oracle 11G RAC的建立(VM虛擬環境)Oracle
- Oracle RAC新增節點Oracle
- RAC和ASM環境下打patchASM
- Oracle RAC 環境 引數檔案的啟動順序Oracle
- Oracle 11.2.0.4 rac for aix acfs異常環境的克隆環境ASM磁碟組掛載緩慢OracleAIASM
- Oracle RAC一鍵部署001(主機環境校驗)Oracle
- ORACLE 12C RAC 生產環境搭建介紹Oracle
- 通過ORACLE VM virtualbox環境安裝oracle 11G RAC(ASM)OracleASM
- RAC環境修改spfile的位置
- 關於RAC的remote_listenerREM
- 【Mongodb】分片複製集環境新增新的分片MongoDB
- 超大記憶體環境下的Oracle RAC引數設定建議記憶體Oracle
- Oracle 12cR1 RAC叢集安裝(一)--環境準備Oracle
- rabbitmq環境,c#程式接收q,(未完成)MQC#
- oracle11g RAC新增節點Oracle
- KingbaseES RAC部署案例之---SAN環境構建RAC
- Mac 設定環境變數的位置、檢視和新增PATH環境變數Mac變數
- Oracle 11g RAC手動新增serviceOracle
- 京東雲環境搭建oracle rac詳細部署梳理(可信的結果輸出)Oracle
- 【RAC】Oracle10g rac新增刪除節點命令參考Oracle
- ubuntu 快捷新增和刪除環境變數Ubuntu變數
- 手工清理19c RAC環境
- RAC環境下建立物理DATAGUARD(1)
- RAC環境下建立物理DATAGUARD(2)
- oracle11g_RAC新增刪除節點Oracle
- Oracle 11g RAC重新新增節點Oracle
- [Oracle] -- 配置Oracle環境變數Oracle變數
- RAC環境下的SEQUENCE對應用的影響
- Java環境變數新增Java變數
- jupyter中新增conda環境
- 如何在rac環境中增加監聽
- Oracle優化案例-新增RAC節點(二十九)Oracle優化
- AIX 5.3/6.1環境下安裝Oracle 10gR2 RAC常見報錯AIOracle 10g
- rac 新增第二public ip 和 vip