Single-Node 10.2.0.4 RAC Installation
Check the current login shell:
# echo $SHELL
Disable the sendmail service to speed up booting:
# chkconfig sendmail off
After the operating system installation is complete:
1) Add two hard disks.
2) Add a new network card, eth1.
eth0 is the RAC public network (NAT)
eth1 is the RAC private network (host-only)
#vi /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE=eth0
BOOTPROTO=static
BROADCAST=192.168.208.255
IPADDR=192.168.208.11
GATEWAY=192.168.208.1
NETMASK=255.255.255.0
HWADDR=00:0C:29:49:8A:CB
ONBOOT=yes
TYPE=Ethernet
#vi /etc/sysconfig/network-scripts/ifcfg-eth1
DEVICE=eth1
BOOTPROTO=static
IPADDR=192.168.136.31
GATEWAY=192.168.136.1
HWADDR=00:0C:29:49:8A:D5
ONBOOT=yes
TYPE=Ethernet
#service network restart
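To confirm both interfaces came up with the intended addresses, a quick sanity check (not part of the original walkthrough):
# ifconfig eth0 | grep "inet addr"
# ifconfig eth1 | grep "inet addr"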
3) Set the hostname and /etc/hosts
# vi /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=node1
# vi /etc/hosts
127.0.0.1 localhost.localdomain localhost
192.168.208.11 node1
192.168.208.21 node1-vip
192.168.136.31 node1-priv
4) Create the oinstall and dba groups and the oracle user
#groupadd oinstall
#groupadd dba
#useradd -g oinstall -G dba oracle
#passwd oracle
#id oracle
5) Edit the oracle user's environment profile
#vi /home/oracle/.bash_profile
export TMP=/tmp
export TMPDIR=$TMP
export ORACLE_BASE=/opt/ora10g
export ORACLE_HOME=$ORACLE_BASE/product/10.2.0/db_1
export ORACLE_SID=RACDB1
export ORACLE_TERM=xterm
export PATH=/usr/sbin:$PATH
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib:/usr/local/lib
export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib
unset USERNAME
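To load the profile in the current session and confirm the variables took effect, a quick check (not shown in the original):
# su - oracle
$ echo $ORACLE_HOME $ORACLE_SID
/opt/ora10g/product/10.2.0/db_1 RACDB1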
6) Create the Oracle installation directory and set its ownership
#mkdir /opt/ora10g
#chown oracle:oinstall /opt/ora10g
7) Tune the operating system kernel parameters
/* Configure kernel parameters */
#vi /etc/sysctl.conf
kernel.shmall = 2097152
kernel.shmmax = 536870912
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
fs.file-max = 65536
net.ipv4.ip_local_port_range = 1024 65000
net.core.rmem_default = 1048576
net.core.rmem_max = 1048576
net.core.wmem_default = 262144
net.core.wmem_max = 262144
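The original does not show applying these settings; they can be loaded without a reboot:
# sysctl -p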
/* Set the file-descriptor and process limits for the oracle user */
#vi /etc/security/limits.conf
oracle soft nproc 2047
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536
/* Update the PAM security limits for login */
#vi /etc/pam.d/login
session required /lib/security/pam_limits.so
session required pam_limits.so
/* Configure the hangcheck timer */
#vi /etc/rc.local
modprobe hangcheck-timer hangcheck_tick=30 hangcheck_margin=180
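rc.local only runs at boot, so as an optional extra step the module can be loaded and verified immediately:
# modprobe hangcheck-timer hangcheck_tick=30 hangcheck_margin=180
# lsmod | grep hangcheck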
8) Partition the disks
[root@node1 ~]# fdisk -l
Disk /dev/sda: 12.8 GB, 12884901888 bytes
255 heads, 63 sectors/track, 1566 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sda1 * 1 13 104391 83 Linux
/dev/sda2 14 204 1534207+ 82 Linux swap
/dev/sda3 205 1566 10940265 83 Linux
Disk /dev/sdb: 1073 MB, 1073741824 bytes
255 heads, 63 sectors/track, 130 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sdb1 1 65 522081 83 Linux
/dev/sdb2 66 130 522112+ 83 Linux
Disk /dev/sdc: 8589 MB, 8589934592 bytes
255 heads, 63 sectors/track, 1044 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sdc1 1 522 4192933+ 83 Linux
/dev/sdc2 523 1044 4192965 83 Linux
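The original shows only the resulting partition tables. As a sketch, /dev/sdb can be split into the two halves shown above with an interactive fdisk session (the exact prompts vary; cylinder numbers follow the listing above):
# fdisk /dev/sdb
  n -> p -> 1 -> first cylinder 1  -> last cylinder 65    (creates /dev/sdb1)
  n -> p -> 2 -> first cylinder 66 -> last cylinder 130   (creates /dev/sdb2)
  w                                                       (write and exit)
/dev/sdc is partitioned the same way into two roughly 4 GB halves.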
9) Configure raw devices
+ Bind them through the rawdevices service so they are attached automatically at boot
#vi /etc/sysconfig/rawdevices
/dev/raw/raw1 /dev/sdb1
/dev/raw/raw2 /dev/sdb2
Then restart the service:
#service rawdevices stop
#service rawdevices start
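To verify the bindings took effect (a check the original omits; output should resemble the following):
# raw -qa
/dev/raw/raw1: bound to major 8, minor 17
/dev/raw/raw2: bound to major 8, minor 18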
10) Change the ownership of the raw devices
+ Edit the /etc/udev/permissions.d/50-udev.permissions file:
change line 113 of /etc/udev/permissions.d/50-udev.permissions
from
raw/*:root:disk:0660
to
raw/*:oracle:oinstall:0660
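The udev rule only applies from the next boot; to fix the ownership immediately (an extra step, not in the original):
# chown oracle:oinstall /dev/raw/raw1 /dev/raw/raw2
# ls -l /dev/raw/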
11) Install the oracleasmlib packages
compat-gcc-7.3-2.96.128.i386.rpm
compat-gcc-c++-7.3-2.96.128.i386.rpm
compat-libstdc++-7.3-2.96.128.i386.rpm
compat-libstdc++-devel-7.3-2.96.128.i386.rpm
oracleasm-2.6.9-78.ELsmp-2.0.5-1.el4.i686.rpm
oracleasmlib-2.0.2-1.i386.rpm
oracleasm-support-2.1.1-1.el4.i386.rpm
# rpm -Uvh oracleasm-support-2.1.1-1.el4.i386.rpm
# rpm -Uvh oracleasm-2.6.9-78.ELsmp-2.0.5-1.el4.i686.rpm
# rpm -Uvh oracleasmlib-2.0.2-1.i386.rpm
+ Installing compat-libstdc++-7.3-2.96.128.i386.rpm conflicts with compat-libstdc++-296-2.96-132.7.2, which must be removed first:
[root@node2 ~]# mount.cifs //192.168.245.1/linux /mnt/share -o user=administrator
[root@node2 share]# rpm -Uvh compat-libstdc++-7.3-2.96.128.i386.rpm
warning: compat-libstdc++-7.3-2.96.128.i386.rpm: V3 DSA signature: NOKEY, key ID db42a60e
Preparing... ########################################### [100%]
file /usr/lib/libstdc++-2-libc6.1-1-2.9.0.so from install of compat-libstdc++-7.3-2.96.128 conflicts with file from package compat-libstdc++-296-2.96-132.7.2
file /usr/lib/libstdc++-3-libc6.2-2-2.10.0.so from install of compat-libstdc++-7.3-2.96.128 conflicts with file from package compat-libstdc++-296-2.96-132.7.2
[root@node2 share]# rpm -e compat-libstdc++-296-2.96-132.7.2
[root@node2 share]# rpm -Uvh compat-libstdc++-7.3-2.96.128.i386.rpm
warning: compat-libstdc++-7.3-2.96.128.i386.rpm: V3 DSA signature: NOKEY, key ID db42a60e
Preparing... ########################################### [100%]
1:compat-libstdc++ ########################################### [100%]
14) Prepare the Clusterware installation environment
+ Generate public and private SSH keys as the oracle user
++ on node1
#su - oracle
$mkdir ~/.ssh
$chmod 700 ~/.ssh
$ssh-keygen -t rsa
$ssh-keygen -t dsa
++ on node1
$cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
$cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
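sshd may silently reject the key file if its permissions are too open, so tightening them (a step the original omits) is prudent:
$ chmod 600 ~/.ssh/authorized_keys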
+ Test that ssh between the node names works without prompting for a password
ssh node1 date
ssh node1-priv date
15) Configure ASM
+ on node1
[root@node1 ~]#/etc/init.d/oracleasm configure
Configuring the Oracle ASM library driver.
This will configure the on-boot properties of the Oracle ASM library
driver. The following questions will determine whether the driver is
loaded on boot and what permissions it will have. The current values
will be shown in brackets ('[]'). Hitting <ENTER> without typing an
answer will keep that current value. Ctrl-C will abort.
Default user to own the driver interface []: oracle
Default group to own the driver interface []: dba
Start Oracle ASM library driver on boot (y/n) [n]: y
Scan for Oracle ASM disks on boot (y/n) [y]: y
Writing Oracle ASM library driver configuration: done
Initializing the Oracle ASMLib driver: [ OK ]
Scanning the system for Oracle ASMLib disks: [ OK ]
+ on node1
[root@node1 ~]# /etc/init.d/oracleasm createdisk VOL1 /dev/sdc1
Marking disk "VOL1" as an ASM disk: [ OK ]
[root@node1 ~]# /etc/init.d/oracleasm createdisk VOL2 /dev/sdc2
Marking disk "VOL2" as an ASM disk: [ OK ]
[root@node1 ~]# /etc/init.d/oracleasm scandisks
Scanning the system for Oracle ASMLib disks: [ OK ]
[root@node1 ~]# /etc/init.d/oracleasm listdisks
VOL1
VOL2
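Each volume can also be validated individually; an optional check, not in the original, whose output should look roughly like this:
[root@node1 ~]# /etc/init.d/oracleasm querydisk VOL1
Disk "VOL1" is a valid ASM disk on device [8, 33]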
16) Install Clusterware
+ On node1, check the environment before installing:
$clusterware/cluvfy/runcluvfy.sh stage -pre crsinst -n node1,node2 -verbose
Only one error appears at this point; it is caused by a known bug and can be ignored (Doc ID: 338924.1):
====================================
Suitable interfaces for the private interconnect on subnet "192.168.245.0":
node2 eth0:192.168.245.12
node1 eth0:192.168.245.11
Suitable interfaces for the private interconnect on subnet "192.168.31.0":
node2 eth1:192.168.31.32
node1 eth1:192.168.31.31
ERROR:
Could not find a suitable set of interfaces for VIPs.
Result: Node connectivity check failed.
====================================
17) Install CRS
+ on node1
#xhost +
#su - oracle
$./runInstaller
..
...
....
[root@node2 ~]# sh /opt/ora10g/oraInventory/orainstRoot.sh
Changing permissions of /opt/ora10g/oraInventory to 770.
Changing groupname of /opt/ora10g/oraInventory to oinstall.
The execution of the script is complete
[root@node2 ~]# sh /opt/ora10g/product/10.2.0/crs_1/root.sh
WARNING: directory '/opt/ora10g/product/10.2.0' is not owned by root
WARNING: directory '/opt/ora10g/product' is not owned by root
WARNING: directory '/opt/ora10g' is not owned by root
Checking to see if Oracle CRS stack is already configured
/etc/oracle does not exist. Creating it now.
Setting the permissions on OCR backup directory
Setting up NS directories
Oracle Cluster Registry configuration upgraded successfully
WARNING: directory '/opt/ora10g/product/10.2.0' is not owned by root
WARNING: directory '/opt/ora10g/product' is not owned by root
WARNING: directory '/opt/ora10g' is not owned by root
assigning default hostname node1 for node 1.
assigning default hostname node2 for node 2.
Successfully accumulated necessary OCR keys.
Using ports: CSS=49895 CRS=49896 EVMC=49898 and EVMR=49897.
node
node 1: node1 node1-priv node1
node 2: node2 node2-priv node2
Creating OCR keys for user 'root', privgrp 'root'..
Operation successful.
Now formatting voting device: /dev/raw/raw4
Format of 1 voting devices complete.
Startup will be queued to init within 90 seconds.
Adding daemons to inittab
Expecting the CRS daemons to be up within 600 seconds.
CSS is active on these nodes.
node1
node2
CSS is active on all nodes.
Waiting for the Oracle CRSD and EVMD to start
Oracle CRS stack installed and running under init(1M)
Running vipca(silent) for configuring nodeapps
The given interface(s), "eth0" is not public. Public interfaces should be used to configure virtual IPs.
[root@node2 ~]#
/* Manually reconfigure node1-vip and node2-vip */
[root@node2 ~]# /opt/ora10g/product/10.2.0/crs_1/bin/vipca
IP Alias Name IP address
node1-vip 192.168.245.21
node2-vip 192.168.245.22
When the installation finishes, click Exit, return to node1, and click OK to finish.
In the Configuration Assistants step, the
Oracle Cluster Verification Utility check fails, but this does not affect normal operation.
Click Next -> Finish to complete the Clusterware installation.
+ Check the cluster status after installation
[root@node1 ~]# /opt/ora10g/product/10.2.0/crs_1/bin/crs_stat -t
Name Type Target State Host
------------------------------------------------------------
ora.node1.gsd application ONLINE ONLINE node1
ora.node1.ons application ONLINE ONLINE node1
ora.node1.vip application ONLINE ONLINE node1
ora.node2.gsd application ONLINE ONLINE node2
ora.node2.ons application ONLINE ONLINE node2
ora.node2.vip application ONLINE ONLINE node2
[root@node1 ~]#
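The same node applications can also be checked through srvctl; an equivalent view, not shown in the original:
[root@node1 ~]# /opt/ora10g/product/10.2.0/crs_1/bin/srvctl status nodeapps -n node1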
18) Install the database software
+ on node1
Steps omitted.
19) Upgrade the Clusterware and database software to 10.2.0.4
+ Download the 10.2.0.4 patch set p6810189_10204_Linux-x86.zip.
During installation, select the corresponding HOME to upgrade the clusterware home and the database home to 10.2.0.4.
1) Stop the listener and the database
2) Upgrade Clusterware
[root@node1 ~]# /opt/ora10g/product/10.2.0/crs_1/bin/crs_stat -t
Name Type Target State Host
------------------------------------------------------------
ora.node1.gsd application ONLINE ONLINE node1
ora.node1.ons application ONLINE ONLINE node1
ora.node1.vip application ONLINE ONLINE node1
[oracle@node1 cluster]$ ./runInstaller
...
...
[root@node1 ~]# /opt/ora10g/product/10.2.0/crs_1/bin/crsctl stop crs
Stopping resources.
Successfully stopped CRS resources
Stopping CSSD.
Shutting down CSS daemon.
Shutdown request successfully issued.
[root@node1 ~]# /opt/ora10g/product/10.2.0/crs_1/install/root102.sh
Creating pre-patch directory for saving pre-patch clusterware files
Completed patching clusterware files to /opt/ora10g/product/10.2.0/crs_1
Relinking some shared libraries.
Relinking of patched files is complete.
WARNING: directory '/opt/ora10g/product/10.2.0' is not owned by root
WARNING: directory '/opt/ora10g/product' is not owned by root
WARNING: directory '/opt/ora10g' is not owned by root
Preparing to recopy patched init and RC scripts.
Recopying init and RC scripts.
Startup will be queued to init within 30 seconds.
Starting up the CRS daemons.
Waiting for the patched CRS daemons to start.
This may take a while on some systems.
.
10204 patch successfully applied.
clscfg: EXISTING configuration version 3 detected.
clscfg: version 3 is 10G Release 2.
Successfully accumulated necessary OCR keys.
Using ports: CSS=49895 CRS=49896 EVMC=49898 and EVMR=49897.
node
node 1: node1 node1-priv node1
Creating OCR keys for user 'root', privgrp 'root'..
Operation successful.
clscfg -upgrade completed successfully
[root@node1 ~]# /opt/ora10g/product/10.2.0/crs_1/bin/crs_stat -t
Name Type Target State Host
------------------------------------------------------------
ora.node1.gsd application ONLINE ONLINE node1
ora.node1.ons application ONLINE ONLINE node1
ora.node1.vip application ONLINE ONLINE node1
[root@node1 ~]# /opt/ora10g/product/10.2.0/crs_1/bin/crsctl query crs softwareversion
CRS software version on node [node1] is [10.2.0.4.0]
[root@node1 ~]#
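The active version can be queried the same way to confirm the patch is in effect (not shown in the original; output should resemble the following):
[root@node1 ~]# /opt/ora10g/product/10.2.0/crs_1/bin/crsctl query crs activeversion
CRS active version on the cluster is [10.2.0.4.0]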
3) Patch the database software
[root@node1 ~]# /opt/ora10g/product/10.2.0/db_1/root.sh
Running Oracle10 root.sh script...
The following environment variables are set as:
ORACLE_OWNER= oracle
ORACLE_HOME= /opt/ora10g/product/10.2.0/db_1
Enter the full pathname of the local bin directory: [/usr/local/bin]:
The file "dbhome" already exists in /usr/local/bin. Overwrite it? (y/n)
[n]:
The file "oraenv" already exists in /usr/local/bin. Overwrite it? (y/n)
[n]:
The file "coraenv" already exists in /usr/local/bin. Overwrite it? (y/n)
[n]:
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root.sh script.
Now product-specific root actions will be performed.
[root@node1 ~]#
20) Create the ASM instance and the database
+ on node1
Steps omitted.
[oracle@node1 cssd]$ /opt/ora10g/product/10.2.0/crs_1/bin/crs_stat -t
Name Type Target State Host
------------------------------------------------------------
ora....B1.inst application ONLINE ONLINE node1
ora.RACDB.db application ONLINE ONLINE node1
ora....SM1.asm application ONLINE ONLINE node1
ora....E1.lsnr application ONLINE ONLINE node1
ora.node1.gsd application ONLINE ONLINE node1
ora.node1.ons application ONLINE ONLINE node1
ora.node1.vip application ONLINE ONLINE node1
[oracle@node1 cssd]$
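As a final check, srvctl can confirm the database and instance are up, assuming the RACDB database and RACDB1 instance names visible in the resources above:
[oracle@node1 cssd]$ srvctl status database -d RACDB
Instance RACDB1 is running on node node1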
Reposted from: http://blog.chinaunix.net/u1/46888/showart_2083713.html