Oceanbase 4.0 三節點叢集x86平臺安裝實踐
1、基礎環境說明:
##系統版本
[root@observer01 ~]# cat /etc/os-release
NAME="Kylin Linux Advanced Server"
VERSION="V10 (Sword)"
ID="kylin"
VERSION_ID="V10"
PRETTY_NAME="Kylin Linux Advanced Server V10 (Sword)"
ANSI_COLOR="0;31"
##系統發行版本
[root@observer01 ~]# uname -r
4.19.90-24.4.v2101.ky10.x86_64
##伺服器配置:
192.168.26.100 observer01
192.168.26.120 observer02
192.168.26.130 observer03
##介質:
oceanbase-all-in-one-4.0.0.0-100120230113164218.el7.x86_64.tar.gz
##目錄規劃:
/data 為資料盤
/redo 存放 redo 日誌
/obproxy obproxy
/home/admin/oceanbase 存放 OceanBase 資料庫的二進位制檔案和執行日誌。
##記憶體和磁碟空間要求:
MEM:free -g要求available>=3G
/目錄空間要求>=50G
2、部署前配置,要求yum配置映象,且可用。
mv /etc/yum.repos.d/kylin_x86_64.repo /etc/yum.repos.d/kylin_x86_64.repobak
cat <<EOF>> /etc/yum.repos.d/kylin_local.repo
[kylin10]
name=added from: file:///mnt
baseurl=file:///mnt
enabled=1
gpgcheck=0
EOF
mount /dev/sr0 /mnt
a、ssh互信配置,相比官網的原指令碼進行相應調整
sh configssh.sh
b、配置時鐘同步,確保時間一致
c、配置磁碟
mkdir /data
mkdir /redo
mkdir /obproxy
mkdir /obagent
mkdir /prometheus
mkdir /grafana
d、配置limit
cp /etc/security/limits.conf /etc/security/limits.conf_bak
ulimit -a
cat <<EOF>> /etc/security/limits.conf
root soft nofile 655350
root hard nofile 655350
* soft nofile 655350
* hard nofile 655350
* soft stack 20480
* hard stack 20480
* soft nproc 655360
* hard nproc 655360
* soft core unlimited
* hard core unlimited
EOF
cat /etc/security/limits.conf
ulimit -a
e、修改sysctl配置
cp /etc/sysctl.conf /etc/sysctl.conf_bak
cat <<EOF>> /etc/sysctl.conf
# for oceanbase
## 修改核心非同步 I/O 限制
fs.aio-max-nr=1048576
## 網路最佳化
net.core.somaxconn = 2048
net.core.netdev_max_backlog = 10000
net.core.rmem_default = 16777216
net.core.wmem_default = 16777216
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.ip_local_port_range = 3500 65535
net.ipv4.ip_forward = 0
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.tcp_syncookies = 0
net.ipv4.tcp_rmem = 4096 87380 16777216
net.ipv4.tcp_wmem = 4096 65536 16777216
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_fin_timeout = 15
# (原文此處重複了一行 net.ipv4.tcp_max_syn_backlog = 16384,與上方設定相同,已合併)
net.ipv4.tcp_tw_reuse = 1
# 注意:net.ipv4.tcp_tw_recycle 已在 Linux 4.12 核心中移除(本環境核心為 4.19),
# 保留該項會導致 sysctl -p 報錯,故此處註釋掉
# net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_slow_start_after_idle=0
vm.swappiness = 0
vm.min_free_kbytes = 2097152
# 此處為 OceanBase 資料庫的 data 目錄
kernel.core_pattern = /data/core-%e-%p-%t
EOF
sysctl -p
f、關閉防火牆
systemctl disable firewalld
systemctl stop firewalld
systemctl status firewalld
g、關閉 SELinux
cp /etc/selinux/config /etc/selinux/config_bak
sed -i 's#SELINUX=enforcing#SELINUX=disabled#' /etc/selinux/config
cat /etc/selinux/config | grep -v '^#' | grep -v '^$'
h、建立使用者
useradd -U admin -d /home/admin -s /bin/bash
echo "admin" | passwd --stdin admin
echo "admin ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
chown -R admin:admin /data
chown -R admin:admin /redo
chown -R admin:admin /obproxy
chown -R admin:admin /obagent
chown -R admin:admin /prometheus
chown -R admin:admin /grafana
3、開始安裝
a、
[root@observer01 soft]# tar -xzf oceanbase-all-in-one-4.0.0.0-100120230113164218.el7.x86_64.tar.gz
[root@observer01 soft]# cd oceanbase-all-in-one/
[root@observer01 oceanbase-all-in-one]# ls
bin conf obclient obd README.md rpms VERSION
[root@observer01 oceanbase-all-in-one]# cd bin/
[root@observer01 bin]# sh install.sh
......
add auto set env logic to profile: /root/.bash_profile
#####################################################################
Install Finished
=====================================================================
Setup Environment: source ~/.oceanbase-all-in-one/bin/env.sh
Quick Start: obd demo
More Details: obd -h
=====================================================================
配置檔案路徑:
~/.oceanbase-all-in-one/conf/autodeploy
[root@observer01 autodeploy]# cat all-components.yaml | grep -v ' #' | grep -v '^$' > baseOS.yaml
##all-components.yaml該檔案中包含預設的多數引數,如需調整可以根據檔案內容取消註釋即可
[root@observer01 autodeploy]# cat baseOS.yaml
## Only need to configure when remote login is required
# user:
# username: your username
# password: your password if need
# key_file: your ssh-key file path if need
# port: your ssh port, default 22
# timeout: ssh connection timeout (second), default 30
oceanbase-ce:
servers:
- name: observer01
ip: 192.168.26.100
- name: observer02
ip: 192.168.26.120
- name: observer03
ip: 192.168.26.130
global:
home_path: /home/admin/oceanbase
server1:
zone: zone1
server2:
zone: zone2
server3:
zone: zone3
obproxy-ce:
# Set dependent components for the component.
# When the associated configurations are not done, OBD will automatically get the these configurations from the dependent components.
depends:
- oceanbase-ce
servers:
- 192.168.26.100
global:
listen_port: 2883 # External port. The default value is 2883.
prometheus_listen_port: 2884 # The Prometheus port. The default value is 2884.
home_path: /obproxy
enable_cluster_checkout: false
skip_proxy_sys_private_check: true
enable_strict_kernel_release: false
obagent:
# Set dependent components for the component.
# When the associated configurations are not done, OBD will automatically get the these configurations from the dependent components.
depends:
- oceanbase-ce
# The list of servers to be monitored. This list is consistent with the servers in oceanbase-ce.
servers:
- name: observer01
ip: 192.168.26.100
- name: observer02
ip: 192.168.26.120
- name: observer03
ip: 192.168.26.130
global:
home_path: /obagent
prometheus:
servers:
- 192.168.26.100
depends:
- obagent
global:
home_path: /prometheus
grafana:
servers:
- 192.168.26.100
depends:
- prometheus
global:
home_path: /grafana
login_password: oceanbase # Grafana login password. The default value is 'oceanbase'.
b、部署叢集
[root@observer01 autodeploy]# obd
You need to give some commands.
Try `obd --help` for more information.
Usage: obd <command> [options]
Available commands:
cluster Deploy and manage a cluster.
demo Quickly start
mirror Manage a component repository for OBD.
repo Manage local repository for OBD.
test Run test for a running deployment.
update Update OBD.
Options:
--version show program's version number and exit
-h, --help Show help and exit.
-v, --verbose Activate verbose output.
[root@observer01 autodeploy]# obd cluster autodeploy gycob -c /root/.oceanbase-all-in-one/conf/autodeploy/baseOS.yaml
[ERROR] Parsing error:
while constructing a mapping
in "/root/.obd/cluster/gycob/config.yaml", line 8, column 1
found duplicate key "obproxy-ce" with value "ordereddict([('depends', ['oceanbase-ce']), ('servers', ['192.168.26.120']), ('global', ordereddict([('home_path', '/obproxy
'), ('skip_proxy_sys_private_check', True), ('enable_strict_kernel_release', False)]))])" (original value: "ordereddict([('depends', ['oceanbase-ce']), ('servers', ['192.168.26.100']), ('global', ordereddict([('listen_port', 2883), ('prometheus_listen_port', 2884), ('home_path', '/obproxy'), ('enable_cluster_checkout', False), ('skip_proxy_sys_private_check', True), ('enable_strict_kernel_release', False)]))])") in "/root/.obd/cluster/gycob/config.yaml", line 38, column 1
To suppress this check see:
    http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys
Duplicate keys will become an error in future releases, and are errors
by default when using the new API.
原因:obproxy-ce重複
[root@observer01 ~]# obd cluster autodeploy gycob -c /root/.oceanbase-all-in-one/conf/autodeploy/baseOS.yaml
[root@observer01 ~]# cat /root/.oceanbase-all-in-one/conf/autodeploy/baseOS.yaml
## Only need to configure when remote login is required
# user:
# username: your username
# password: your password if need
# key_file: your ssh-key file path if need
# port: your ssh port, default 22
# timeout: ssh connection timeout (second), default 30
oceanbase-ce:
servers:
- name: observer01
ip: 192.168.26.100
- name: observer02
ip: 192.168.26.120
- name: observer03
ip: 192.168.26.130
global:
home_path: /home/admin/oceanbase
server1:
zone: zone1
server2:
zone: zone2
server3:
zone: zone3
obproxy-ce:
# Set dependent components for the component.
# When the associated configurations are not done, OBD will automatically get the these configurations from the dependent components.
depends:
- oceanbase-ce
servers:
- 192.168.26.100
global:
listen_port: 2883 # External port. The default value is 2883.
prometheus_listen_port: 2884 # The Prometheus port. The default value is 2884.
home_path: /obproxy
enable_cluster_checkout: false
skip_proxy_sys_private_check: true
enable_strict_kernel_release: false
obagent:
# Set dependent components for the component.
# When the associated configurations are not done, OBD will automatically get the these configurations from the dependent components.
depends:
- oceanbase-ce
# The list of servers to be monitored. This list is consistent with the servers in oceanbase-ce.
servers:
- name: observer01
ip: 192.168.26.100
- name: observer02
ip: 192.168.26.120
- name: observer03
ip: 192.168.26.130
global:
home_path: /obagent
prometheus:
servers:
- 192.168.26.100
depends:
- obagent
global:
home_path: /prometheus
port: 9091 # The http port to use. Along with address, corresponds to the `web.listen-address` parameter.
grafana:
servers:
- 192.168.26.100
depends:
- prometheus
global:
home_path: /grafana
login_password: oceanbase # Grafana login password. The default value is 'oceanbase'.
[root@observer01 ~]#
[root@observer01 ~]# obd cluster display gycob
Get local repositories and plugins ok
Open ssh connection ok
Cluster status check ok
Connect to observer ok
Wait for observer init ok
+--------------------------------------------------+
| observer |
+----------------+---------+------+-------+--------+
| ip | version | port | zone | status |
+----------------+---------+------+-------+--------+
| 192.168.26.100 | 4.0.0.0 | 2881 | zone1 | ACTIVE |
| 192.168.26.120 | 4.0.0.0 | 2881 | zone1 | ACTIVE |
| 192.168.26.130 | 4.0.0.0 | 2881 | zone1 | ACTIVE |
+----------------+---------+------+-------+--------+
obclient -h192.168.26.100 -P2881 -uroot -Doceanbase -A
Connect to obproxy ok
+--------------------------------------------------+
| obproxy |
+----------------+------+-----------------+--------+
| ip | port | prometheus_port | status |
+----------------+------+-----------------+--------+
| 192.168.26.100 | 2883 | 2884 | active |
+----------------+------+-----------------+--------+
obclient -h192.168.26.100 -P2883 -uroot -Doceanbase -A
+----------------------------------------------------+
| obagent |
+----------------+-------------+------------+--------+
| ip | server_port | pprof_port | status |
+----------------+-------------+------------+--------+
| 192.168.26.100 | 8088 | 8089 | active |
| 192.168.26.120 | 8088 | 8089 | active |
| 192.168.26.130 | 8088 | 8089 | active |
+----------------+-------------+------------+--------+
Connect to Prometheus ok
+-------------------------------------------------------+
| prometheus |
+----------------------------+------+----------+--------+
| url | user | password | status |
+----------------------------+------+----------+--------+
| http://192.168.26.100:9091 | | | active |
+----------------------------+------+----------+--------+
Connect to grafana ok
+---------------------------------------------------------------------+
| grafana |
+----------------------------------------+-------+-----------+--------+
| url | user | password | status |
+----------------------------------------+-------+-----------+--------+
| http://192.168.26.100:3000/d/oceanbase | admin | oceanbase | active |
+----------------------------------------+-------+-----------+--------+
[root@observer01 ~]#
叢集日常操作命令:
[root@observer01 ~]# obd cluster
autodeploy check4ocp chst deploy destroy display edit-config list redeploy reinstall reload restart start stop tenant upgrade
[root@observer01 ~]# cat configssh.sh
#!/usr/bin/bash
# configssh.sh — set up passwordless root ssh trust between the three
# cluster nodes (run once on each node, or on one node that can reach all).
#
# Requires: yum repo access (installs `expect` automatically).
# NOTE(review): the root password is hardcoded in plain text below; this is
# only acceptable in a closed lab environment. For production, distribute
# keys manually with ssh-copy-id or use a proper key-management tool.
set -u

SERVERS=("root@192.168.26.100" "root@192.168.26.120" "root@192.168.26.130")
PASSWORD="BOB98ufx2"

# Generate a local rsa key pair, answering every prompt non-interactively
# (accept default path, empty passphrase, overwrite if it already exists).
keygen() {
  yum -y install expect
  expect -c "
    spawn ssh-keygen -t rsa
    expect {
      *(/root/.ssh/id_rsa):* { send -- \r; exp_continue }
      *(y/n)*                { send -- y\r; exp_continue }
      *Enter*                { send -- \r; exp_continue }
      eof                    { exit 0 }
    }
    expect eof
  "
}

# Push the local public key to one host ($1 = user@host), auto-accepting
# the host-key prompt and supplying $PASSWORD when asked.
copy() {
  local host=$1
  expect -c "
    set timeout 5
    spawn ssh-copy-id $host
    expect {
      *(yes/no*   { send -- yes\r; exp_continue }
      *password:* { send -- $PASSWORD\r; exp_continue }
      eof         { exit 0 }
    }
    expect eof
  "
}

# Generate the key once, then copy it to every server in the list.
ssh_copy_id_to_all() {
  local host
  keygen
  for host in "${SERVERS[@]}"; do
    copy "$host"
  done
}

ssh_copy_id_to_all
來自 “ ITPUB部落格 ” ,連結:http://blog.itpub.net/24585765/viewspace-2938691/,如需轉載,請註明出處,否則將追究法律責任。
相關文章
- redhat安裝雙節點cassandra叢集Redhat
- 容器雲平臺物理叢集配置實踐
- hadoop叢集多節點安裝詳解Hadoop
- 安裝 Hadoop:設定單節點 Hadoop 叢集Hadoop
- uWSGI叢集平臺
- kubernetes實踐之五:Node節點安裝
- consul 多節點/單節點叢集搭建
- 4.2 叢集節點初步搭建
- 安裝Greenplum 5.2 叢集實戰
- oceanbase 安裝叢集 install OB rpm報錯處理
- MongoDB叢集搭建(包括隱藏節點,仲裁節點)MongoDB
- 安裝 REDIS 叢集Redis
- 安裝Kafka叢集Kafka
- 三艾雲 Kubernetes 叢集最佳實踐
- 【Docker】Docker三劍客實踐之部署叢集Docker
- 【K8S】基於單Master節點安裝K8S叢集K8SAST
- 美團點評Kubernetes叢集管理實踐
- HAC叢集更改IP(單節點更改、全部節點更改)
- FreeBSD下安裝配置Hadoop叢集(三)Hadoop
- Oracle叢集軟體管理-新增和刪除叢集節點Oracle
- linux搭建kafka叢集,多master節點叢集說明LinuxKafkaAST
- Linux4.0平臺下Oracle10g安裝LinuxOracle
- Redis 4.0叢集環境部署Redis
- 二進位制安裝k8s高可用叢集(六):Node節點配置K8S
- 完整安裝always on叢集
- 安裝Consul叢集
- 快速安裝 kafka 叢集Kafka
- FastDFS 叢集 安裝 配置AST
- Redis服務之叢集節點管理Redis
- Redis Manager 叢集管理與節點管理Redis
- Jedis操作單節點redis,叢集及redisTemplate操作redis叢集(一)Redis
- kubernetes實踐之三十四: Master節點安裝與配置AST
- MySQL DB 叢集管理平臺--orchestratorMySql
- 安全測試之 kali_liunx DVWA 實踐平臺安裝
- Docker Swarm 叢集搭建實踐DockerSwarm
- influxDB叢集模式實踐UX模式
- RabbitMQ叢集運維實踐MQ運維
- 學習筆記(6):搞定大資料平臺:從入門到實戰-叢集JDK安裝配置筆記大資料JDK