Installing a Greenplum 5.10.2 Cluster: Hands-On
First, clean up any directories left over from a previous install, then load the Greenplum environment:
source /usr/local/greenplum-db/greenplum_path.sh
==================== Target architecture ====================
mdw      sdw1     sdw2     sdw3
master   seg0p    seg2p    seg4p
         seg1p    seg3p    seg5p
         seg5m    seg0m    seg2m
         seg4m    seg1m    seg3m
smdw (standby master)
1. Preparation -- run on every node
1.1 Disable the firewall
#systemctl status firewalld (check firewall status)
#systemctl stop firewalld (stop the firewall)
#systemctl disable firewalld (disable the firewall at boot)
systemctl stop firewalld && systemctl disable firewalld
systemctl status firewalld
If iptables is in use instead:
service iptables stop # stops the firewall now; it comes back after a reboot
chkconfig iptables off # disables the service at boot; takes effect after a reboot
1.2 Disable SELinux
vim /etc/selinux/config
SELINUX=disabled
setenforce 0
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
sestatus
1.3 Set the hostname
hostnamectl set-hostname sdw1
hostnamectl status
(On RHEL 6.x, set the hostname in /etc/sysconfig/network instead.)
1.4 Configure host name resolution (/etc/hosts)
vi /etc/hosts
10.102.254.24 sdw1
10.102.254.25 sdw2
10.102.254.26 sdw3
10.102.254.27 mdw
1.5 Tune kernel parameters
vi /etc/sysctl.conf
kernel.shmmax = 500000000
kernel.shmmni = 4096
kernel.shmall = 4000000000
kernel.sem = 250 512000 100 2048
kernel.sysrq = 1
kernel.core_uses_pid = 1
kernel.msgmnb = 65536
kernel.msgmax = 65536
kernel.msgmni = 2048
net.ipv4.tcp_syncookies = 1
net.ipv4.ip_forward = 0
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_max_syn_backlog = 4096
net.ipv4.conf.all.arp_filter = 1
net.ipv4.ip_local_port_range = 1025 65535
net.core.netdev_max_backlog = 10000
net.core.rmem_max = 2097152
net.core.wmem_max = 2097152
vm.overcommit_memory = 2
sysctl -p
Alternatively, overwrite /etc/sysctl.conf in one step (note that this replaces the whole file):
cat > /etc/sysctl.conf << EOF
# sysctl settings are defined through files in
# /usr/lib/sysctl.d/, /run/sysctl.d/, and /etc/sysctl.d/.
#
# Vendors settings live in /usr/lib/sysctl.d/.
# To override a whole file, create a new file with the same name in
# /etc/sysctl.d/ and put new settings there. To override
# only specific settings, add a file with a lexically later
# name in /etc/sysctl.d/ and put new settings there.
#
# For more information, see sysctl.conf(5) and sysctl.d(5).
kernel.shmmax = 500000000
kernel.shmmni = 4096
kernel.shmall = 4000000000
kernel.sem = 500 1024000 200 4096
kernel.sysrq = 1
kernel.core_uses_pid = 1
kernel.msgmnb = 65536
kernel.msgmax = 65536
kernel.msgmni = 2048
net.ipv4.tcp_syncookies = 1
net.ipv4.ip_forward = 0
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_max_syn_backlog = 4096
net.ipv4.conf.all.arp_filter = 1
net.ipv4.ip_local_port_range = 1025 65535
net.core.netdev_max_backlog = 10000
net.core.rmem_max = 2097152
net.core.wmem_max = 2097152
vm.overcommit_memory = 2
vm.swappiness = 1
kernel.pid_max = 655350
EOF
sysctl -p
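To spot-check individual values after loading (a quick sanity check; sysctl accepts a list of keys):
sysctl kernel.sem vm.overcommit_memory vm.swappiness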
1.6 Raise Linux resource limits
cat /etc/security/limits.conf
vi /etc/security/limits.conf
* soft nofile 65536
* hard nofile 65536
* soft nproc 131072
* hard nproc 131072
Or overwrite the file in one step (this replaces the existing contents):
cat > /etc/security/limits.conf << EOF
* soft nofile 65536
* hard nofile 65536
* soft nproc 131072
* hard nproc 131072
EOF
If you are on RHEL 6.x, note that /etc/security/limits.d/90-nproc.conf overrides the nproc value set above; see the official documentation for details.
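A minimal sketch of that override (90-nproc.conf is the stock file name on RHEL 6.x; adjust if your release differs):
cat > /etc/security/limits.d/90-nproc.conf << EOF
* soft nproc 131072
root soft nproc unlimited
EOF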
1.7 Devices and I/O: file systems
Create an XFS file system and mount it.
EXT4 (fourth extended filesystem) is the journaling file system that succeeds ext3 on Linux. EXT4 supports file systems up to 1 EB and single files up to 16 TB. That hardly matters for an ordinary desktop or server, but it matters a great deal for users of large disk arrays.
XFS is a 64-bit file system supporting a single file system of up to 8 EB minus 1 byte, though the practical ceiling depends on the host OS's maximum block limits; on a 32-bit Linux system, files and file systems are capped at 16 TB.
Each has its strengths, and their performance is broadly similar. Google, for example, evaluated XFS and JFS when upgrading from EXT2 and settled on EXT4: the results showed EXT4 and XFS performing about the same, but migrating from EXT2 to EXT4 was easier than migrating to XFS.
Example:
cat >> /etc/fstab << EOF
/dev/sdb1 /greenplum xfs rw,nodev,noatime,inode64,allocsize=16m 0 0
EOF
The Greenplum documentation recommends the XFS mount options rw,nodev,noatime,nobarrier,inode64.
cat /etc/fstab
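If the data disk has not been formatted yet, a minimal sketch (assuming the device is /dev/sdb1 and the mount point is /greenplum, matching the fstab entry above):
mkfs.xfs /dev/sdb1 # destroys any existing data on the partition
mkdir -p /greenplum
mount /greenplum # picks up the options from /etc/fstab
df -h /greenplum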
1.8 Disk I/O scheduler policy
The Linux disk I/O scheduler supports several policies. The default is CFQ; Greenplum recommends deadline.
Check a disk's current policy; the active one is shown in brackets, e.g. [cfq]:
cat /sys/block/sda/queue/scheduler
The deadline scheduler option is recommended. To specify a scheduler until the next system reboot,
run the following:
# echo schedulername > /sys/block/devname/queue/scheduler
echo deadline > /sys/block/sda/queue/scheduler
To persist the setting on RHEL/CentOS 7:
# grubby --update-kernel=ALL --args="elevator=deadline"
grubby --info=ALL
1.9 Tune disk read-ahead
fdisk -l
Check:
/sbin/blockdev --getra /dev/sda
Set:
/sbin/blockdev --setra 16384 /dev/sda
To persist across reboots, add the command to /etc/rc.d/rc.local, using the device pattern that matches your hardware:
Dell: blockdev --setra 16384 /dev/sd* (the pattern identifies the data disks)
HP: blockdev --setra 16384 /dev/cciss/c?d?*
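On RHEL/CentOS 7, /etc/rc.d/rc.local is not executable by default, so a sketch of the full persistence step (assuming a single data disk /dev/sdb):
echo '/sbin/blockdev --setra 16384 /dev/sdb' >> /etc/rc.d/rc.local
chmod +x /etc/rc.d/rc.local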
1.10 Disable transparent huge pages (THP)
On systems that use grub2 such as RHEL 7.x or CentOS 7.x, use the system utility grubby. This
command adds the parameter when run as root.
# grubby --update-kernel=ALL --args="transparent_hugepage=never"
After adding the parameter, reboot the system.
This cat command checks the state of THP. The output indicates that THP is disabled.
$ cat /sys/kernel/mm/*transparent_hugepage/enabled
always [never]
Registering the change as a service:
# create the init.d script
echo '#!/bin/sh
case $1 in
start)
if [ -d /sys/kernel/mm/transparent_hugepage ]; then
thp_path=/sys/kernel/mm/transparent_hugepage
elif [ -d /sys/kernel/mm/redhat_transparent_hugepage ]; then
thp_path=/sys/kernel/mm/redhat_transparent_hugepage
else
exit 0
fi
echo never > ${thp_path}/enabled
echo never > ${thp_path}/defrag
unset thp_path
;;
esac' > /etc/init.d/disable-transparent-hugepages
# register the systemd unit
echo '[Unit]
Description=Disable Transparent Hugepages
After=multi-user.target
[Service]
ExecStart=/etc/init.d/disable-transparent-hugepages start
Type=simple
[Install]
WantedBy=multi-user.target' > /etc/systemd/system/disable-thp.service
# disk read-ahead
/sbin/blockdev --getra /dev/sdb1 # check the current value
/sbin/blockdev --setra 65535 /dev/sdb1 # set the value
# create the init.d script
echo '#!/bin/sh
device_name=/dev/sdb1
case $1 in
start)
if mount | grep -q "^${device_name}"; then
/sbin/blockdev --setra 65535 ${device_name}
else
exit 0
fi
unset device_name
;;
esac' > /etc/init.d/blockdev-setra-sdb
# register the systemd unit
echo '[Unit]
Description=Blockdev --setra N
After=multi-user.target
[Service]
ExecStart=/etc/init.d/blockdev-setra-sdb start
Type=simple
[Install]
WantedBy=multi-user.target' > /etc/systemd/system/blockdev-setra-sdb.service
# set permissions and enable at boot
chmod 755 /etc/init.d/disable-transparent-hugepages
chmod 755 /etc/init.d/blockdev-setra-sdb
chmod 755 /etc/systemd/system/disable-thp.service
chmod 755 /etc/systemd/system/blockdev-setra-sdb.service
systemctl enable disable-thp blockdev-setra-sdb
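A quick check that the units load and THP is actually off (systemctl start runs the scripts immediately instead of waiting for a reboot):
systemctl daemon-reload
systemctl start disable-thp blockdev-setra-sdb
cat /sys/kernel/mm/*transparent_hugepage/enabled # expect [never]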
1.11 Disable IPC object removal for RHEL 7 or CentOS 7
Set this parameter in /etc/systemd/logind.conf on the Greenplum
Database host systems.
RemoveIPC=no
The setting takes effect after restarting the systemd-login service or rebooting the system. To
restart the service, run this command as the root user.
service systemd-logind restart
cat /etc/systemd/logind.conf
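A minimal non-interactive sketch (assuming the stock file still carries the commented default #RemoveIPC=yes; if no RemoveIPC line exists at all, append RemoveIPC=no instead):
sed -i 's/^#\?RemoveIPC=.*/RemoveIPC=no/' /etc/systemd/logind.conf
systemctl restart systemd-logind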
1.12 Time synchronization
/etc/chrony.conf
systemctl status chronyd.service -- check status
systemctl start chronyd.service -- start the service
systemctl enable chronyd.service -- enable at boot
systemctl status chronyd.service
Point /etc/chrony.conf at your NTP source, for example:
server 10.1.3.1 prefer
Check the synchronization sources:
chronyc sources -v
chronyc sourcestats -v
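A minimal sketch for the segment hosts, syncing them from the master rather than an external source (assumes mdw's address 10.102.254.27 from the hosts file above; adjust to your topology):
sed -i 's/^server /#server /' /etc/chrony.conf
echo 'server 10.102.254.27 prefer iburst' >> /etc/chrony.conf
systemctl restart chronyd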
1.13 Tune the sshd connection limit
/etc/ssh/sshd_config
MaxStartups 10:30:200
systemctl restart sshd.service
1.14 Install system dependency packages
yum -y install epel-release
yum -y install wget cmake3 git gcc gcc-c++ bison flex libedit-devel zlib zlib-devel perl-devel perl-ExtUtils-Embed python-devel libevent libevent-devel libxml2 libxml2-devel libcurl libcurl-devel bzip2 bzip2-devel net-tools libffi-devel openssl-devel
2. Installation
2.1 Create the user and group
# groupadd gpadmin
# useradd gpadmin -g gpadmin
# passwd gpadmin
New password: <changeme>
Retype new password: <changeme>
echo gpadmin | passwd gpadmin --stdin
2.2 Unpack and install as root
./greenplum-db-5.10.2-rhel6-x86_64.bin
I HAVE READ AND AGREE TO THE TERMS OF THE ABOVE PIVOTAL SOFTWARE
LICENSE AGREEMENT.
********************************************************************************
Do you accept the Pivotal Database license agreement? [yes|no]
********************************************************************************
yes
********************************************************************************
Provide the installation path for Greenplum Database or press ENTER to
accept the default installation path: /usr/local/greenplum-db-5.10.2
********************************************************************************
********************************************************************************
Install Greenplum Database into /usr/local/greenplum-db-5.10.2? [yes|no]
********************************************************************************
yes
********************************************************************************
/usr/local/greenplum-db-5.10.2 does not exist.
Create /usr/local/greenplum-db-5.10.2 ? [yes|no]
(Selecting no will exit the installer)
********************************************************************************
Change ownership after installation:
# chown -R gpadmin /usr/local/greenplum* (run after the gpadmin user has been created)
# chgrp -R gpadmin /usr/local/greenplum* (run after the gpadmin user has been created)
2.3 Set environment variables
cat >> .bashrc << EOF
export MASTER_DATA_DIRECTORY=/greenplum/gpdata/master/gpseg-1
source /usr/local/greenplum-db/greenplum_path.sh
EOF
source .bashrc
cat >> /home/gpadmin/.bash_profile <<EOF
export MASTER_DATA_DIRECTORY=/greenplum/gpdata/master/gpseg-1
source /usr/local/greenplum-db/greenplum_path.sh
export PGPORT=5432
export PGDATABASE=archdata
EOF
source /home/gpadmin/.bash_profile
2.4 Create the configuration files
Switch to root:
source /usr/local/greenplum-db/greenplum_path.sh
------ run only on mdw and smdw
mkdir /home/gpadmin/gpconfig
chown -R gpadmin:gpadmin /home/gpadmin/gpconfig
------ run only on mdw and smdw
cat >> /home/gpadmin/gpconfig/all_host <<EOF
mdw
sdw1
sdw2
sdw3
EOF
------ run only on mdw and smdw
cat >> /home/gpadmin/gpconfig/all_segment <<EOF
sdw1
sdw2
sdw3
EOF
chown -R gpadmin:gpadmin /home/gpadmin/gpconfig/all_host
chown -R gpadmin:gpadmin /home/gpadmin/gpconfig/all_segment
2.5 Set up passwordless SSH between hosts
source /usr/local/greenplum-db/greenplum_path.sh
/usr/local/greenplum-db/bin/gpssh-exkeys -f /home/gpadmin/gpconfig/all_host
2.6 Verify host connectivity
gpssh -f /home/gpadmin/gpconfig/all_host -e "ls -l"
2.7 Create the user on the other nodes in batch
gpssh -f /home/gpadmin/gpconfig/all_segment # opens an interactive gpssh session; the commands below run on every segment host
groupadd gpadmin
useradd gpadmin -g gpadmin
passwd gpadmin
echo gpadmin | passwd gpadmin --stdin
Exchange keys again as the gpadmin user:
source /usr/local/greenplum-db/greenplum_path.sh
/usr/local/greenplum-db/bin/gpssh-exkeys -f /home/gpadmin/gpconfig/all_host
gpssh -f /home/gpadmin/gpconfig/all_host -e "ls -l"
Check time synchronization:
gpssh -f /home/gpadmin/gpconfig/all_host -e "date"
2.8 Distribute the software to all segment hosts
Run as root:
source /usr/local/greenplum-db/greenplum_path.sh
gpseginstall -f /home/gpadmin/gpconfig/all_host -u gpadmin -p gpadmin
2.9 Verify the installation
o Log in as the gpadmin user and source the environment:
• source /usr/local/greenplum-db/greenplum_path.sh
o Use the gpssh utility to see if you can log in to all hosts without a password prompt:
• gpssh -f /home/gpadmin/gpconfig/all_host -e "ls -l"
2.10 Create the data directories (as root)
mkdir -p /greenplum/gpdata/master
chown gpadmin:gpadmin /greenplum/gpdata/master
gpssh -f /home/gpadmin/gpconfig/all_segment -e 'mkdir -p /greenplum/gpdata/primary1'
gpssh -f /home/gpadmin/gpconfig/all_segment -e 'mkdir -p /greenplum/gpdata/primary2'
gpssh -f /home/gpadmin/gpconfig/all_segment -e 'chown -R gpadmin:gpadmin /greenplum/gpdata'
gpssh -f /home/gpadmin/gpconfig/all_segment -e 'mkdir -p /greenplum/gpdata/mirror1'
gpssh -f /home/gpadmin/gpconfig/all_segment -e 'mkdir -p /greenplum/gpdata/mirror2'
gpssh -f /home/gpadmin/gpconfig/all_segment -e 'chown -R gpadmin:gpadmin /greenplum/gpdata'
Or create them in batch:
gpssh -f /home/gpadmin/gpconfig/all_segment -e 'mkdir -p /greenplum/gpdata/primary{1..2}'
gpssh -f /home/gpadmin/gpconfig/all_segment -e 'mkdir -p /greenplum/gpdata/mirror{1..2}'
gpssh -f /home/gpadmin/gpconfig/all_segment -e 'chown -R gpadmin:gpadmin /greenplum/gpdata'
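A quick check that the layout matches on every host:
gpssh -f /home/gpadmin/gpconfig/all_segment -e 'ls -ld /greenplum/gpdata/*'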
2.11 Validate the system
Check OS settings and benchmark performance.
Check command: gpcheck -f host_file -m mdw -s smdw
Validating Hardware Performance
o gpcheckperf can be used to identify hardware and system-level issues on the machines in your Greenplum
Database array.
o Network Performance (gpnetbench*)
• gpcheckperf -f hostfile_gpchecknet_ic1 -r N -d /tmp > subnet1.out
o Disk I/O Performance (dd test) & Memory Bandwidth (stream test)
• gpcheckperf -f hostfile_gpcheckperf -r ds -D -d /data/primary -d /data/mirror
Validate the OS configuration:
source /usr/local/greenplum-db/greenplum_path.sh
gpcheck -f /home/gpadmin/gpconfig/all_host -m mdw
Validate hardware performance (network and disk I/O):
gpcheckperf -f /home/gpadmin/gpconfig/all_host -r N -d /tmp > checknetwork.out
[root@mdw greenplum-db]# cat checknetwork.out
/usr/local/greenplum-db/./bin/gpcheckperf -f /home/gpadmin/gpconfig/all_host -r N -d /tmp
-------------------
-- NETPERF TEST
-------------------
====================
== RESULT
====================
Netperf bisection bandwidth test
mdw -> sdw1 = 112.340000
sdw2 -> sdw3 = 112.340000
sdw1 -> mdw = 112.330000
sdw3 -> sdw2 = 112.330000
Summary:
sum = 449.34 MB/sec
min = 112.33 MB/sec
max = 112.34 MB/sec
avg = 112.33 MB/sec
median = 112.34 MB/sec
gpcheckperf -f /home/gpadmin/gpconfig/all_host -r ds -D -d /greenplum/gpdata/primary1 -d /greenplum/gpdata/mirror1 > checkDISKIO.out
[root@mdw greenplum-db]# gpcheckperf -f /home/gpadmin/gpconfig/all_host -r ds -D -d /greenplum/gpdata/primary1 -d /greenplum/gpdata/mirror1
/usr/local/greenplum-db/./bin/gpcheckperf -f /home/gpadmin/gpconfig/all_host -r ds -D -d /greenplum/gpdata/primary1 -d /greenplum/gpdata/mirror1
--------------------
-- DISK WRITE TEST
--------------------
--------------------
-- DISK READ TEST
--------------------
--------------------
-- STREAM TEST
--------------------
====================
== RESULT
====================
disk write avg time (sec): 20.88
disk write tot bytes: 132920115200
disk write tot bandwidth (MB/s): 6074.65
disk write min bandwidth (MB/s): 1476.04 [ mdw]
disk write max bandwidth (MB/s): 1551.18 [sdw3]
-- per host bandwidth --
disk write bandwidth (MB/s): 1476.04 [ mdw]
disk write bandwidth (MB/s): 1537.63 [sdw1]
disk write bandwidth (MB/s): 1509.80 [sdw2]
disk write bandwidth (MB/s): 1551.18 [sdw3]
disk read avg time (sec): 59.80
disk read tot bytes: 132920115200
disk read tot bandwidth (MB/s): 2175.57
disk read min bandwidth (MB/s): 454.54 [sdw2]
disk read max bandwidth (MB/s): 700.04 [sdw1]
-- per host bandwidth --
disk read bandwidth (MB/s): 520.03 [ mdw]
disk read bandwidth (MB/s): 700.04 [sdw1]
disk read bandwidth (MB/s): 454.54 [sdw2]
disk read bandwidth (MB/s): 500.96 [sdw3]
stream tot bandwidth (MB/s): 49348.52
stream min bandwidth (MB/s): 12297.76 [ mdw]
stream max bandwidth (MB/s): 12388.57 [sdw2]
-- per host bandwidth --
stream bandwidth (MB/s): 12297.76 [ mdw]
stream bandwidth (MB/s): 12321.47 [sdw1]
stream bandwidth (MB/s): 12388.57 [sdw2]
stream bandwidth (MB/s): 12340.73 [sdw3]
2.12 Initialize the database
cp $GPHOME/docs/cli_help/gpconfigs/gpinitsystem_config /home/gpadmin/gpconfig/gpinitsystem_config
cat >> /home/gpadmin/gpconfig/hostfile_gpinitsystem <<EOF
sdw1
sdw2
sdw3
EOF
chown -R gpadmin:gpadmin /home/gpadmin/gpconfig/gpinitsystem_config
chown -R gpadmin:gpadmin /home/gpadmin/gpconfig/hostfile_gpinitsystem
Adjust the parameters as gpadmin:
su - gpadmin
ARRAY_NAME="EMC Greenplum DW"
PORT_BASE=40000
SEG_PREFIX=gpseg
declare -a DATA_DIRECTORY=(/greenplum/gpdata/primary1 /greenplum/gpdata/primary2)
MASTER_HOSTNAME=mdw
MASTER_DIRECTORY=/greenplum/gpdata/master
MASTER_PORT=5432
TRUSTED_SHELL=ssh
CHECK_POINT_SEGMENTS=8
ENCODING=UNICODE
MIRROR_PORT_BASE=50000
REPLICATION_PORT_BASE=41000
MIRROR_REPLICATION_PORT_BASE=51000
declare -a MIRROR_DATA_DIRECTORY=(/greenplum/gpdata/mirror1 /greenplum/gpdata/mirror2)
vim /home/gpadmin/gpconfig/gpinitsystem_config
The modified file:
[gpadmin@mdw ~]$ cat /home/gpadmin/gpconfig/gpinitsystem_config
# FILE NAME: gpinitsystem_config
# Configuration file needed by the gpinitsystem
################################################
#### REQUIRED PARAMETERS
################################################
#### Name of this Greenplum system enclosed in quotes.
ARRAY_NAME="Greenplum Data Platform"
#### Naming convention for utility-generated data directories.
SEG_PREFIX=gpseg
#### Base number by which primary segment port numbers
#### are calculated.
PORT_BASE=40000
#### File system location(s) where primary segment data directories
#### will be created. The number of locations in the list dictate
#### the number of primary segments that will get created per
#### physical host (if multiple addresses for a host are listed in
#### the hostfile, the number of segments will be spread evenly across
#### the specified interface addresses).
declare -a DATA_DIRECTORY=(/greenplum/gpdata/primary1 /greenplum/gpdata/primary2)
#### OS-configured hostname or IP address of the master host.
MASTER_HOSTNAME=mdw
#### File system location where the master data directory
#### will be created.
MASTER_DIRECTORY=/greenplum/gpdata/master
#### Port number for the master instance.
MASTER_PORT=5432
#### Shell utility used to connect to remote hosts.
TRUSTED_SHELL=ssh
#### Maximum log file segments between automatic WAL checkpoints.
CHECK_POINT_SEGMENTS=8
#### Default server-side character set encoding.
ENCODING=UNICODE
################################################
#### OPTIONAL MIRROR PARAMETERS
################################################
#### Base number by which mirror segment port numbers
#### are calculated.
MIRROR_PORT_BASE=50000
#### Base number by which primary file replication port
#### numbers are calculated.
REPLICATION_PORT_BASE=41000
#### Base number by which mirror file replication port
#### numbers are calculated.
MIRROR_REPLICATION_PORT_BASE=51000
#### File system location(s) where mirror segment data directories
#### will be created. The number of mirror locations must equal the
#### number of primary locations as specified in the
#### DATA_DIRECTORY parameter.
declare -a MIRROR_DATA_DIRECTORY=(/greenplum/gpdata/mirror1 /greenplum/gpdata/mirror2)
################################################
#### OTHER OPTIONAL PARAMETERS
################################################
#### Create a database of this name after initialization.
#DATABASE_NAME=name_of_database
#### Specify the location of the host address file here instead of
#### with the -h option of gpinitsystem.
#MACHINE_LIST_FILE=/home/gpadmin/gpconfigs/hostfile_gpinitsystem
Initialize the database as the gpadmin user:
gpinitsystem -c /home/gpadmin/gpconfig/gpinitsystem_config -h /home/gpadmin/gpconfig/hostfile_gpinitsystem
To add a master standby and switch to a spread mirror layout:
gpinitsystem -c gpconfigs/gpinitsystem_config -h gpconfigs/hostfile_gpinitsystem -s standby_master_hostname -S
(-s names the standby master host; -S spreads mirrors across hosts)
2.13 Configure and verify environment variables
Key variables (generic documentation defaults; this cluster's actual values appear in .bash_profile below):
MASTER_DATA_DIRECTORY=/data/master/gpseg-1
GPHOME=/usr/local/greenplum-db
PGDATABASE=gpadmin
[gpadmin@mdw ~]$ cat .bash_profile
# .bash_profile
# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
# User specific environment and startup programs
PATH=$PATH:$HOME/.local/bin:$HOME/bin
export PATH
export MASTER_DATA_DIRECTORY=/greenplum/gpdata/master/gpseg-1
source /usr/local/greenplum-db/greenplum_path.sh
export PGPORT=5432
export PGDATABASE=archdata
2.14 Cluster checks and routine start/stop
Check processes: $ gpssh -f /home/gpadmin/gpconfig/all_host -e "ps -eaf|grep green"
Set a password for remote gpadmin access:
psql postgres gpadmin
alter user gpadmin encrypted password 'gpadmin';
\q
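Password login from a remote client also needs a pg_hba.conf entry on the master; a minimal sketch (assuming the client subnet 10.102.254.0/24; adjust to your network):
cat >> /greenplum/gpdata/master/gpseg-1/pg_hba.conf << EOF
host all gpadmin 10.102.254.0/24 md5
EOF
gpstop -u # reload configuration without restarting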
Query test:
psql -hmdw -p 5432 -d postgres -U gpadmin -c 'select dfhostname, dfspace,dfdevice from gp_toolkit.gp_disk_free order by dfhostname;'
[gpadmin@mdw ~]$ psql -hmdw -p 5432 -d postgres -U gpadmin -c 'select dfhostname, dfspace,dfdevice from gp_toolkit.gp_disk_free order by dfhostname;'
dfhostname | dfspace | dfdevice
------------+----------+----------------------------
sdw1 | 98708120 | /dev/mapper/VolGroup-root
sdw1 | 98708120 | /dev/mapper/VolGroup-root
sdw2 | 98705600 | /dev/mapper/VolGroup-root
sdw2 | 98705600 | /dev/mapper/VolGroup-root
sdw3 | 98705144 | /dev/mapper/VolGroup-root
sdw3 | 98705144 | /dev/mapper/VolGroup-root
(6 rows)
psql -h mdw -p 5432 -d postgres -U gpadmin -c '\l+'
[gpadmin@mdw ~]$ psql -h mdw -p 5432 -d postgres -U gpadmin -c '\l+'
List of databases
Name | Owner | Encoding | Access privileges | Size | Tablespace | Description
-----------+---------+----------+---------------------+-------+------------+---------------------------
postgres | gpadmin | UTF8 | | 73 MB | pg_default |
template0 | gpadmin | UTF8 | =c/gpadmin | 72 MB | pg_default |
: gpadmin=CTc/gpadmin
template1 | gpadmin | UTF8 | =c/gpadmin | 73 MB | pg_default | default template database
: gpadmin=CTc/gpadmin
(3 rows)
[gpadmin@mdw ~]$
------- Start/stop the database; run on mdw --------
On the master node, as gpadmin, run gpstart -a to start the cluster (without -a you must type yes to confirm).
On the master node, as gpadmin, run gpstop -a to stop the cluster (without -a you must type yes to confirm).
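Other commonly used variants:
gpstop -r # restart the cluster
gpstop -u # reload pg_hba.conf and runtime parameters without a full restart
gpstate -m # show mirror status
gpstate -s # detailed per-segment status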
gpstate
[gpadmin@mdw ~]$ gpstate
20200419:00:52:03:001506 gpstate:mdw:gpadmin-[INFO]:-Starting gpstate with args:
20200419:00:52:03:001506 gpstate:mdw:gpadmin-[INFO]:-local Greenplum Version: 'postgres (Greenplum Database) 5.10.2 build commit:b3c02f3acd880e2d676dacea36be015e4a3826d4'
20200419:00:52:03:001506 gpstate:mdw:gpadmin-[INFO]:-master Greenplum Version: 'PostgreSQL 8.3.23 (Greenplum Database 5.10.2 build commit:b3c02f3acd880e2d676dacea36be015e4a3826d4) on x86_64-pc-linux-gnu, compiled by GCC gcc (GCC) 6.2.0, 64-bit compiled on Aug 10 2018 07:30:24'
20200419:00:52:03:001506 gpstate:mdw:gpadmin-[INFO]:-Obtaining Segment details from master...
20200419:00:52:03:001506 gpstate:mdw:gpadmin-[INFO]:-Gathering data from segments...
.
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:-Greenplum instance status summary
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:-----------------------------------------------------
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Master instance = Active
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Master standby = No master standby configured
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total segment instance count from metadata = 12
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:-----------------------------------------------------
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Primary Segment Status
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:-----------------------------------------------------
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total primary segments = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total primary segment valid (at master) = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total primary segment failures (at master) = 0
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number of postmaster.pid files missing = 0
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number of postmaster.pid files found = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number of postmaster.pid PIDs missing = 0
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number of postmaster.pid PIDs found = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number of /tmp lock files missing = 0
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number of /tmp lock files found = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number postmaster processes missing = 0
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number postmaster processes found = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:-----------------------------------------------------
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Mirror Segment Status
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:-----------------------------------------------------
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total mirror segments = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total mirror segment valid (at master) = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total mirror segment failures (at master) = 0
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number of postmaster.pid files missing = 0
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number of postmaster.pid files found = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number of postmaster.pid PIDs missing = 0
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number of postmaster.pid PIDs found = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number of /tmp lock files missing = 0
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number of /tmp lock files found = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number postmaster processes missing = 0
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number postmaster processes found = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number mirror segments acting as primary segments = 0
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:- Total number mirror segments acting as mirror segments = 6
20200419:00:52:04:001506 gpstate:mdw:gpadmin-[INFO]:-----------------------------------------------------
[gpadmin@mdw ~]$
Source: ITPUB Blog, http://blog.itpub.net/500314/viewspace-2687054/ (please credit the source when reprinting).