基於4個節點的corosync + pacemaker PG 資料庫 HA 配置

babyyellow發表於2013-06-09
最近測試完成了一套4節點的PG 資料庫

實施策略本著省錢夠用的原則,採用了3拖一方案

3套主庫,一套從庫

從庫採用單機多例項模式為3套主庫提供hot standby 備庫,用以日常報表,查詢,不做業務支援。


歡迎各位同學提出寶貴意見。

PS : heartbeat 提供的 pgsql 資源指令碼是不支援 hot standby 切換的,我們在從庫的程式碼上做了適當修改。

主要修改原則,在前面的文章裡已經做了說明了。

有需要的同學聯絡我。

下面是HA 的配置: 系統原則不變,遮蔽了部分敏感資訊。

# Cluster membership: four nodes — three primary database hosts plus one
# standby host that runs the hot-standby instances for reporting/queries.
node db1
node db2
node db3
node db4
# IPMI STONITH device (fence_ipmilan). Credentials are anonymized.
# Fixed: "meta." is not valid crmsh syntax — the keyword is "meta".
# Also normalized stray leading/double spaces inside pcmk_host_list.
primitive fence_x stonith:fence_ipmilan \
    params ipaddr="10.10.x.x" login="USER" passwd="PASSWD" lanplus="true" action="reboot" pcmk_reboot_action="reboot" pcmk_host_list="db1 db2 db3 db4" pcmk_monitor_action="status" \
    op monitor interval="30s" timeout="60s" \
    op start interval="0" timeout="120s" \
    op stop interval="0" timeout="120s" \
    meta target-role="Started"
# IPMI STONITH device (fence_ipmilan). Credentials are anonymized.
# Fixed: "meta." -> "meta"; normalized whitespace in pcmk_host_list.
primitive fence_y stonith:fence_ipmilan \
    params ipaddr="10.10.y.y" login="USER" passwd="PASSWD" lanplus="true" action="reboot" pcmk_reboot_action="reboot" pcmk_host_list="db1 db2 db3 db4" pcmk_monitor_action="status" \
    op monitor interval="30s" timeout="60s" \
    op start interval="0" timeout="120s" \
    op stop interval="0" timeout="120s" \
    meta target-role="Started"
# IPMI STONITH device (fence_ipmilan). Credentials are anonymized.
# Fixed: "meta." -> "meta"; normalized whitespace in pcmk_host_list and
# removed the accidental double space after the primitive ID.
primitive fence_s stonith:fence_ipmilan \
    params ipaddr="10.10.s.s" login="USER" passwd="PASSWD" lanplus="true" action="reboot" pcmk_reboot_action="reboot" pcmk_host_list="db1 db2 db3 db4" pcmk_monitor_action="status" \
    op monitor interval="30s" timeout="60s" \
    op start interval="0" timeout="120s" \
    op stop interval="0" timeout="120s" \
    meta target-role="Started"
# IPMI STONITH device (fence_ipmilan). Credentials are anonymized.
# Fixed: "meta." -> "meta"; normalized whitespace in pcmk_host_list.
# NOTE(review): this device's ipaddr (10.10.w.w) and its location
# constraints (banned from db4) look mismatched with the x/y/s/w naming
# used elsewhere — confirm which node this device actually fences.
primitive fence_214 stonith:fence_ipmilan \
    params ipaddr="10.10.w.w" login="USER" passwd="PASSWD" lanplus="true" action="reboot" pcmk_reboot_action="reboot" pcmk_host_list="db1 db2 db3 db4" pcmk_monitor_action="status" \
    op monitor interval="30s" timeout="60s" \
    op start interval="0" timeout="120s" \
    op stop interval="0" timeout="120s" \
    meta target-role="Started"
# Network-connectivity monitor. Each ping target that answers contributes
# "multiplier" (100) to the node attribute consumed by the pingd-based
# location rules below. Normalized the stray double spaces in host_list.
primitive ping ocf:pacemaker:ping \
    params host_list="192.168.x.x 192.168.x.v 192.168.y.y 192.168.y.v 192.168.w.w 192.168.w.v 192.168.s.s 192.168.g.w" multiplier="100" \
    op monitor interval="10s" timeout="60s" \
    op start interval="0" timeout="60s" \
    op stop interval="0" timeout="60s"
# PostgreSQL instance for the sdb2 service (port 5433).
# Fixed: "meta." -> "meta".
# NOTE(review): fail-count is not a configurable op attribute (it is a
# node-status counter); migration-threshold on the resource meta was
# probably intended — kept as-is pending confirmation.
primitive postgres_sdb2 ocf:heartbeat:pgsql \
    params pgctl="/usr/local/pgsql/bin/pg_ctl" psql="/usr/local/pgsql/bin/psql" pgport="5433" start_opt="" pgdata="/usr/local/pgsql/data_db2" config="/usr/local/pgsql/data_db2/postgresql.conf" pgdba="postgres" pgdb="postgres" \
    op start interval="0" timeout="120s" \
    op stop interval="0" timeout="120s" \
    op monitor interval="10s" timeout="60s" fail-count="10000" \
    meta target-role="Started"
# PostgreSQL instance for the sdb1 service (port 5432).
# Fixed: "meta." -> "meta".
# NOTE(review): fail-count is not a configurable op attribute — see note
# on postgres_sdb2; confirm intent.
primitive postgres_sdb1 ocf:heartbeat:pgsql \
    params pgctl="/usr/local/pgsql/bin/pg_ctl" psql="/usr/local/pgsql/bin/psql" pgport="5432" start_opt="" pgdata="/usr/local/pgsql/data_db1" config="/usr/local/pgsql/data_db1/postgresql.conf" pgdba="postgres" pgdb="postgres" \
    op start interval="0" timeout="120s" \
    op stop interval="0" timeout="120s" \
    op monitor interval="10s" timeout="30s" fail-count="10000" \
    meta target-role="Started" is-managed="true"
# PostgreSQL instance for the sdb3 service (port 5434).
# Fixed: "meta." -> "meta".
# NOTE(review): fail-count is not a configurable op attribute — see note
# on postgres_sdb2; confirm intent.
primitive postgres_sdb3 ocf:heartbeat:pgsql \
    params pgctl="/usr/local/pgsql/bin/pg_ctl" psql="/usr/local/pgsql/bin/psql" pgport="5434" start_opt="" pgdata="/usr/local/pgsql/data_db3" config="/usr/local/pgsql/data_db3/postgresql.conf" pgdba="postgres" pgdb="postgres" \
    op start interval="0" timeout="120s" \
    op stop interval="0" timeout="120s" \
    op monitor interval="10s" timeout="30s" fail-count="10000" \
    meta target-role="Started" is-managed="true"
# Service VIP for the sdb2 database. Fixed: "meta." -> "meta".
primitive vip_sdb2 ocf:heartbeat:IPaddr2 \
    params ip="192.168.x.v" cidr_netmask="22" nic="eth1" flush_routes="true" \
    op monitor interval="10s" failure-timeout="60s" \
    op start interval="0" timeout="30s" \
    op stop interval="0" timeout="30s" \
    meta target-role="Started" is-managed="true"
# Service VIP for the sdb1 database. Fixed: "meta." -> "meta".
primitive vip_sdb1 ocf:heartbeat:IPaddr2 \
    params ip="192.168.y.v" cidr_netmask="22" nic="eth1" flush_routes="true" \
    op monitor interval="10s" failure-timeout="60s" \
    op start interval="0" timeout="30s" \
    op stop interval="0" timeout="30s" \
    meta target-role="Started" is-managed="true"
# Service VIP for the sdb3 database. Fixed: "meta." -> "meta".
primitive vip_sdb3 ocf:heartbeat:IPaddr2 \
    params ip="192.168.s.v" cidr_netmask="22" nic="eth1" flush_routes="true" \
    op monitor interval="10s" failure-timeout="60s" \
    op start interval="0" timeout="30s" \
    op stop interval="0" timeout="30s" \
    meta target-role="Started" is-managed="true"
# Run the ping monitor on every node so each host maintains the "pingd"
# connectivity score consumed by the VIP location rules below.
# Fixed: "meta." -> "meta", and "global-unique" is not a valid clone
# option — the correct spelling is "globally-unique".
# NOTE(review): ping clones are normally anonymous
# (globally-unique="false"); the original value is preserved — confirm.
clone clone-ping ping \
    meta globally-unique="true" target-role="Started"
# Fencing-device placement: each device is banned (-inf) from one node
# (the node it fences cannot fence itself) and preferred elsewhere.
# Fixed: the original constraints referenced fence_211 / fence_212 /
# fence_213, which are not defined anywhere in this configuration; the
# defined primitives are fence_x, fence_y, fence_s, fence_214. Mapped by
# ordinal (211->x, 212->y, 213->s); constraint IDs left unchanged.
location LOC-fence-211-NOT-x.x fence_x -inf: db1
location LOC-fence-211-ON-y.y fence_x 1000: db2
location LOC-fence-211-ON-w.w fence_x 500: db3
location LOC-fence-211-ON-s.s fence_x 100: db4
location LOC-fence-212-NOT-y.y fence_y -inf: db2
location LOC-fence-212-ON-x.x fence_y 100: db1
location LOC-fence-212-ON-w.w fence_y 1000: db3
location LOC-fence-212-ON-s.s fence_y 500: db4
location LOC-fence-213-NOT-w.w fence_s -inf: db3
location LOC-fence-213-ON-x.x fence_s 500: db1
location LOC-fence-213-ON-y.y fence_s 100: db2
location LOC-fence-213-ON-s.s fence_s 1000: db4
location LOC-fence-214-NOT-s.s fence_214 -inf: db4
location LOC-fence-214-ON-x.x fence_214 1000: db1
location LOC-fence-214-ON-y.y fence_214 500: db2
location LOC-fence-214-ON-w.w fence_214 100: db3
# PostgreSQL placement: each instance runs on its dedicated primary
# (score 1000), may fail over to the shared standby host db4 (500), and
# is banned (-inf) from the other primaries.
location LOC-pg-sdb2-NOT-x.x postgres_sdb2 -inf: db1
location LOC-pg-sdb2-NOT-w.w postgres_sdb2 -inf: db3
location LOC-pg-sdb2-ON-y.y postgres_sdb2 1000: db2
location LOC-pg-sdb2-ON-s.s postgres_sdb2 500: db4
location LOC-pg-sdb1-NOT-y.y postgres_sdb1 -inf: db2
location LOC-pg-sdb1-NOT-w.w postgres_sdb1 -inf: db3
location LOC-pg-sdb1-ON-x.x postgres_sdb1 1000: db1
location LOC-pg-sdb1-ON-s.s postgres_sdb1 500: db4
location LOC-pg-sdb3-NOT-x.x postgres_sdb3 -inf: db1
location LOC-pg-sdb3-NOT-y.y postgres_sdb3 -inf: db2
location LOC-pg-sdb3-ON-w.w postgres_sdb3 1000: db3
location LOC-pg-sdb3-ON-s.s postgres_sdb3 500: db4
# vip_sdb2 placement: preferred on db2 (1000), fallback db4 (500),
# banned from db1/db3. The No-connect rule evicts the VIP when the
# hosting node's pingd connectivity score drops below 200.
# Fixed: the rule originally matched db3, which is already banned -inf
# above, making the rule a no-op; the sibling rules for vip_sdb1 (db1)
# and vip_sdb3 (db3) each target that VIP's primary node, so the intent
# here is db2 — vip_sdb2's primary.
location LOC-vip-sdb2-NOT-x.x vip_sdb2 -inf: db1
location LOC-vip-sdb2-NOT-w.w vip_sdb2 -inf: db3
location LOC-vip-sdb2-No-connect vip_sdb2 \
    rule $id="LOC-vip-sdb2-No-connect-rule" -inf: pingd lt 200 and #uname eq db2
location LOC-vip-sdb2-ON-y.y vip_sdb2 1000: db2
location LOC-vip-sdb2-ON-s.s vip_sdb2 500: db4
# vip_sdb1 placement: preferred on db1 (1000), fallback db4 (500),
# banned from db2/db3; evicted from db1 when its pingd connectivity
# score falls below 200.
location LOC-vip-sdb1-NOT-y.y vip_sdb1 -inf: db2
location LOC-vip-sdb1-NOT-w.w vip_sdb1 -inf: db3
location LOC-vip-sdb1-No-connect vip_sdb1 \
    rule $id="LOC-vip-sdb1-No-connect-rule" -inf: pingd lt 200 and #uname eq db1
location LOC-vip-sdb1-ON-x.x vip_sdb1 1000: db1
location LOC-vip-sdb1-ON-s.s vip_sdb1 500: db4
# vip_sdb3 placement: preferred on db3 (1000), fallback db4 (500),
# banned from db1/db2; evicted from db3 when its pingd connectivity
# score falls below 200.
# Fixed: three constraint IDs used a stray "PCONLIEN" label; renamed to
# the LOC-vip-sdb3-* convention used by every other VIP constraint
# (constraint IDs are not referenced elsewhere, so this is safe).
location LOC-vip-sdb3-NOT-x.x vip_sdb3 -inf: db1
location LOC-vip-sdb3-NOT-y.y vip_sdb3 -inf: db2
location LOC-vip-sdb3-ON-w.w vip_sdb3 1000: db3
location LOC-vip-sdb3-No-connect vip_sdb3 \
    rule $id="LOC-vip-sdb3-No-connect-rule" -inf: pingd lt 200 and #uname eq db3
location LOC-vip-sdb3-ON-s.s vip_sdb3 500: db4
# Keep each PostgreSQL instance and its service VIP on the same node.
# NOTE(review): each pair is declared in both directions (pg-with-vip
# AND vip-with-pg); a single mandatory colocation per pair is the usual
# form — the symmetric duplicate is redundant. Kept as-is; confirm
# before pruning.
colocation COL-pg-vip-sdb2 inf: postgres_sdb2 vip_sdb2
colocation COL-pg-vip-sdb1 inf: postgres_sdb1 vip_sdb1
colocation COL-pg-vip-sdb3 inf: postgres_sdb3 vip_sdb3
colocation COL-vip-pg-sdb2 inf: vip_sdb2 postgres_sdb2
colocation COL-vip-pg-sdb1 inf: vip_sdb1 postgres_sdb1
colocation COL-vip-pg-sdb3 inf: vip_sdb3 postgres_sdb3
# Start each PostgreSQL instance before bringing up its service VIP.
# Fixed: the third constraint was misnamed "ORDER-vip-pg-sdb1" although
# it orders postgres_sdb2 before vip_sdb2; renamed to match the
# ORDER-pg-vip-* convention of its siblings.
order ORDER-pg-vip-sdb1 inf: postgres_sdb1 vip_sdb1
order ORDER-pg-vip-sdb3 inf: postgres_sdb3 vip_sdb3
order ORDER-pg-vip-sdb2 inf: postgres_sdb2 vip_sdb2
# Cluster-wide options.
# Fixed: expected-quorum-votes was "5" while exactly four nodes are
# declared above; set to 4 to match. (Quorum loss is tolerated anyway
# via no-quorum-policy="ignore" — required here since STONITH handles
# split-brain for this small cluster.)
property $id="cib-bootstrap-options" \
    dc-version="1.1.7-6.el6-148fccfd5985c5590cc601123c6c16e966b85d14" \
    cluster-infrastructure="openais" \
    expected-quorum-votes="4" \
    no-quorum-policy="ignore" \
    stonith-enabled="true" \
    cluster-delay="60s" \
    last-lrm-refresh="1370659086"
# High default stickiness (5000) so resources stay where they are after
# a failover instead of migrating back automatically; it outweighs the
# 1000-point location preferences above.
rsc_defaults $id="rsc-options" \
    resource-stickiness="5000"




來自 “ ITPUB部落格 ” ,連結:http://blog.itpub.net/133735/viewspace-763604/,如需轉載,請註明出處,否則將追究法律責任。

相關文章