Corosync + DRBD + MySQL: Building a Highly Available MySQL Cluster

Node planning:

node1.huhu.com    172.16.100.103

node2.huhu.com    172.16.100.104

Resource planning

Resource name: any ASCII characters except whitespace

DRBD device: the DRBD device file present on both nodes, usually /dev/drbdN, with major number 147

Disk: the backing storage device each node provides

Network configuration: the network settings used for data synchronization between the two nodes

DRBD has been part of the mainline Linux kernel since 2.6.33.

1. Set up mutual trust between the two machines (key-based SSH authentication), the hosts file, and time synchronization

1) Hostname resolution must work on all nodes, and each node's hostname must match the output of "uname -n". Make sure /etc/hosts on both nodes contains the following:

172.16.100.103    node1.huhu.com node1

172.16.100.104    node2.huhu.com node2
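A quick sanity check on each node (illustrative commands, not part of the original walkthrough) confirms the hostname matches "uname -n" and that both names resolve:

#uname -n
#ping -c 1 node1
#ping -c 1 node2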

Node1:

#sed -i 's@\(HOSTNAME=\).*@\1node1.huhu.com@g' /etc/sysconfig/network

#hostname node1.huhu.com

Node2:

#sed -i 's@\(HOSTNAME=\).*@\1node2.huhu.com@g' /etc/sysconfig/network

#hostname node2.huhu.com

2) Set up key-based SSH communication between the two nodes, which can be done with commands like the following:

#yum install openssh-clients

Node1:

#ssh-keygen -t rsa

#ssh-copy-id -i ~/.ssh/id_rsa.pub root@node2

Node2:

#ssh-keygen -t rsa

#ssh-copy-id -i ~/.ssh/id_rsa.pub root@node1

Configure time synchronization (cron entry):

*/5 * * * * root /usr/sbin/ntpdate ntp.api.bz &> /dev/null
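Before relying on the cron job, it makes sense to force one immediate sync on both nodes (an illustrative step using the same NTP server as the cron entry):

#/usr/sbin/ntpdate ntp.api.bz
#ssh node2 '/usr/sbin/ntpdate ntp.api.bz'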

2. Create and configure DRBD

Run on node1:

#rpm -Uvh http://www.elrepo.org/elrepo-release-6-6.el6.elrepo.noarch.rpm

#ssh node2 'rpm -Uvh http://www.elrepo.org/elrepo-release-6-6.el6.elrepo.noarch.rpm'

#yum update -y

#ssh node2 'yum update -y'

#yum install drbd84-utils kmod-drbd84 -y

#ssh node2 'yum install drbd84-utils kmod-drbd84 -y'

Load the module into the kernel:

#/sbin/modprobe drbd

#ssh node2 '/sbin/modprobe drbd'

DRBD configuration files:

/etc/drbd.conf

/etc/drbd.d/global_common.conf

/etc/drbd.d/*.res (per-resource definitions)

#yum -y install parted

#ssh node2 'yum -y install parted'

#fdisk /dev/sdb

n    create a new partition

p    primary partition

1    partition number; press Enter twice to accept the default start and end

w    write the partition table to disk and exit

#partprobe /dev/sdb1
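Remember that the same /dev/sdb1 partition must exist on node2 as well; a quick illustrative check on both nodes:

#fdisk -l /dev/sdb
#ssh node2 'fdisk -l /dev/sdb'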

Resource plan:

Resource name: mydrbd

DRBD device: /dev/drbd0

Disk: /dev/sdb1

Network: 100M

#cat /etc/drbd.d/global_common.conf | grep -v "#"

global {
        usage-count yes;
}

common {
        handlers {
                pri-on-incon-degr "/usr/lib/drbd/notify-pri-on-incon-degr.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
                pri-lost-after-sb "/usr/lib/drbd/notify-pri-lost-after-sb.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
                local-io-error "/usr/lib/drbd/notify-io-error.sh; /usr/lib/drbd/notify-emergency-shutdown.sh; echo o > /proc/sysrq-trigger ; halt -f";
        }
        startup {
        }
        options {
        }
        disk {
                on-io-error detach;
        }
        net {
                cram-hmac-alg "sha1";
                shared-secret "1q2w3e4r5t6y";
        }
        syncer {
                rate 200M;
        }
}

#cat mydrbd.res

resource mydrbd {
        device /dev/drbd0;
        disk /dev/sdb1;
        meta-disk internal;
        on node1.huhu.com {
                address 172.16.100.103:7789;
        }
        on node2.huhu.com {
                address 172.16.100.104:7789;
        }
}
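Before copying the files over, it is worth validating the syntax; drbdadm parses the whole configuration when dumping a resource, so typos surface immediately (an illustrative check):

#drbdadm dump mydrbd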

Copy the configuration files to node2:

#scp -r /etc/drbd.* node2:/etc/

Initialize the defined resource on both nodes and start the service:

#drbdadm create-md mydrbd

#ssh node2 'drbdadm create-md mydrbd'

#/etc/init.d/drbd start

#ssh node2 '/etc/init.d/drbd start'

Check the DRBD device status:

#cat /proc/drbd

version: 8.4.4 (api:1/proto:86-101)
GIT-hash: 599f286440bd633d15d5ff985204aff4bccffadd build by phil@Build64R6, 2013-10-14 15:33:06
 0: cs:Connected ro:Secondary/Secondary ds:Inconsistent/Inconsistent C r-----
    ns:0 nr:0 dw:0 dr:0 al:0 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:2096348

Both nodes are currently Secondary; manually promote node1 to Primary:

#drbdadm -- --overwrite-data-of-peer primary mydrbd
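The initial full sync can take a while depending on the device size; one illustrative way to watch its progress:

#watch -n 1 'cat /proc/drbd'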

#cat /proc/drbd

version: 8.4.4 (api:1/proto:86-101)
GIT-hash: 599f286440bd633d15d5ff985204aff4bccffadd build by phil@Build64R6, 2013-10-14 15:33:06
 0: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate C r-----
    ns:2096348 nr:0 dw:0 dr:2097012 al:0 bm:128 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:0

On the primary node, format the DRBD device and mount it:

#mke2fs -j /dev/drbd0

#mkdir /mydata

#mount /dev/drbd0 /mydata/

#cp /etc/inittab /mydata/

#ls -lh /mydata/

total 20K
-rw-r--r--. 1 root root 884 Jul  8 17:24 inittab
drwx------. 2 root root 16K Jul  8 17:23 lost+found

At this point the DRBD device is fully usable.

Manual primary/secondary switchover of the DRBD device

Run on the primary node:

#umount /mydata/

# drbdadm secondary mydrbd

#drbd-overview

0:mydrbd/0  Connected Secondary/Secondary UpToDate/UpToDate C r-----

Run on the secondary node:

#drbd-overview    (make sure both sides now show Secondary)

#drbdadm primary mydrbd

#mkdir -p /mydata

# mount /dev/drbd0 /mydata/

#ls -lh /mydata/

total 20K
-rw-r--r--. 1 root root 884 Jul  8 17:24 inittab
drwx------. 2 root root 16K Jul  8 17:23 lost+found

#drbd-overview

0:mydrbd/0  Connected Primary/Secondary UpToDate/UpToDate C r----- /mydata ext3 2.0G 36M 1.9G 2%

The roles have flipped: Primary/Secondary.

3. Configure the Corosync service

Stop the drbd service on both nodes and disable it at boot:

#/etc/init.d/drbd stop

#ssh node2 '/etc/init.d/drbd stop'

#chkconfig drbd off

#ssh node2 'chkconfig drbd off'

#chkconfig --list | grep drbd

#ssh node2 'chkconfig --list | grep drbd'

drbd    0:off 1:off 2:off 3:off 4:off 5:off 6:off

Install Corosync and Pacemaker:

#yum install libibverbs librdmacm lm_sensors libtool-ltdl openhpi-libs openhpi perl-TimeDate

#yum install corosync pacemaker

#ssh node2 'yum install libibverbs librdmacm lm_sensors libtool-ltdl openhpi-libs openhpi perl-TimeDate'

#wget http://ftp5.gwdg.de/pub/opensuse/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-6/x86_64/crmsh-2.1-1.1.x86_64.rpm && wget http://ftp5.gwdg.de/pub/opensuse/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-6/x86_64/pssh-2.3.1-4.1.x86_64.rpm

#ssh node2 'wget http://ftp5.gwdg.de/pub/opensuse/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-6/x86_64/crmsh-2.1-1.1.x86_64.rpm && wget http://ftp5.gwdg.de/pub/opensuse/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-6/x86_64/pssh-2.3.1-4.1.x86_64.rpm'

#yum --nogpgcheck localinstall crmsh-2.1-1.1.x86_64.rpm pssh-2.3.1-4.1.x86_64.rpm

If the install fails, add the following repository:

#vim /etc/yum.repos.d/ha-clustering.repo

[haclustering]

name=HAClustering

baseurl=http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-6/

enabled=1

gpgcheck=0

#yum --nogpgcheck localinstall crmsh-2.1-1.1.x86_64.rpm pssh-2.3.1-4.1.x86_64.rpm
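A quick illustrative check that both packages actually landed:

#rpm -q crmsh pssh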

Run the same steps on node2.

Configure Corosync:

#cd /etc/corosync/

#cp corosync.conf.example corosync.conf

#cat corosync.conf | grep -v "^#" | sed -e '/^$/d'

compatibility: whitetank

totem {

version: 2

secauth: on

threads: 2

interface {

ringnumber: 0

bindnetaddr: 172.16.100.0

mcastaddr: 226.94.8.9

mcastport: 5405

ttl: 1

}

}

logging {

fileline: off

to_stderr: no

to_logfile: yes

to_syslog: no

logfile: /var/log/cluster/corosync.log

debug: off

timestamp: on

logger_subsys {

subsys: AMF

debug: off

}

}

service {

ver: 0

name: pacemaker

# use_mgmtd: yes

}

aisexec {

user: root

group: root

}

amf {

mode: disabled

}

Generate the authentication key:

#corosync-keygen

#scp -p authkey corosync.conf node2:/etc/corosync/
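Note (based on corosync 1.x behavior, not part of the original): corosync-keygen reads /dev/random and may pause until the system gathers enough entropy; generating some disk or keyboard activity speeds it up. The key must also stay readable by root only, which is quick to confirm on both nodes:

#ls -l /etc/corosync/authkey
#ssh node2 'ls -l /etc/corosync/authkey'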

Create the log directory:

#mkdir -pv /var/log/cluster/

#ssh node2 'mkdir -pv /var/log/cluster/'

Start the Corosync service:

#service corosync start

#ssh node2 'service corosync start'

Check that the Corosync engine has started:

#grep -e "Corosync Cluster Engine" -e "configuration file" /var/log/cluster/corosync.log

Jul 09 10:28:14 corosync [MAIN  ] Corosync Cluster Engine ('1.4.1'): started and ready to provide service.
Jul 09 10:28:14 corosync [MAIN  ] Successfully read main configuration file '/etc/corosync/corosync.conf'.

Check that membership communication between the nodes is working:

#grep TOTEM /var/log/cluster/corosync.log

Jul 09 10:28:14 corosync [TOTEM ] Initializing transport (UDP/IP Multicast).
Jul 09 10:28:14 corosync [TOTEM ] Initializing transmit/receive security: libtomcrypt SOBER128/SHA1HMAC (mode 0).
Jul 09 10:28:14 corosync [TOTEM ] The network interface [172.16.100.103] is now up.
Jul 09 10:28:14 corosync [TOTEM ] A processor joined or left the membership and a new membership was formed.
Jul 09 10:28:29 corosync [TOTEM ] A processor joined or left the membership and a new membership was formed.

Check that Pacemaker started correctly:

#grep pcmk_startup /var/log/cluster/corosync.log

Jul 09 10:28:14 corosync [pcmk  ] info: pcmk_startup: CRM: Initialized
Jul 09 10:28:14 corosync [pcmk  ] Logging: Initialized pcmk_startup
Jul 09 10:28:14 corosync [pcmk  ] info: pcmk_startup: Maximum core file size is: 18446744073709551615
Jul 09 10:28:14 corosync [pcmk  ] info: pcmk_startup: Service: 9
Jul 09 10:28:14 corosync [pcmk  ] info: pcmk_startup: Local hostname: node1.huhu.com

Check for errors:

#grep ERROR /var/log/cluster/corosync.log | grep -v unpack_resources

Jul 09 10:28:14 corosync [pcmk  ] ERROR: process_ais_conf: You have configured a cluster using the Pacemaker plugin for Corosync. The plugin is not supported in this environment and will be removed very soon.
Jul 09 10:28:14 corosync [pcmk  ] ERROR: process_ais_conf: Please see Chapter 8 of 'Clusters from Scratch' (http://www.clusterlabs.org/doc) for details on using Pacemaker with CMAN
Jul 09 10:28:35 [1373] node1.huhu.com pengine: notice: process_pe_message: Configuration ERRORs found during PE processing. Please run "crm_verify -L" to identify issues.

Note: since no STONITH device is used here, these errors can be ignored.

#crm status

Last updated: Wed Jul 9 10:49:53 2014
Last change: Wed Jul 9 10:19:07 2014 via crmd on node1.huhu.com
Stack: classic openais (with plugin)
Current DC: node1.huhu.com - partition with quorum
Version: 1.1.10-14.el6_5.3-368c726
2 Nodes configured, 2 expected votes
0 Resources configured

Online: [ node1.huhu.com node2.huhu.com ]

This confirms that Corosync is configured and running correctly.

Disable STONITH, then verify and commit:

crm(live)# configure

crm(live)configure# property stonith-enabled=false

crm(live)configure# verify

crm(live)configure# commit

By default a partition without quorum stops all resources; in a two-node cluster, losing one node always means losing quorum, so tell the cluster to ignore quorum loss instead of shutting the services down:

crm(live)configure# property no-quorum-policy=ignore

crm(live)configure# verify

crm(live)configure# commit

Configure resource stickiness so resources prefer to stay on their current node:

crm(live)configure# rsc_defaults resource-stickiness=100

crm(live)configure# verify

crm(live)configure# commit
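The log message quoted earlier recommends crm_verify; it can be run from a regular shell at any time to re-validate the live CIB:

#crm_verify -L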

View the current configuration:

crm(live)configure# show

node node1.huhu.com
node node2.huhu.com
property cib-bootstrap-options:
        dc-version=1.1.10-14.el6_5.3-368c726
        cluster-infrastructure="classic openais (with plugin)"
        expected-quorum-votes=2
        stonith-enabled=false
        no-quorum-policy=ignore
rsc_defaults rsc-options:
        resource-stickiness=100

crm(live)configure#

Inspect the DRBD resource agent:

crm(live)configure# cd ..

crm(live)# ra

crm(live)ra# providers drbd

linbit

Note: only the linbit provider is listed; there is no heartbeat provider here (versions before corosync 1.4 shipped one).

View the agent metadata:

crm(live)ra# meta ocf:linbit:drbd

Define the resource:

crm(live)configure# primitive mysql_drbd ocf:linbit:drbd params drbd_resource=mydrbd op start timeout=240 op stop timeout=100 op monitor role=Master interval=50s timeout=30s op monitor role=Slave interval=60s timeout=30s

Define the master/slave clone resource:

crm(live)configure# master MS_mysql_drbd mysql_drbd meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"

crm(live)configure# show mysql_drbd

primitive mysql_drbd ocf:linbit:drbd
        params drbd_resource=mydrbd
        op start timeout=240 interval=0
        op stop timeout=100 interval=0
        op monitor role=Master interval=50s timeout=30s
        op monitor role=Slave interval=60s timeout=30s

crm(live)configure# show MS_mysql_drbd

ms MS_mysql_drbd mysql_drbd
        meta master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true

crm(live)configure# verify

crm(live)configure# commit

crm(live)configure# cd

crm(live)# status

Last updated: Wed Jul 9 11:54:30 2014
Last change: Wed Jul 9 11:54:17 2014 via cibadmin on node1.huhu.com
Stack: classic openais (with plugin)
Current DC: node2.huhu.com - partition with quorum
Version: 1.1.10-14.el6_5.3-368c726
2 Nodes configured, 2 expected votes
2 Resources configured

Online: [ node1.huhu.com node2.huhu.com ]

Master/Slave Set: MS_mysql_drbd [mysql_drbd]
        Masters: [ node1.huhu.com ]
        Slaves: [ node2.huhu.com ]

crm(live)#

The master/slave resource is now fully defined.

[root@node1 corosync]# drbd-overview

0:mydrbd/0  Connected Primary/Secondary UpToDate/UpToDate C r-----

[root@node1 corosync]#

The current node is now the DRBD primary.

Do a manual master/slave switchover:

#crm node standby

#crm status

Last updated: Wed Jul 9 12:01:44 2014
Last change: Wed Jul 9 12:01:29 2014 via crm_attribute on node1.huhu.com
Stack: classic openais (with plugin)
Current DC: node2.huhu.com - partition with quorum
Version: 1.1.10-14.el6_5.3-368c726
2 Nodes configured, 2 expected votes
2 Resources configured

Node node1.huhu.com: standby
Online: [ node2.huhu.com ]

Master/Slave Set: MS_mysql_drbd [mysql_drbd]
        Masters: [ node2.huhu.com ]
        Stopped: [ node1.huhu.com ]

#crm node online

#crm status

Last updated: Wed Jul 9 12:02:46 2014
Last change: Wed Jul 9 12:02:43 2014 via crm_attribute on node1.huhu.com
Stack: classic openais (with plugin)
Current DC: node2.huhu.com - partition with quorum
Version: 1.1.10-14.el6_5.3-368c726
2 Nodes configured, 2 expected votes
2 Resources configured

Online: [ node1.huhu.com node2.huhu.com ]

Master/Slave Set: MS_mysql_drbd [mysql_drbd]
        Masters: [ node2.huhu.com ]
        Slaves: [ node1.huhu.com ]

#drbd-overview

0:mydrbd/0  Connected Secondary/Primary UpToDate/UpToDate C r-----

[root@node1 corosync]#

The current node has switched to the secondary role.

Master/slave switchover now works, but nothing remounts the filesystem after a switch, so a Filesystem resource must be defined:

crm(live)configure# primitive mystore ocf:heartbeat:Filesystem params device=/dev/drbd0 directory=/mydata fstype=ext3 op start timeout=60 op stop timeout=60

crm(live)configure# verify

Note: do NOT commit yet. The filesystem must always sit on the DRBD master, so first define a colocation constraint:

crm(live)configure# colocation mystore_with_MS_mysql_drbd inf: mystore MS_mysql_drbd:Master

This keeps the storage resource on whichever node holds the master role.

crm(live)configure# order mystore_after_MS_mysql_drbd mandatory: MS_mysql_drbd:promote mystore:start

This mounts the filesystem only after the DRBD resource has been promoted.

crm(live)configure# verify

crm(live)configure# commit

crm(live)configure# cd ..

crm(live)# status

Last updated: Wed Jul 9 12:25:25 2014
Last change: Wed Jul 9 12:22:30 2014 via cibadmin on node1.huhu.com
Stack: classic openais (with plugin)
Current DC: node2.huhu.com - partition with quorum
Version: 1.1.10-14.el6_5.3-368c726
2 Nodes configured, 2 expected votes
3 Resources configured

Online: [ node1.huhu.com node2.huhu.com ]

Master/Slave Set: MS_mysql_drbd [mysql_drbd]
        Masters: [ node1.huhu.com ]
        Slaves: [ node2.huhu.com ]
mystore (ocf::heartbeat:Filesystem): Started node1.huhu.com

crm(live)#

As expected, the Master is on node1, so mystore is started on node1 as well.

[root@node1 ~]# ls -lh /mydata/

total 20K
-rw-r--r--. 1 root root 884 Jul  8 17:24 inittab
drwx------. 2 root root 16K Jul  8 17:23 lost+found

[root@node1 ~]#

Simulate another switchover:

[root@node1 corosync]# crm node standby

[root@node1 corosync]# crm status

Last updated: Wed Jul 9 12:28:55 2014
Last change: Wed Jul 9 12:28:49 2014 via crm_attribute on node1.huhu.com
Stack: classic openais (with plugin)
Current DC: node2.huhu.com - partition with quorum
Version: 1.1.10-14.el6_5.3-368c726
2 Nodes configured, 2 expected votes
3 Resources configured

Node node1.huhu.com: standby
Online: [ node2.huhu.com ]

Master/Slave Set: MS_mysql_drbd [mysql_drbd]
        Masters: [ node2.huhu.com ]
        Stopped: [ node1.huhu.com ]
mystore (ocf::heartbeat:Filesystem): Started node2.huhu.com

[root@node1 corosync]#

[root@node2 ~]# ls -lh /mydata/

total 20K
-rw-r--r--. 1 root root 884 Jul  8 17:24 inittab
drwx------. 2 root root 16K Jul  8 17:23 lost+found

You have new mail in /var/spool/mail/root

[root@node2 ~]#

Everything has failed over to node2.

4. Put MySQL on top of DRBD and Corosync

Create the MySQL user and group (with matching UIDs/GIDs) on both nodes, node1 first:

#groupadd -g 3306 mysql

#useradd -u 3306 -g mysql -s /sbin/nologin -M mysql

#id mysql

uid=3306(mysql) gid=3306(mysql) groups=3306(mysql)

#ssh node2 'groupadd -g 3306 mysql'

#ssh node2 'useradd -u 3306 -g mysql -s /sbin/nologin -M mysql'

#wget http://cdn.mysql.com/Downloads/MySQL-5.5/mysql-5.5.38-linux2.6-x86_64.tar.gz

#tar zxvf mysql-5.5.38-linux2.6-x86_64.tar.gz -C /usr/local/

#cd /usr/local/

#ln -s mysql-5.5.38-linux2.6-x86_64/ mysql

#cd mysql

#chown root:mysql -R .

#cp support-files/my-huge.cnf /etc/my.cnf

#cp support-files/mysql.server /etc/init.d/mysqld

#[ -x /etc/init.d/mysqld ] && echo "ok" || echo "NO"

Make sure you are operating on the current primary node:

#drbd-overview

0:mydrbd/0  Connected Primary/Secondary UpToDate/UpToDate C r----- /mydata ext3 2.0G 36M 1.9G 2%

#mkdir -p /mydata/data

#chown -R mysql:mysql /mydata/data/

#scripts/mysql_install_db --user=mysql --datadir=/mydata/data

#vim /etc/my.cnf

datadir=/mydata/data

#chkconfig --add mysqld

#chkconfig mysqld off

#service mysqld start

Make sure the service starts OK.
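An illustrative way to confirm mysqld is actually listening (3306 is the default port):

#netstat -tnlp | grep 3306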

#/usr/local/mysql/bin/mysql -uroot -e "CREATE DATABASE mydb"

[root@node1 mysql]# /usr/local/mysql/bin/mysql -uroot -e "SHOW DATABASES"

+--------------------+
| Database           |
+--------------------+
| information_schema |
| mydb               |
| mysql              |
| performance_schema |
| test               |
+--------------------+

#service mysqld stop

#chkconfig --list | grep 3:off | grep mysql

mysqld    0:off 1:off 2:off 3:off 4:off 5:off 6:off

[root@node1 mysql]#

Switch the storage resource over to node2 and set up MySQL there:

#crm node standby

[root@node1 mysql]# crm status

Last updated: Wed Jul 9 14:45:36 2014
Last change: Wed Jul 9 14:45:29 2014 via crm_attribute on node1.huhu.com
Stack: classic openais (with plugin)
Current DC: node2.huhu.com - partition with quorum
Version: 1.1.10-14.el6_5.3-368c726
2 Nodes configured, 2 expected votes
3 Resources configured

Node node1.huhu.com: standby
Online: [ node2.huhu.com ]

Master/Slave Set: MS_mysql_drbd [mysql_drbd]
        Masters: [ node2.huhu.com ]
        Stopped: [ node1.huhu.com ]
mystore (ocf::heartbeat:Filesystem): Started node2.huhu.com

[root@node1 mysql]# crm node online

[root@node1 mysql]# crm status

Last updated: Wed Jul 9 14:45:52 2014
Last change: Wed Jul 9 14:45:49 2014 via crm_attribute on node1.huhu.com
Stack: classic openais (with plugin)
Current DC: node2.huhu.com - partition with quorum
Version: 1.1.10-14.el6_5.3-368c726
2 Nodes configured, 2 expected votes
3 Resources configured

Online: [ node1.huhu.com node2.huhu.com ]

Master/Slave Set: MS_mysql_drbd [mysql_drbd]
        Masters: [ node2.huhu.com ]
        Slaves: [ node1.huhu.com ]
mystore (ocf::heartbeat:Filesystem): Started node2.huhu.com

[root@node1 mysql]#

# scp /root/mysql-5.5.38-linux2.6-x86_64.tar.gz node2:/root/

# scp /etc/my.cnf node2:/etc/my.cnf

# scp /etc/init.d/mysqld node2:/etc/init.d/mysqld

Install MySQL on node2:

#tar zxvf mysql-5.5.38-linux2.6-x86_64.tar.gz -C /usr/local/

#cd /usr/local/

#ln -s mysql-5.5.38-linux2.6-x86_64/ mysql

#cd mysql

#chown root:mysql -R .

Note: do NOT create /mydata/data by hand on node2; the initialized data directory already lives on the DRBD device, and recreating it would corrupt the data.

If library files are missing, install them: #yum install libaio

#service mysqld start

#/usr/local/mysql/bin/mysql -uroot -e "SHOW DATABASES"

+--------------------+
| Database           |
+--------------------+
| information_schema |
| mydb               |
| mysql              |
| performance_schema |
| test               |
+--------------------+

[root@node2 mydata]#

#service mysqld stop

#chkconfig mysqld off

Configure MySQL as a cluster resource

crm(live)# configure

crm(live)configure# primitive mysqld lsb:mysqld

crm(live)configure# verify

crm(live)configure# colocation mysqld_with_mystore inf: mysqld mystore

crm(live)configure# show xml

<rsc_colocation id="mysqld_with_mystore" score="INFINITY" rsc="mysqld" with-rsc="mystore"/>

The MySQL service must always run together with its storage resource, which is what the colocation constraint above enforces.

crm(live)configure# order mysqld_after_mystore mandatory: mystore mysqld

crm(live)configure# verify

The MySQL service must start only after the storage has been mounted, hence this order constraint.
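The new resource and constraints take effect only after a commit; that step is implied here before the status check below:

crm(live)configure# commit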

crm(live)# status

Last updated: Wed Jul 9 16:18:27 2014
Last change: Wed Jul 9 16:18:16 2014 via cibadmin on node2.huhu.com
Stack: classic openais (with plugin)
Current DC: node2.huhu.com - partition with quorum
Version: 1.1.10-14.el6_5.3-368c726
2 Nodes configured, 2 expected votes
4 Resources configured

Online: [ node1.huhu.com node2.huhu.com ]

Master/Slave Set: MS_mysql_drbd [mysql_drbd]
        Masters: [ node2.huhu.com ]
        Slaves: [ node1.huhu.com ]
mystore (ocf::heartbeat:Filesystem): Started node2.huhu.com
mysqld (lsb:mysqld): Started node2.huhu.com

crm(live)#

MySQL is now running on node2, so log in there and check:

#/usr/local/mysql/bin/mysql -uroot -e "SHOW DATABASES"

+--------------------+
| Database           |
+--------------------+
| information_schema |
| mydb               |
| mysql              |
| performance_schema |
| test               |
+--------------------+

#/usr/local/mysql/bin/mysql -uroot -e "DROP DATABASE mydb"

#/usr/local/mysql/bin/mysql -uroot -e "CREATE DATABASE testdb"

Now perform another master/slave switchover:

#crm node standby

#crm status

Masters: [ node1.huhu.com ]
Stopped: [ node2.huhu.com ]
mystore (ocf::heartbeat:Filesystem): Started node1.huhu.com
mysqld (lsb:mysqld): Started node1.huhu.com

#crm node online

Master/Slave Set: MS_mysql_drbd [mysql_drbd]
        Masters: [ node1.huhu.com ]
        Slaves: [ node2.huhu.com ]
mystore (ocf::heartbeat:Filesystem): Started node1.huhu.com
mysqld (lsb:mysqld): Started node1.huhu.com

On node1:

#/usr/local/mysql/bin/mysql -uroot -e "SHOW DATABASES"

+--------------------+
| Database           |
+--------------------+
| information_schema |
| mysql              |
| performance_schema |
| test               |
| testdb             |
+--------------------+

testdb shows up as expected.

Finally, define a virtual IP resource for MySQL:

crm(live)configure# primitive myip ocf:heartbeat:IPaddr params ip=172.16.100.119 nic=eth0 cidr_netmask=24

crm(live)configure# verify

crm(live)configure# colocation myip_with_MS_mysql_drbd inf: MS_mysql_drbd:Master myip

crm(live)configure# verify

crm(live)configure# show xml

crm(live)configure# commit

crm(live)configure# cd ..

crm(live)# status

Last updated: Wed Jul 9 16:46:27 2014
Last change: Wed Jul 9 16:46:20 2014 via cibadmin on node1.huhu.com
Stack: classic openais (with plugin)
Current DC: node2.huhu.com - partition with quorum
Version: 1.1.10-14.el6_5.3-368c726
2 Nodes configured, 2 expected votes
5 Resources configured

Online: [ node1.huhu.com node2.huhu.com ]

Master/Slave Set: MS_mysql_drbd [mysql_drbd]
        Masters: [ node1.huhu.com ]
        Slaves: [ node2.huhu.com ]
mystore (ocf::heartbeat:Filesystem): Started node1.huhu.com
mysqld (lsb:mysqld): Started node1.huhu.com
myip (ocf::heartbeat:IPaddr): Started node1.huhu.com

crm(live)#

myip has started on node1.

#ip addr

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:a9:86:42 brd ff:ff:ff:ff:ff:ff
    inet 172.16.100.103/24 brd 172.16.100.255 scope global eth0
    inet 172.16.100.119/24 brd 172.16.100.255 scope global secondary eth0
    inet6 fe80::20c:29ff:fea9:8642/64 scope link
       valid_lft forever preferred_lft forever

5. Verify MySQL login from another host

Log in to MySQL and create a user that can connect remotely:

#/usr/local/mysql/bin/mysql -uroot -e "GRANT ALL ON *.* TO 'root'@'%' IDENTIFIED BY '123.com'; FLUSH PRIVILEGES"

#mysql -uroot -p123.com -h172.16.100.119 -e "SHOW DATABASES"

+--------------------+
| Database           |
+--------------------+
| information_schema |
| mysql              |
| performance_schema |
| test               |
| testdb             |
+--------------------+

[root@localhost ~]#

Simulate the master/slave switchover one more time:

#crm node standby

#crm node online

#crm status

Online: [ node1.huhu.com node2.huhu.com ]

Master/Slave Set: MS_mysql_drbd [mysql_drbd]
        Masters: [ node2.huhu.com ]
        Slaves: [ node1.huhu.com ]
mystore (ocf::heartbeat:Filesystem): Started node2.huhu.com
mysqld (lsb:mysqld): Started node2.huhu.com
myip (ocf::heartbeat:IPaddr): Started node2.huhu.com

#mysql -uroot -p123.com -h172.16.100.119 -e "SHOW DATABASES"

 

[root@node2 ~]# crm

crm(live)# configure

crm(live)configure# show

node node1.huhu.com
        attributes standby=off
node node2.huhu.com
        attributes standby=off
primitive myip IPaddr
        params ip=172.16.100.119 nic=eth0 cidr_netmask=24
primitive mysql_drbd ocf:linbit:drbd
        params drbd_resource=mydrbd
        op start timeout=240 interval=0
        op stop timeout=100 interval=0
        op monitor role=Master interval=50s timeout=30s
        op monitor role=Slave interval=60s timeout=30s
primitive mysqld lsb:mysqld
primitive mystore Filesystem
        params device="/dev/drbd0" directory="/mydata" fstype=ext3
        op start timeout=60 interval=0
        op stop timeout=60 interval=0
ms MS_mysql_drbd mysql_drbd
        meta master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true
colocation myip_with_MS_mysql_drbd inf: MS_mysql_drbd:Master myip
colocation mysqld_with_mystore inf: mysqld mystore
colocation mystore_with_MS_mysql_drbd inf: mystore MS_mysql_drbd:Master
order mysqld_after_mystore Mandatory: mystore mysqld
order mystore_after_MS_mysql_drbd Mandatory: MS_mysql_drbd:promote mystore:start
property cib-bootstrap-options:
        dc-version=1.1.10-14.el6_5.3-368c726
        cluster-infrastructure="classic openais (with plugin)"
        expected-quorum-votes=2
        stonith-enabled=false
        no-quorum-policy=ignore
rsc_defaults rsc-options:
        resource-stickiness=100

crm(live)configure#