node1:ZhongH100.wxjr.com.cn 172.16.6.100 node2:ZhongH101.wxjr.com.cn 172.16.6.101 NFS存儲:ZhongH102.wxjr.com.cn 172.16.6.102
VIP:172.16.7.200 node1和node2的主機名、SSH互通 操作系統:都是CentOS 6.6 x86_64 (自己配置好yum源、epel源、關閉SELinux、關閉防火牆) 注意:是[root@ZhongH100 ~]還是[root@ZhongH101 ~]或者是[root@ZhongH102 ~]
如果是[root@ZhongH ~]那麼就是node1 node2兩台機器都需要運行的
拓撲結構:
一、在node1和node2上互做對方主機名解析、同步時間
[root@ZhongH100 /tmp]# tail -1 /etc/hosts 172.16.6.101 ZhongH101.wxjr.com.cn [root@ZhongH100 /tmp]# crontab -l */5 * * * * /usr/sbin/ntpdate pool.ntp.org >/dev/null 2>&1 [root@ZhongH100 /tmp]#
[root@ZhongH101 ~]# tail -1 /etc/hosts 172.16.6.100 ZhongH100.wxjr.com.cn [root@ZhongH101 /tmp]# crontab -l */5 * * * * /usr/sbin/ntpdate pool.ntp.org >/dev/null 2>&1 [root@ZhongH101 /tmp]#
二、node1和node2兩節點之間配置SSH互信
[root@ZhongH100 /tmp]# ssh-keygen -t rsa -f ~/.ssh/id_rsa -P '' Generating public/private rsa key pair. Your identification has been saved in /root/.ssh/id_rsa. Your public key has been saved in /root/.ssh/id_rsa.pub. The key fingerprint is: 29:d6:6a:68:92:62:7b:84:c1:de:b1:65:20:bf:f4:19 [email protected] The key's randomart image is: +--[ RSA 2048]----+ | | | . . | |. o . | | o + E . . | |. = B = S | | o * = o | |..+ o o | |...+ . | | .. | +-----------------+ [root@ZhongH100 /tmp]# ssh-copy-id -i ~/.ssh/id_rsa.pub [email protected] The authenticity of host 'zhongh101.wxjr.com.cn (172.16.6.101)' can't be established. RSA key fingerprint is e9:95:aa:7f:39:5b:52:a7:9b:5e:fe:98:19:82:14:e3. Are you sure you want to continue connecting (yes/no)? yes Warning: Permanently added 'zhongh101.wxjr.com.cn,172.16.6.101' (RSA) to the list of known hosts. [email protected]'s password: Now try logging into the machine, with "ssh '[email protected]'", and check in: .ssh/authorized_keys to make sure we haven't added extra keys that you weren't expecting. [root@ZhongH100 /tmp]#
[root@ZhongH101 ~]# ssh-keygen -t rsa -f ~/.ssh/id_rsa -P '' Generating public/private rsa key pair. Your identification has been saved in /root/.ssh/id_rsa. Your public key has been saved in /root/.ssh/id_rsa.pub. The key fingerprint is: 7e:b2:24:1d:29:ea:41:6b:02:fc:41:e6:81:5a:4c:e3 [email protected] The key's randomart image is: +--[ RSA 2048]----+ | o | | +.. | | .E+ | |o.+ . . | |o. o. . S | | ....o + . | | ..= . = . | | + . o + | | . . | +-----------------+ [root@ZhongH101 ~]# ssh-copy-id -i .ssh/id_rsa.pub [email protected] The authenticity of host 'zhongh100.wxjr.com.cn (172.16.6.100)' can't be established. RSA key fingerprint is 90:26:f4:28:31:04:03:6c:9f:ec:e4:09:04:32:92:ee. Are you sure you want to continue connecting (yes/no)? yes Warning: Permanently added 'zhongh100.wxjr.com.cn,172.16.6.100' (RSA) to the list of known hosts. [email protected]'s password: Now try logging into the machine, with "ssh '[email protected]'", and check in: .ssh/authorized_keys to make sure we haven't added extra keys that you weren't expecting. [root@ZhongH101 ~]#
三、安裝Heartbeat和Nginx
節點1、節點2上安裝Heartbeat
[root@ZhongH /tmp]# yum -y install heartbeat*
節點1、節點2上編譯安裝Nginx
[root@ZhongH /tmp]# wget http://nginx.org/download/nginx-1.9.1.tar.gz [root@ZhongH /tmp]# tar xf nginx-1.9.1.tar.gz [root@ZhongH /tmp]# cd nginx-1.9.1 [root@ZhongH /tmp/nginx-1.9.1]# Username="www" && for i in `seq 1000 1500`;do [ -z "$(awk -F: '{print$3,$4}' /etc/passwd | grep "$i")" -a -z "$(awk -F: '{print$3}' /etc/group | grep "$i")" ] && UGID=$i && break;done && groupadd -g $UGID $Username && useradd -M -u $UGID -g $UGID -s /sbin/nologin $Username [root@ZhongH /tmp/nginx-1.9.1]# yum install pcre-devel pcre -y [root@ZhongH /tmp/nginx-1.9.1]# mkdir -p {/tmp/nginx,/var/run/nginx,/var/lock} [root@ZhongH /tmp/nginx-1.9.1]# ./configure --prefix=/usr/local/nginx/ --user=www --group=www \ --error-log-path=/tmp/nginx/error.log --http-log-path=/tmp/nginx/access.log \ --pid-path=/var/run/nginx/nginx.pid --lock-path=/var/lock/nginx.lock \ --with-pcre --with-http_ssl_module --with-http_flv_module \ --with-http_spdy_module --with-http_gzip_static_module \ --with-http_stub_status_module --http-client-body-temp-path=/usr/local/nginx/client/ \ --http-proxy-temp-path=/usr/local/nginx/proxy/ --http-fastcgi-temp-path=/usr/local/nginx/fcgi/ \ --http-uwsgi-temp-path=/usr/local/nginx/uwsgi --http-scgi-temp-path=/usr/local/nginx/scgi [root@ZhongH /tmp/nginx-1.9.1]# make -j $(awk '{if($1=="processor"){i++}}END{print i}' /proc/cpuinfo) && make install && echo $? [root@ZhongH /tmp/nginx-1.9.1]# cd ../ [root@ZhongH /tmp]# echo "export PATH=/usr/local/nginx/sbin:\$PATH" > /etc/profile.d/nginx1.9.1.sh [root@ZhongH /tmp]# . 
/etc/profile.d/nginx1.9.1.sh [root@ZhongH /tmp]# which nginx /usr/local/nginx/sbin/nginx [root@ZhongH /tmp]# nginx -V nginx version: nginx/1.9.1 built by gcc 4.4.7 20120313 (Red Hat 4.4.7-11) (GCC) built with OpenSSL 1.0.1e-fips 11 Feb 2013 TLS SNI support enabled configure arguments: --prefix=/usr/local/nginx/ --user=www --group=www --error-log-path=/tmp/nginx/error.log --http-log-path=/tmp/nginx/access.log --pid-path=/var/run/nginx/nginx.pid --lock-path=/var/lock/nginx.lock --with-pcre --with-http_ssl_module --with-http_flv_module --with-http_spdy_module --with-http_gzip_static_module --with-http_stub_status_module --http-client-body-temp-path=/usr/local/nginx/client/ --http-proxy-temp-path=/usr/local/nginx/proxy/ --http-fastcgi-temp-path=/usr/local/nginx/fcgi/ --http-uwsgi-temp-path=/usr/local/nginx/uwsgi --http-scgi-temp-path=/usr/local/nginx/scgi [root@ZhongH /tmp]# wget http://www.dwhd.org/script/Nginx-init-CentOS -O /etc/rc.d/init.d/nginx [root@ZhongH /tmp]# chmod +x /etc/rc.d/init.d/nginx [root@ZhongH /tmp]# chkconfig --add nginx [root@ZhongH /tmp]# chkconfig nginx on [root@ZhongH /tmp]# chkconfig nginx off #取消nginx的開機啟動是因為我們需要讓heartbeat來控制nginx [root@ZhongH /tmp]# chkconfig --list nginx nginx 0:關閉 1:關閉 2:啟用 3:啟用 4:啟用 5:啟用 6:關閉 [root@ZhongH /tmp]# sed -i '$ i \\tinclude vhost/*.conf;' /usr/local/nginx/conf/nginx.conf [root@ZhongH /tmp]# mkdir -p /usr/local/nginx/conf/vhost
四、配置node1和node2上web服務
1、節點1 配置虛擬主機
[root@ZhongH100 /tmp]# mkdir -p /home/wwwroot/ZhongH100.wxjr.com.cn [root@ZhongH100 /tmp]# cat /usr/local/nginx/conf/vhost/ZhongH100.wxjr.com.cn.conf 我的節點1虛擬主機配置 server { listen 80; server_name ZhongH100.wxjr.com.cn; root /home/wwwroot/ZhongH100.wxjr.com.cn; index index.html index.php index.htm; access_log /tmp/nginx/ZhongH100.wxjr.com.cn_nginx.log combined; } [root@ZhongH100 /tmp]# echo "Hello node1" >> /home/wwwroot/ZhongH100.wxjr.com.cn/index.html [root@ZhongH100 /tmp]# /etc/init.d/nginx configtest nginx: the configuration file /usr/local/nginx/conf/nginx.conf syntax is ok nginx: configuration file /usr/local/nginx/conf/nginx.conf test is successful [root@ZhongH100 /tmp]# /etc/init.d/nginx restart nginx: the configuration file /usr/local/nginx/conf/nginx.conf syntax is ok nginx: configuration file /usr/local/nginx/conf/nginx.conf test is successful 停止 nginx: [失敗] 正在啟動 nginx: [確定] [root@ZhongH100 /tmp]# ss -tnl | grep 80 LISTEN 0 511 *:80 *:*
[root@ZhongH100 ~]# sed -i '/<h1>Welcome to nginx!<\/h1>/a <h1> This is node1!</h1>' /usr/local/nginx/html/index.html
2、節點2 配置虛擬主機
[root@ZhongH101 /tmp]# mkdir -p /home/wwwroot/ZhongH101.wxjr.com.cn [root@ZhongH101 /tmp]# cat /usr/local/nginx/conf/vhost/ZhongH101.wxjr.com.cn.conf 我的節點2虛擬主機配置 server { listen 80; server_name ZhongH101.wxjr.com.cn; root /home/wwwroot/ZhongH101.wxjr.com.cn; index index.html index.php index.htm; access_log /tmp/nginx/ZhongH101.wxjr.com.cn_nginx.log combined; } [root@ZhongH101 /tmp]# echo "Hello node2" >> /home/wwwroot/ZhongH101.wxjr.com.cn/index.html [root@ZhongH101 /tmp]# /etc/init.d/nginx configtest nginx: the configuration file /usr/local/nginx/conf/nginx.conf syntax is ok nginx: configuration file /usr/local/nginx/conf/nginx.conf test is successful [root@ZhongH101 /tmp]# /etc/init.d/nginx restart nginx: the configuration file /usr/local/nginx/conf/nginx.conf syntax is ok nginx: configuration file /usr/local/nginx/conf/nginx.conf test is successful 停止 nginx: [確定] 正在啟動 nginx: [確定] [root@ZhongH101 /tmp]# ss -tnl | grep 80 LISTEN 0 511 *:80 *:*
[root@ZhongH101 ~]# sed -i '/<h1>Welcome to nginx!<\/h1>/a <h1> This is node2!</h1>' /usr/local/nginx/html/index.html
檢測下兩個節點的web服務
五、配置node1和node2上Heartbeat
1、拷貝Heartbeat所需要的配置文件到Heartbeat的配置文件目錄
[root@ZhongH100 /tmp]# cd /etc/ha.d/ [root@ZhongH100 /etc/ha.d]# cd /usr/share/doc/heartbeat-* [root@ZhongH100 /usr/share/doc/heartbeat-3.0.4]# cd - /etc/ha.d [root@ZhongH100 /etc/ha.d]# cp /usr/share/doc/heartbeat-*/{authkeys,ha.cf,haresources} ./ #authkeys #是節點之間的認證key文件,我們不能讓什麼服務器都加入集群中來,加入集群中的節點都是需要認證的 #ha.cf #heartbeat的主配置文件 #haresources #集群資源管理配置文件(在heartbeat所有版本中都是支持haresources來配置集群中的資源的)
2、配置authkeys文件
[root@ZhongH100 /etc/ha.d]# echo -e "auth 1\n1 md5 $(cat /dev/urandom | head | openssl md5 | awk '{print $2}')" >> /etc/ha.d/authkeys [root@ZhongH100 /etc/ha.d]# tail /etc/ha.d/authkeys # # crc adds no security, except from packet corruption. # Use only on physically secure networks. # #auth 1 #1 crc #2 sha1 HI! #3 md5 Hello! auth 1 1 md5 a21763677359cdeac3fd8cb1d5e79a1a [root@ZhongH100 /etc/ha.d]# chmod 600 /etc/ha.d/authkeys [root@ZhongH100 /etc/ha.d]#
3、修改ha.cf配置文件,下面是我的配置范本,修改心跳信息的傳播方式(這裡是組播)
[root@ZhongH100 /etc/ha.d]# grep -Ev '^#|^$' ha.cf logfacility local0 mcast eth0 225.172.16.1 694 1 0 #mcast 組播通訊,參數如下: 通訊所用的接口 綁定的組播IP(224.0.0.0-239.255.255.255)通訊端口 ttl 是否允許環回。 auto_failback on node ZhongH100.wxjr.com.cn #配置集群中的節點名稱 node ZhongH101.wxjr.com.cn #配置集群中的節點名稱
4、配置haresources文件,下面是我的配置范本
[root@ZhongH100 /etc/ha.d]# echo "ZhongH100.wxjr.com.cn IPaddr::172.16.7.200/16/eth0 nginx" >> haresources [root@ZhongH100 /etc/ha.d]# grep -Ev '^#|^$' /etc/ha.d/haresources ZhongH100.wxjr.com.cn IPaddr::172.16.7.200/16/eth0 nginx
5、將node1節點上的heartbeat配置文件傳到node2節點上
[root@ZhongH100 /etc/ha.d]# scp authkeys ha.cf haresources ZhongH101.wxjr.com.cn:/etc/ha.d/ authkeys 100% 691 0.7KB/s 00:00 ha.cf 100% 10KB 10.3KB/s 00:00 haresources 100% 5962 5.8KB/s 00:00 [root@ZhongH100 /etc/ha.d]# ssh ZhongH101.wxjr.com.cn "ls -l /etc/ha.d" #通過ssh命令查看node2節點上的文件 總用量 44 -rw------- 1 root root 691 5月 28 22:25 authkeys -rw-r--r-- 1 root root 10591 5月 28 22:25 ha.cf -rwxr-xr-x 1 root root 745 12月 3 2013 harc -rw-r--r-- 1 root root 5962 5月 28 22:25 haresources drwxr-xr-x 2 root root 4096 5月 28 17:58 rc.d -rw-r--r-- 1 root root 692 12月 3 2013 README.config drwxr-xr-x 2 root root 4096 5月 28 17:58 resource.d -rw-r--r-- 1 root root 2082 5月 6 18:52 shellfuncs [root@ZhongH100 /etc/ha.d]#
6、啟動節點上的heartbeat,上面我們已經對node1和node2做了ssh互信,這裡就直接在node1上用ssh遠程執行命令來控制node2
[root@ZhongH100 /etc/ha.d]# /etc/init.d/heartbeat start Starting High-Availability services: INFO: Resource is stopped Done. [root@ZhongH100 /etc/ha.d]# ssh ZhongH101.wxjr.com.cn "/etc/init.d/heartbeat start" Starting High-Availability services: 2015/05/28_22:47:02 INFO: Resource is stopped Done. [root@ZhongH100 /etc/ha.d]#
六、測試Web集群
1、查看服務啟動情況
[root@ZhongH100 ~]# netstat -ntulp Active Internet connections (only servers) Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name tcp 0 0 127.0.0.1:25 0.0.0.0:* LISTEN 2015/master tcp 0 0 0.0.0.0:63705 0.0.0.0:* LISTEN 1581/rpc.statd tcp 0 0 127.0.0.1:6010 0.0.0.0:* LISTEN 2588/sshd tcp 0 0 0.0.0.0:111 0.0.0.0:* LISTEN 1456/rpcbind tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 2157/nginx tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 1781/sshd tcp 0 0 127.0.0.1:631 0.0.0.0:* LISTEN 1610/cupsd tcp 0 0 ::1:25 :::* LISTEN 2015/master tcp 0 0 ::1:6010 :::* LISTEN 2588/sshd tcp 0 0 :::19454 :::* LISTEN 1581/rpc.statd tcp 0 0 :::111 :::* LISTEN 1456/rpcbind tcp 0 0 :::22 :::* LISTEN 1781/sshd tcp 0 0 ::1:631 :::* LISTEN 1610/cupsd udp 0 0 127.0.0.1:909 0.0.0.0:* 1581/rpc.statd udp 0 0 0.0.0.0:783 0.0.0.0:* 1456/rpcbind udp 0 0 0.0.0.0:50097 0.0.0.0:* 1581/rpc.statd udp 0 0 225.172.16.1:694 0.0.0.0:* 1928/heartbeat: wri udp 0 0 0.0.0.0:46815 0.0.0.0:* 1928/heartbeat: wri udp 0 0 0.0.0.0:68 0.0.0.0:* 1328/dhclient udp 0 0 0.0.0.0:111 0.0.0.0:* 1456/rpcbind udp 0 0 0.0.0.0:631 0.0.0.0:* 1610/cupsd udp 0 0 :::783 :::* 1456/rpcbind udp 0 0 :::41894 :::* 1581/rpc.statd udp 0 0 :::111 :::* 1456/rpcbind [root@ZhongH100 ~]#
2、檢查web頁面
3、故障演示
1)、關閉node1節點上的heartbeat
[root@ZhongH100 ~]# /etc/init.d/heartbeat stop Stopping High-Availability services: Done. [root@ZhongH100 ~]#
查看node1節點上的ip
[root@ZhongH100 ~]# ip addr 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000 link/ether 00:0c:29:e6:29:99 brd ff:ff:ff:ff:ff:ff inet 172.16.6.100/16 brd 172.16.255.255 scope global eth0 inet6 fe80::20c:29ff:fee6:2999/64 scope link valid_lft forever preferred_lft forever 3: pan0: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN link/ether da:7b:ee:d4:f3:0b brd ff:ff:ff:ff:ff:ff [root@ZhongH100 ~]#
查看node2節點上的ip
[root@ZhongH101 ~]# ip addr 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000 link/ether 00:0c:29:14:0f:ea brd ff:ff:ff:ff:ff:ff inet 172.16.6.101/16 brd 172.16.255.255 scope global eth0 inet 172.16.7.200/16 brd 172.16.255.255 scope global secondary eth0 inet6 fe80::20c:29ff:fe14:fea/64 scope link valid_lft forever preferred_lft forever 3: pan0: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN link/ether f2:d1:dc:4f:50:3a brd ff:ff:ff:ff:ff:ff [root@ZhongH101 ~]#
由此可見,VIP已經轉移到node2節點上了,大家可以在這時把node1的Heartbeat啟動起來再看看,這裡就不再演示了
2)、再次訪問http://172.16.7.200
由此可見集群已經轉移到node2上,從而實現了Web高可用。
七、架構問題擴展和追問以及解決方案
從上面的實驗中可以看出,此集群架構存在一個重要問題。
當主服務器故障了或說宕機了,服務(這裡是80端口)和VIP立即切換到從服務器上繼續提供服務,這是沒有問題的。若是這個集群提供的是一個可以上傳的web服務,一個用戶剛剛上傳了一個文件到主服務器上,這時主服務器宕機了,用戶一刷新發現剛剛上傳的文件沒有了,用戶會怎麼想呢?這會帶來用戶體驗的缺失。
那就有人會問了,那有沒有什麼方法解決這個問題呢?答案是肯定有的,基本上有兩種解決方案,
一種是各節點之間進行文件同步,另一種是各節點之間使用共享存儲。下面我們就來說一說這兩種方案。
第一種方案,我們說了使用節點間的文件同步,我們一般用Rsync+Inotify組合方案來解決節點之間的同步問題,
但有個問題,我們說得極端一點,還是剛才上傳文件的問題:用戶剛開始上傳文件,這時主服務器宕機了,文件還沒有同步,用戶看不到文件;還有一種情況,用戶上傳的文件比較大,各節點之間正在同步,這時主服務器宕機了,用戶看到的文件是不完整的,也不能達到很好的用戶體驗。而且節點之間同步,還會占用大量的帶寬。說了這麼多,我們總結一下這種方案的優缺點吧:
優點:可節省一台或多台服務器,節約成本;在小規模的集群中,文件同步效果還是不錯的,但在大規模的集群中不推薦使用。
缺點:節點之間同步文件,占用大量的網絡帶寬,降低整體集群性能,在比較繁忙的集群中不推薦使用。
第二種方案,就是我們所說的共享存儲方案了,共享存儲方案也有兩種方案,
一種是文件級別的共享存儲(如,NFS文件服務器),另一種是塊級別的共享存儲(如,iscsi共享存儲)。
用這種方案就能很好地解決上述的問題。上面的集群中有兩個節點,主節點與從節點共享一個文件服務器,當主節點提供服務時,文件服務器是掛載在主節點上;當主節點宕機了,從節點來提供服務,並掛載同一個文件服務器。這時我們就不用擔心主節點宕機後用戶上傳的文件丟失,因為我們使用的是同一個共享存儲,只要文件上傳到了服務器中,我們就能看到,不用擔心文件不存在,也不用再考慮同步文件的問題了。在高可用集群中我比較推薦共享存儲。下面我們就用NFS來演示一下各節點之間掛載共享存儲。
八、共享存儲配置
1、配置NFS服務器
[root@ZhongH102 ~]# mkdir -pv /home/wwwroot/data mkdir: 已創建目錄 "/home/wwwroot" mkdir: 已創建目錄 "/home/wwwroot/data" [root@ZhongH102 ~]# echo "/home/wwwroot/data 172.16.0.0/8(ro,async)" >> /etc/exports [root@ZhongH102 ~]# echo '<h1>Hello This is NFS Server</h1>' > /home/wwwroot/data/index.html [root@ZhongH102 ~]# /etc/init.d/portmap start bash: /etc/init.d/portmap: 沒有那個文件或目錄 [root@ZhongH102 ~]# /etc/init.d/portreserve start 正在啟動 portreserve: [確定] [root@ZhongH102 ~]# /etc/init.d/nfs start 啟動 NFS 服務: [確定] 關掉 NFS 配額: [確定] 啟動 NFS mountd: [確定] 啟動 NFS 守護進程: [確定] 正在啟動 RPC idmapd: [確定] [root@ZhongH102 ~]# showmount -e 172.16.6.102 Export list for 172.16.6.102: /home/wwwroot/data 172.16.0.0/8 [root@ZhongH102 ~]#
2、節點測試掛載
1)、節點1
[root@ZhongH100 ~]# mount -t nfs 172.16.6.102:/home/wwwroot/data/ /usr/local/nginx/html [root@ZhongH100 ~]# mount /dev/mapper/vgzhongH-root on / type ext4 (rw,acl) proc on /proc type proc (rw) sysfs on /sys type sysfs (rw) devpts on /dev/pts type devpts (rw,gid=5,mode=620) tmpfs on /dev/shm type tmpfs (rw) /dev/sda1 on /boot type ext4 (rw) /dev/mapper/vgzhongH-data on /data type ext4 (rw,acl) none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw) sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw) 172.16.6.102:/home/wwwroot/data/ on /usr/local/nginx/html type nfs (rw,vers=4,addr=172.16.6.102,clientaddr=172.16.6.100) [root@ZhongH100 ~]# umount /usr/local/nginx/html/ [root@ZhongH100 ~]# mount /dev/mapper/vgzhongH-root on / type ext4 (rw,acl) proc on /proc type proc (rw) sysfs on /sys type sysfs (rw) devpts on /dev/pts type devpts (rw,gid=5,mode=620) tmpfs on /dev/shm type tmpfs (rw) /dev/sda1 on /boot type ext4 (rw) /dev/mapper/vgzhongH-data on /data type ext4 (rw,acl) none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw) sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw) [root@ZhongH100 ~]#
2)、節點2
[root@ZhongH101 ~]# mount -t nfs 172.16.6.102:/home/wwwroot/data/ /usr/local/nginx/html [root@ZhongH101 ~]# mount /dev/mapper/vgzhongH-root on / type ext4 (rw,acl) proc on /proc type proc (rw) sysfs on /sys type sysfs (rw) devpts on /dev/pts type devpts (rw,gid=5,mode=620) tmpfs on /dev/shm type tmpfs (rw) /dev/sda1 on /boot type ext4 (rw) /dev/mapper/vgzhongH-data on /data type ext4 (rw,acl) none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw) sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw) 172.16.6.102:/home/wwwroot/data/ on /usr/local/nginx/html type nfs (rw,vers=4,addr=172.16.6.102,clientaddr=172.16.6.101) [root@ZhongH101 ~]# umount /usr/local/nginx/html/ [root@ZhongH101 ~]# mount /dev/mapper/vgzhongH-root on / type ext4 (rw,acl) proc on /proc type proc (rw) sysfs on /sys type sysfs (rw) devpts on /dev/pts type devpts (rw,gid=5,mode=620) tmpfs on /dev/shm type tmpfs (rw) /dev/sda1 on /boot type ext4 (rw) /dev/mapper/vgzhongH-data on /data type ext4 (rw,acl) none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw) sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw) [root@ZhongH101 ~]#
3、修改node1和node2兩節點上的haresources文件
1)、將haresources修改為下面的格式
[root@ZhongH100 ~]# grep -v '^#' /etc/ha.d/haresources ZhongH100.wxjr.com.cn IPaddr::172.16.7.200/8/eth0 Filesystem::172.16.6.102:/home/wwwroot/data::/usr/local/nginx/html::nfs nginx [root@ZhongH100 ~]#
2)、將node1節點上的haresources文件同步到node2節點上
[root@ZhongH100 ~]# scp /etc/ha.d/haresources 172.16.6.101:/etc/ha.d/ haresources 100% 6028 5.9KB/s 00:00 [root@ZhongH100 ~]#
4、重啟heartbeat
[root@ZhongH100 ~]# ssh 172.16.6.101 "/etc/init.d/heartbeat restart" Stopping High-Availability services: Done. Waiting to allow resource takeover to complete:Done. Starting High-Availability services: 2015/05/29_00:04:32 INFO: Resource is stopped Done. [root@ZhongH100 ~]# /etc/init.d/heartbeat restart Stopping High-Availability services: Done. Waiting to allow resource takeover to complete:Done. Starting High-Availability services: INFO: Resource is stopped Done. [root@ZhongH100 ~]#
5、檢測端口和掛載
[root@ZhongH100 ~]# netstat -ntulp Active Internet connections (only servers) Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name tcp 0 0 0.0.0.0:49752 0.0.0.0:* LISTEN - tcp 0 0 127.0.0.1:25 0.0.0.0:* LISTEN 2015/master tcp 0 0 0.0.0.0:63705 0.0.0.0:* LISTEN 1581/rpc.statd tcp 0 0 127.0.0.1:6010 0.0.0.0:* LISTEN 2588/sshd tcp 0 0 0.0.0.0:111 0.0.0.0:* LISTEN 1456/rpcbind tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 8433/nginx tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 1781/sshd tcp 0 0 127.0.0.1:631 0.0.0.0:* LISTEN 1610/cupsd tcp 0 0 ::1:25 :::* LISTEN 2015/master tcp 0 0 ::1:6010 :::* LISTEN 2588/sshd tcp 0 0 :::19454 :::* LISTEN 1581/rpc.statd tcp 0 0 :::26303 :::* LISTEN - tcp 0 0 :::111 :::* LISTEN 1456/rpcbind tcp 0 0 :::22 :::* LISTEN 1781/sshd tcp 0 0 ::1:631 :::* LISTEN 1610/cupsd udp 0 0 0.0.0.0:62075 0.0.0.0:* 7924/heartbeat: wri udp 0 0 127.0.0.1:909 0.0.0.0:* 1581/rpc.statd udp 0 0 0.0.0.0:783 0.0.0.0:* 1456/rpcbind udp 0 0 0.0.0.0:50097 0.0.0.0:* 1581/rpc.statd udp 0 0 225.172.16.1:694 0.0.0.0:* 7924/heartbeat: wri udp 0 0 0.0.0.0:68 0.0.0.0:* 1328/dhclient udp 0 0 0.0.0.0:111 0.0.0.0:* 1456/rpcbind udp 0 0 0.0.0.0:631 0.0.0.0:* 1610/cupsd udp 0 0 :::783 :::* 1456/rpcbind udp 0 0 :::41894 :::* 1581/rpc.statd udp 0 0 :::111 :::* 1456/rpcbind [root@ZhongH100 ~]# ssh 172.16.6.101 "netstat -ntulp" Active Internet connections (only servers) Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name tcp 0 0 127.0.0.1:25 0.0.0.0:* LISTEN 2010/master tcp 0 0 0.0.0.0:63449 0.0.0.0:* LISTEN 1589/rpc.statd tcp 0 0 127.0.0.1:6010 0.0.0.0:* LISTEN 2257/sshd tcp 0 0 0.0.0.0:111 0.0.0.0:* LISTEN 1464/rpcbind tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 1789/sshd tcp 0 0 127.0.0.1:631 0.0.0.0:* LISTEN 1618/cupsd tcp 0 0 ::1:25 :::* LISTEN 2010/master tcp 0 0 ::1:6010 :::* LISTEN 2257/sshd tcp 0 0 :::56044 :::* LISTEN 1589/rpc.statd tcp 0 0 :::111 :::* LISTEN 1464/rpcbind tcp 0 0 :::22 :::* LISTEN 1789/sshd tcp 0 0 ::1:631 :::* LISTEN 1618/cupsd udp 0 0 
0.0.0.0:36225 0.0.0.0:* 1589/rpc.statd udp 0 0 127.0.0.1:917 0.0.0.0:* 1589/rpc.statd udp 0 0 0.0.0.0:791 0.0.0.0:* 1464/rpcbind udp 0 0 225.172.16.1:694 0.0.0.0:* 5316/heartbeat: wri udp 0 0 0.0.0.0:68 0.0.0.0:* 1336/dhclient udp 0 0 0.0.0.0:28508 0.0.0.0:* 5316/heartbeat: wri udp 0 0 0.0.0.0:111 0.0.0.0:* 1464/rpcbind udp 0 0 0.0.0.0:631 0.0.0.0:* 1618/cupsd udp 0 0 :::791 :::* 1464/rpcbind udp 0 0 :::111 :::* 1464/rpcbind udp 0 0 :::35954 :::* 1589/rpc.statd [root@ZhongH100 ~]# mount /dev/mapper/vgzhongH-root on / type ext4 (rw,acl) proc on /proc type proc (rw) sysfs on /sys type sysfs (rw) devpts on /dev/pts type devpts (rw,gid=5,mode=620) tmpfs on /dev/shm type tmpfs (rw) /dev/sda1 on /boot type ext4 (rw) /dev/mapper/vgzhongH-data on /data type ext4 (rw,acl) none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw) sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw) 172.16.6.102:/home/wwwroot/data on /usr/local/nginx/html type nfs (rw,vers=4,addr=172.16.6.102,clientaddr=172.16.6.100) [root@ZhongH100 ~]# ssh 172.16.6.101 "mount" /dev/mapper/vgzhongH-root on / type ext4 (rw,acl) proc on /proc type proc (rw) sysfs on /sys type sysfs (rw) devpts on /dev/pts type devpts (rw,gid=5,mode=620) tmpfs on /dev/shm type tmpfs (rw) /dev/sda1 on /boot type ext4 (rw) /dev/mapper/vgzhongH-data on /data type ext4 (rw,acl) none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw) sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw) [root@ZhongH100 ~]#
6、故障演示
[root@ZhongH100 ~]# /etc/init.d/heartbeat stop Stopping High-Availability services: Done. [root@ZhongH100 ~]# netstat -ntulp Active Internet connections (only servers) Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name tcp 0 0 127.0.0.1:25 0.0.0.0:* LISTEN 2015/master tcp 0 0 0.0.0.0:63705 0.0.0.0:* LISTEN 1581/rpc.statd tcp 0 0 127.0.0.1:6010 0.0.0.0:* LISTEN 2588/sshd tcp 0 0 0.0.0.0:111 0.0.0.0:* LISTEN 1456/rpcbind tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 1781/sshd tcp 0 0 127.0.0.1:631 0.0.0.0:* LISTEN 1610/cupsd tcp 0 0 ::1:25 :::* LISTEN 2015/master tcp 0 0 ::1:6010 :::* LISTEN 2588/sshd tcp 0 0 :::19454 :::* LISTEN 1581/rpc.statd tcp 0 0 :::111 :::* LISTEN 1456/rpcbind tcp 0 0 :::22 :::* LISTEN 1781/sshd tcp 0 0 ::1:631 :::* LISTEN 1610/cupsd udp 0 0 127.0.0.1:909 0.0.0.0:* 1581/rpc.statd udp 0 0 0.0.0.0:783 0.0.0.0:* 1456/rpcbind udp 0 0 0.0.0.0:50097 0.0.0.0:* 1581/rpc.statd udp 0 0 0.0.0.0:68 0.0.0.0:* 1328/dhclient udp 0 0 0.0.0.0:111 0.0.0.0:* 1456/rpcbind udp 0 0 0.0.0.0:631 0.0.0.0:* 1610/cupsd udp 0 0 :::783 :::* 1456/rpcbind udp 0 0 :::41894 :::* 1581/rpc.statd udp 0 0 :::111 :::* 1456/rpcbind [root@ZhongH100 ~]# ssh 172.16.6.101 "mount" /dev/mapper/vgzhongH-root on / type ext4 (rw,acl) proc on /proc type proc (rw) sysfs on /sys type sysfs (rw) devpts on /dev/pts type devpts (rw,gid=5,mode=620) tmpfs on /dev/shm type tmpfs (rw) /dev/sda1 on /boot type ext4 (rw) /dev/mapper/vgzhongH-data on /data type ext4 (rw,acl) none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw) sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw) 172.16.6.102:/home/wwwroot/data on /usr/local/nginx/html type nfs (rw,vers=4,addr=172.16.6.102,clientaddr=172.16.6.101) [root@ZhongH100 ~]#
7、查看web
8、查看系統日志
[root@ZhongH100 ~]# tail /var/log/messages May 29 00:12:58 ZhongH100 heartbeat: [7905]: info: killing HBFIFO process 7923 with signal 15 May 29 00:12:58 ZhongH100 heartbeat: [7905]: info: killing HBWRITE process 7924 with signal 15 May 29 00:12:58 ZhongH100 heartbeat: [7905]: info: killing HBREAD process 7925 with signal 15 May 29 00:12:58 ZhongH100 heartbeat: [7905]: info: Core process 7923 exited. 3 remaining May 29 00:12:58 ZhongH100 heartbeat: [7905]: info: Core process 7924 exited. 2 remaining May 29 00:12:58 ZhongH100 heartbeat: [7905]: info: Core process 7925 exited. 1 remaining May 29 00:12:58 ZhongH100 heartbeat: [7905]: info: zhongh100.wxjr.com.cn Heartbeat shutdown complete. May 29 00:13:02 ZhongH100 dhclient[1328]: DHCPREQUEST on eth0 to 172.16.0.1 port 67 (xid=0x1014dbde) May 29 00:13:02 ZhongH100 dhclient[1328]: DHCPACK from 172.16.0.1 (xid=0x1014dbde) May 29 00:13:04 ZhongH100 dhclient[1328]: bound to 172.16.6.100 -- renewal in 722 seconds. [root@ZhongH100 ~]# /etc/init.d/heartbeat start Starting High-Availability services: INFO: Resource is stopped Done. [root@ZhongH100 ~]# tail /var/log/messages May 29 00:15:09 ZhongH100 harc(default)[9023]: info: Running /etc/ha.d//rc.d/status status May 29 00:15:10 ZhongH100 heartbeat: [9003]: info: Comm_now_up(): updating status to active May 29 00:15:10 ZhongH100 heartbeat: [9003]: info: Local status now set to: 'active' May 29 00:15:10 ZhongH100 heartbeat: [9003]: info: remote resource transition completed. May 29 00:15:10 ZhongH100 heartbeat: [9003]: info: remote resource transition completed. May 29 00:15:10 ZhongH100 heartbeat: [9003]: info: Local Resource acquisition completed. (none) May 29 00:15:10 ZhongH100 heartbeat: [9003]: info: zhongh101.wxjr.com.cn wants to go standby [foreign] May 29 00:15:11 ZhongH100 heartbeat: [9003]: info: standby: acquire [foreign] resources from zhongh101.wxjr.com.cn May 29 00:15:11 ZhongH100 heartbeat: [9041]: info: acquire local HA resources (standby). 
May 29 00:15:12 ZhongH100 ResourceManager(default)[9054]: info: Acquiring resource group: zhongh100.wxjr.com.cn IPaddr::172.16.7.200/8/eth0 Filesystem::172.16.6.102:/home/wwwroot/data::/usr/local/nginx/html::nfs nginx [root@ZhongH100 ~]#
[root@ZhongH100 ~]# ssh 172.16.6.101 "tail /var/log/messages" May 29 00:15:11 ZhongH101 /usr/lib/ocf/resource.d//heartbeat/Filesystem(Filesystem_172.16.6.102:/home/wwwroot/data)[6953]: INFO: Success May 29 00:15:11 ZhongH101 ResourceManager(default)[6897]: info: Running /etc/ha.d/resource.d/IPaddr 172.16.7.200/8/eth0 stop May 29 00:15:11 ZhongH101 IPaddr(IPaddr_172.16.7.200)[7079]: INFO: IP status = ok, IP_CIP= May 29 00:15:11 ZhongH101 /usr/lib/ocf/resource.d//heartbeat/IPaddr(IPaddr_172.16.7.200)[7053]: INFO: Success May 29 00:15:11 ZhongH101 heartbeat: [6884]: info: foreign HA resource release completed (standby). May 29 00:15:11 ZhongH101 heartbeat: [5311]: info: Local standby process completed [foreign]. May 29 00:15:12 ZhongH101 heartbeat: [5311]: WARN: 1 lost packet(s) for [zhongh100.wxjr.com.cn] [11:13] May 29 00:15:12 ZhongH101 heartbeat: [5311]: info: remote resource transition completed. May 29 00:15:12 ZhongH101 heartbeat: [5311]: info: No pkts missing from zhongh100.wxjr.com.cn! May 29 00:15:12 ZhongH101 heartbeat: [5311]: info: Other node completed standby takeover of foreign resources. [root@ZhongH100 ~]#
經過上面的幾步可以發現,實驗成功,不論node1和node2誰掛掉了,我們的web服務都是正常的,實現了Web的高可用。
九、安裝MariaDB實現WEB+SQL高可用
1、在NFS服務器上再加個NFS共享
(這裡在NFS上做LVM分區就不多說了 不會的可以看我之前LVM的博文 點擊去看LVM)
[root@ZhongH102 ~]# df -hP Filesystem Size Used Avail Use% Mounted on /dev/mapper/vgzhongH-root 30G 3.3G 25G 12% / tmpfs 932M 0 932M 0% /dev/shm /dev/sda1 477M 34M 418M 8% /boot /dev/mapper/vgzhongH-data 4.8G 10M 4.6G 1% /data [root@ZhongH102 ~]# echo "/data 172.16.0.0/16(rw,all_squash,anonuid=1500,anongid=1500)" >> /etc/exports [root@ZhongH102 ~]# /etc/init.d/portreserve restart 停止 portreserve: 正在啟動 portreserve: [確定] [root@ZhongH102 ~]# /etc/init.d/nfs restart 關閉 NFS 守護進程: [確定] 關閉 NFS mountd: [確定] 關閉 NFS quotas: [確定] 關閉 NFS 服務: [確定] Shutting down RPC idmapd: [確定] 啟動 NFS 服務: [確定] 關掉 NFS 配額: [確定] 啟動 NFS mountd: [確定] 啟動 NFS 守護進程: [確定] 正在啟動 RPC idmapd: [確定] [root@ZhongH102 ~]# showmount -e 172.16.6.102 Export list for 172.16.6.102: /data 172.16.0.0/16 /home/wwwroot/data 172.16.0.0/8 [root@ZhongH102 ~]# groupadd -g 1500 mysql && useradd -g mysql -u 1500 -s /sbin/nologin -M mysql #node1 node2 NFS3台服務器的mysql用戶UID GID都要一樣 [root@ZhongH102 ~]# tail -1 /etc/passwd mysql:x:1500:1500::/home/mysql:/sbin/nologin [root@ZhongH102 ~]# chown -R mysql.mysql /data/ [root@ZhongH102 ~]#
2、node1安裝MariaDB
[root@ZhongH100 ~]# yum groupinstall "Development tools" "Server Platform Development" -y [root@ZhongH100 ~]# cd /tmp/ && wget http://www.cmake.org/files/v3.2/cmake-3.2.2.tar.gz [root@ZhongH101 /tmp]# tar xf cmake-3.2.2.tar.gz [root@ZhongH100 /tmp]# cd cmake-3.2.2 [root@ZhongH100 /tmp/cmake-3.2.2]# ./bootstrap [root@ZhongH100 /tmp/cmake-3.2.2]# [ "$?" = "0" ] && make && make install && which cmake && cd ../ /usr/local/bin/cmake [root@ZhongH100 /tmp]# wget "https://downloads.mariadb.org/interstitial/mariadb-10.0.19/source/mariadb-10.0.19.tar.gz/from/http%3A//mirrors.opencas.cn/mariadb" -O mariadb-10.0.19.tar.gz [root@ZhongH100 /tmp]# tar xf mariadb-10.0.19.tar.gz [root@ZhongH100 /tmp]# cd mariadb-10.0.19 [root@ZhongH100 /tmp/mariadb-10.0.19]# groupadd -g 1500 mysql && useradd -g mysql -u 1500 -s /sbin/nologin -M mysql #node1 node2 NFS3台服務器的mysql用戶UID GID都要一樣 [root@ZhongH100 /tmp/mariadb-10.0.19]# tail -1 /etc/passwd mysql:x:1500:1500::/home/mysql:/sbin/nologin [root@ZhongH100 /tmp/mariadb-10.0.19]# cmake . -DCMAKE_INSTALL_PREFIX=/usr/local/mysql \ -DMYSQL_DATADIR=/data/mysql \ -DWITH_SSL=system \ -DWITH_INNOBASE_STORAGE_ENGINE=1 \ -DWITH_ARCHIVE_STORAGE_ENGINE=1 \ -DWITH_BLACKHOLE_STORAGE_ENGINE=1 \ -DWITH_SPHINX_STORAGE_ENGINE=1 \ -DWITH_ARIA_STORAGE_ENGINE=1 \ -DWITH_XTRADB_STORAGE_ENGINE=1 \ -DWITH_PARTITION_STORAGE_ENGINE=1 \ -DWITH_FEDERATEDX_STORAGE_ENGINE=1 \ -DWITH_MYISAM_STORAGE_ENGINE=1 \ -DWITH_PERFSCHEMA_STORAGE_ENGINE=1 \ -DWITH_EXTRA_CHARSETS=all \ -DWITH_EMBEDDED_SERVER=1 \ -DWITH_READLINE=1 \ -DWITH_ZLIB=system \ -DWITH_LIBWRAP=0 \ -DEXTRA_CHARSETS=all \ -DENABLED_LOCAL_INFILE=1 \ -DMYSQL_UNIX_ADDR=/tmp/mysql.sock \ -DDEFAULT_CHARSET=utf8 \ -DDEFAULT_COLLATION=utf8_general_ci #這裡的編譯參數根據自己的情況適當增減 [root@ZhongH100 /tmp/mariadb-10.0.19]# make -j $(awk '/processor/{i++}END{print i}' /proc/cpuinfo) && make install && echo $? 
[root@ZhongH100 /tmp/mariadb-10.0.19]# mount -t nfs 172.16.6.102:/data/ /data [root@ZhongH100 /tmp/mariadb-10.0.19]# mount /dev/mapper/vgzhongH-root on / type ext4 (rw,acl) proc on /proc type proc (rw) sysfs on /sys type sysfs (rw) devpts on /dev/pts type devpts (rw,gid=5,mode=620) tmpfs on /dev/shm type tmpfs (rw) /dev/sda1 on /boot type ext4 (rw) /dev/mapper/vgzhongH-data on /data type ext4 (rw,acl) none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw) sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw) 172.16.6.102:/home/wwwroot/data on /usr/local/nginx/html type nfs (rw,vers=4,addr=172.16.6.102,clientaddr=172.16.6.100) 172.16.6.102:/data/ on /data type nfs (rw,vers=4,addr=172.16.6.102,clientaddr=172.16.6.100) [root@ZhongH100 /tmp/mariadb-10.0.19]# cd /usr/local/mysql/ [root@ZhongH100 /usr/local/mysql]# cp -a support-files/mysql.server /etc/rc.d/init.d/mysqld [root@ZhongH100 /usr/local/mysql]# \cp support-files/my-large.cnf /etc/my.cnf [root@ZhongH100 /usr/local/mysql]# sed -i '/query_cache_size/a datadir = /data/mysql' /etc/my.cnf [root@ZhongH100 /usr/local/mysql]# mkdir -pv /data/mysql mkdir: 已創建目錄 "/data/mysql" [root@ZhongH100 /usr/local/mysql]# /usr/local/mysql/scripts/mysql_install_db --user=mysql --datadir=/data/mysql/ [root@ZhongH100 /usr/local/mysql]# service mysqld start Starting MySQL. [確定] [root@ZhongH100 /usr/local/mysql]# /usr/local/mysql/bin/mysql <<<"show databases;" Database information_schema mysql performance_schema test [root@ZhongH100 /usr/local/mysql]# scp /etc/rc.d/init.d/mysqld 172.16.6.101:/etc/rc.d/init.d/mysqld mysqld 100% 12KB 11.9KB/s 00:00 [root@ZhongH100 /usr/local/mysql]# service mysqld stop Shutting down MySQL.. [確定] [root@ZhongH100 /usr/local/mysql]# cd /tmp/ [root@ZhongH100 /tmp]# scp cmake-3.2.2.tar.gz mariadb-10.0.19.tar.gz 172.16.6.101:/tmp cmake-3.2.2.tar.gz 100% 6288KB 6.1MB/s 00:00 mariadb-10.0.19.tar.gz 100% 54MB 53.6MB/s 00:01 [root@ZhongH100 /tmp]#
3、node2安裝MariaDB
[root@ZhongH101 ~]# yum groupinstall "Development tools" "Server Platform Development" -y [root@ZhongH101 ~]# cd /tmp [root@ZhongH101 /tmp]# tar xf cmake-3.2.2.tar.gz [root@ZhongH101 /tmp]# cd cmake-3.2.2 [root@ZhongH101 /tmp/cmake-3.2.2]# ./bootstrap [root@ZhongH101 /tmp/cmake-3.2.2]# [ "$?" = "0" ] && make && make install && which cmake && cd ../ [root@ZhongH101 /tmp]# tar xf mariadb-10.0.19.tar.gz [root@ZhongH101 /tmp]# cd mariadb-10.0.19 [root@ZhongH101 /tmp/mariadb-10.0.19]# groupadd -g 1500 mysql && useradd -g mysql -u 1500 -s /sbin/nologin -M mysql #node1 node2 NFS3台服務器的mysql用戶UID GID都要一樣 [root@ZhongH101 /tmp/mariadb-10.0.19]# tail -1 /etc/passwd mysql:x:1500:1500::/home/mysql:/sbin/nologin [root@ZhongH101 /tmp/mariadb-10.0.19]# cmake . -DCMAKE_INSTALL_PREFIX=/usr/local/mysql \ -DMYSQL_DATADIR=/data/mysql \ -DWITH_SSL=system \ -DWITH_INNOBASE_STORAGE_ENGINE=1 \ -DWITH_ARCHIVE_STORAGE_ENGINE=1 \ -DWITH_BLACKHOLE_STORAGE_ENGINE=1 \ -DWITH_SPHINX_STORAGE_ENGINE=1 \ -DWITH_ARIA_STORAGE_ENGINE=1 \ -DWITH_XTRADB_STORAGE_ENGINE=1 \ -DWITH_PARTITION_STORAGE_ENGINE=1 \ -DWITH_FEDERATEDX_STORAGE_ENGINE=1 \ -DWITH_MYISAM_STORAGE_ENGINE=1 \ -DWITH_PERFSCHEMA_STORAGE_ENGINE=1 \ -DWITH_EXTRA_CHARSETS=all \ -DWITH_EMBEDDED_SERVER=1 \ -DWITH_READLINE=1 \ -DWITH_ZLIB=system \ -DWITH_LIBWRAP=0 \ -DEXTRA_CHARSETS=all \ -DENABLED_LOCAL_INFILE=1 \ -DMYSQL_UNIX_ADDR=/tmp/mysql.sock \ -DDEFAULT_CHARSET=utf8 \ -DDEFAULT_COLLATION=utf8_general_ci #這裡的編譯參數根據自己的情況適當增減 [root@ZhongH101 /tmp/mariadb-10.0.19]# mount -t nfs 172.16.6.102:/data/ /data [root@ZhongH101 /tmp/mariadb-10.0.19]# mount /dev/mapper/vgzhongH-root on / type ext4 (rw,acl) proc on /proc type proc (rw) sysfs on /sys type sysfs (rw) devpts on /dev/pts type devpts (rw,gid=5,mode=620) tmpfs on /dev/shm type tmpfs (rw) /dev/sda1 on /boot type ext4 (rw) /dev/mapper/vgzhongH-data on /data type ext4 (rw,acl) none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw) sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw) 172.16.6.102:/data/ on 
/data type nfs (rw,vers=4,addr=172.16.6.102,clientaddr=172.16.6.101) [root@ZhongH101 /tmp/mariadb-10.0.19]# ls -l /data/ 總用量 20 drwx------ 2 mysql mysql 16384 5月 24 03:28 lost+found drwxr-xr-x 5 mysql mysql 4096 5月 29 12:54 mysql [root@ZhongH101 /usr/local/mysql]# scp 172.16.6.100:/etc/my.cnf /etc/my.cnf my.cnf 100% 4903 4.8KB/s 00:00 [root@ZhongH101 /usr/local/mysql]# service mysqld start Starting MySQL. [確定] [root@ZhongH101 /usr/local/mysql]# ss -tnl | grep 3306 LISTEN 0 150 :::3306 :::* [root@ZhongH101 /usr/local/mysql]# service mysqld stop Shutting down MySQL.. [確定] [root@ZhongH101 /usr/local/mysql]#
3、修改haresources配置
[root@ZhongH /usr/local/mysql]# grep -v '^#' /etc/ha.d/haresources ZhongH100.wxjr.com.cn IPaddr::172.16.7.200/16/eth0 Filesystem::172.16.6.102:/home/wwwroot/data::/usr/local/nginx/html::nfs Filesystem::172.16.6.102:/data::/data::nfs mysqld nginx
[root@ZhongH100 /tmp]# /etc/init.d/heartbeat restart Stopping High-Availability services: Done. Waiting to allow resource takeover to complete:Done. Starting High-Availability services: INFO: Resource is stopped Done. [root@ZhongH100 /tmp]# ssh [email protected] "/etc/init.d/heartbeat restart" Stopping High-Availability services: Done. Waiting to allow resource takeover to complete:Done. Starting High-Availability services: 2015/05/29_14:03:00 INFO: Resource is stopped Done.
4、查看結果
[root@ZhongH100 /tmp]# netstat -utnlp Active Internet connections (only servers) Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name tcp 0 0 127.0.0.1:25 0.0.0.0:* LISTEN 2015/master tcp 0 0 0.0.0.0:63705 0.0.0.0:* LISTEN 1581/rpc.statd tcp 0 0 127.0.0.1:6010 0.0.0.0:* LISTEN 13048/sshd tcp 0 0 0.0.0.0:27108 0.0.0.0:* LISTEN - tcp 0 0 0.0.0.0:111 0.0.0.0:* LISTEN 1456/rpcbind tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 53485/nginx tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 17419/sshd tcp 0 0 127.0.0.1:631 0.0.0.0:* LISTEN 1610/cupsd tcp 0 0 ::1:25 :::* LISTEN 2015/master tcp 0 0 ::1:6010 :::* LISTEN 13048/sshd tcp 0 0 :::19454 :::* LISTEN 1581/rpc.statd tcp 0 0 :::3306 :::* LISTEN 53333/mysqld tcp 0 0 :::111 :::* LISTEN 1456/rpcbind tcp 0 0 :::38934 :::* LISTEN - tcp 0 0 :::22 :::* LISTEN 17419/sshd tcp 0 0 ::1:631 :::* LISTEN 1610/cupsd udp 0 0 127.0.0.1:909 0.0.0.0:* 1581/rpc.statd udp 0 0 0.0.0.0:783 0.0.0.0:* 1456/rpcbind udp 0 0 0.0.0.0:50097 0.0.0.0:* 1581/rpc.statd udp 0 0 225.172.16.1:694 0.0.0.0:* 52614/heartbeat: wr udp 0 0 0.0.0.0:68 0.0.0.0:* 1328/dhclient udp 0 0 0.0.0.0:44624 0.0.0.0:* 52614/heartbeat: wr udp 0 0 0.0.0.0:111 0.0.0.0:* 1456/rpcbind udp 0 0 0.0.0.0:631 0.0.0.0:* 1610/cupsd udp 0 0 :::783 :::* 1456/rpcbind udp 0 0 :::41894 :::* 1581/rpc.statd udp 0 0 :::111 :::* 1456/rpcbind [root@ZhongH100 /tmp]# ip addr 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000 link/ether 00:0c:29:e6:29:99 brd ff:ff:ff:ff:ff:ff inet 172.16.6.100/16 brd 172.16.255.255 scope global eth0 inet 172.16.7.200/16 brd 172.16.255.255 scope global secondary eth0 inet6 fe80::20c:29ff:fee6:2999/64 scope link valid_lft forever preferred_lft forever 3: pan0: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN 
link/ether da:7b:ee:d4:f3:0b brd ff:ff:ff:ff:ff:ff [root@ZhongH100 /tmp]# mount /dev/mapper/vgzhongH-root on / type ext4 (rw,acl) proc on /proc type proc (rw) sysfs on /sys type sysfs (rw) devpts on /dev/pts type devpts (rw,gid=5,mode=620) tmpfs on /dev/shm type tmpfs (rw) /dev/sda1 on /boot type ext4 (rw) none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw) sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw) 172.16.6.102:/home/wwwroot/data on /usr/local/nginx/html type nfs (rw,vers=4,addr=172.16.6.102,clientaddr=172.16.6.100) 172.16.6.102:/data on /data type nfs (rw,vers=4,addr=172.16.6.102,clientaddr=172.16.6.100) [root@ZhongH100 /tmp]#
5、故障演示
[root@ZhongH100 /tmp]# /etc/init.d/heartbeat stop Stopping High-Availability services: Done. [root@ZhongH100 /tmp]# ip addr 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000 link/ether 00:0c:29:e6:29:99 brd ff:ff:ff:ff:ff:ff inet 172.16.6.100/16 brd 172.16.255.255 scope global eth0 inet6 fe80::20c:29ff:fee6:2999/64 scope link valid_lft forever preferred_lft forever 3: pan0: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN link/ether da:7b:ee:d4:f3:0b brd ff:ff:ff:ff:ff:ff
[root@ZhongH100 /tmp]# ssh [email protected] "netstat -utnlp" Active Internet connections (only servers) Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name tcp 0 0 127.0.0.1:25 0.0.0.0:* LISTEN 2010/master tcp 0 0 0.0.0.0:63449 0.0.0.0:* LISTEN 1589/rpc.statd tcp 0 0 127.0.0.1:6010 0.0.0.0:* LISTEN 10386/sshd tcp 0 0 0.0.0.0:30991 0.0.0.0:* LISTEN - tcp 0 0 0.0.0.0:111 0.0.0.0:* LISTEN 1464/rpcbind tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 51991/nginx tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 14298/sshd tcp 0 0 127.0.0.1:631 0.0.0.0:* LISTEN 1618/cupsd tcp 0 0 :::56089 :::* LISTEN - tcp 0 0 ::1:25 :::* LISTEN 2010/master tcp 0 0 ::1:6010 :::* LISTEN 10386/sshd tcp 0 0 :::3306 :::* LISTEN 51839/mysqld tcp 0 0 :::56044 :::* LISTEN 1589/rpc.statd tcp 0 0 :::111 :::* LISTEN 1464/rpcbind tcp 0 0 :::22 :::* LISTEN 14298/sshd tcp 0 0 ::1:631 :::* LISTEN 1618/cupsd udp 0 0 0.0.0.0:36225 0.0.0.0:* 1589/rpc.statd udp 0 0 127.0.0.1:917 0.0.0.0:* 1589/rpc.statd udp 0 0 0.0.0.0:791 0.0.0.0:* 1464/rpcbind udp 0 0 0.0.0.0:6953 0.0.0.0:* 51009/heartbeat: wr udp 0 0 225.172.16.1:694 0.0.0.0:* 51009/heartbeat: wr udp 0 0 0.0.0.0:68 0.0.0.0:* 1336/dhclient udp 0 0 0.0.0.0:111 0.0.0.0:* 1464/rpcbind udp 0 0 0.0.0.0:631 0.0.0.0:* 1618/cupsd udp 0 0 :::791 :::* 1464/rpcbind udp 0 0 :::111 :::* 1464/rpcbind udp 0 0 :::35954 :::* 1589/rpc.statd [root@ZhongH100 /tmp]# ssh [email protected] "ip addr" 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000 link/ether 00:0c:29:14:0f:ea brd ff:ff:ff:ff:ff:ff inet 172.16.6.101/16 brd 172.16.255.255 scope global eth0 inet 172.16.7.200/16 brd 172.16.255.255 scope global secondary eth0 inet6 fe80::20c:29ff:fe14:fea/64 scope link valid_lft forever preferred_lft forever 3: pan0: 
<BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN link/ether f2:d1:dc:4f:50:3a brd ff:ff:ff:ff:ff:ff