In the previous post, load balancing for the backend web servers was implemented with HAProxy; this time we will accomplish the same task with nginx. The principle is largely the same, except that nginx can only load-balance HTTP and mail traffic, which is somewhat limiting, but its regular-expression support is excellent. Let's go straight to the experiment:

【Lab Environment】

nginx + keepalived master: 192.168.56.120
nginx + keepalived backup: 192.168.56.121
VIP:  192.168.56.130
Web1: 192.168.56.113
Web2: 192.168.56.114

【Lab Topology】

I. Install and configure nginx
1. Configure the hostname (node2 is configured the same way as node1)
[root@localhost ~]# vim /etc/sysconfig/network
HOSTNAME=node1
[root@localhost ~]# vim /etc/hosts
192.168.56.120 node1
192.168.56.121 node2
192.168.56.113 web1
192.168.56.114 web2

[root@localhost ~]# hostname node1

2. Install and configure nginx (node2 is configured the same as node1)
[root@node1 src]# useradd -s /sbin/nologin -M www
[root@node1 src]# wget http://nginx.org/download/nginx-1.5.3.tar.gz
[root@node1 src]# yum -y install pcre-devel openssl-devel perl-ExtUtils-Embed
[root@node1 src]# tar xf nginx-1.5.3.tar.gz
[root@node1 src]# cd nginx-1.5.3
[root@node1 nginx-1.5.3]# ./configure --prefix=/usr/local/nginx --user=www --group=www --with-http_ssl_module --with-http_gzip_static_module --without-http_uwsgi_module --without-http_scgi_module --without-http_upstream_ip_hash_module --with-http_perl_module --with-pcre
[root@node1 nginx-1.5.3]# make && make install
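
Before going further, you may want to confirm which modules were actually compiled in. A quick check (not part of the original steps) is to print the build arguments:

[root@node1 nginx-1.5.3]# /usr/local/nginx/sbin/nginx -V

The output lists the configure arguments, so you can verify that http_ssl_module and http_perl_module are present and that the ip_hash upstream module was left out.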

Configure nginx

[root@node1 ~]# vim /usr/local/nginx/conf/nginx.conf
user  www www;
worker_processes  8;
error_log  logs/error.log;
pid        logs/nginx.pid;

events {
    worker_connections  1024;
}

http {
    include       mime.types;
    default_type  application/octet-stream;
    sendfile      on;
    tcp_nopush    on;
    keepalive_timeout  65;

    gzip  on;

    upstream web_server_pool {
        #ip_hash;   # Enable this if you need session persistence: every request from the
                    # same client IP goes to the same backend. Note that it requires the
                    # ip_hash module, which the configure command above explicitly disabled
                    # with --without-http_upstream_ip_hash_module.
        server 192.168.56.113:80 weight=4 max_fails=2 fail_timeout=30s;
        server 192.168.56.114:80 weight=4 max_fails=2 fail_timeout=30s;
    }

    server {
        listen       80;
        server_name  192.168.56.120;    # on node2, change to 192.168.56.121
        location / {
            root   html;
            index  index.html index.htm;
            proxy_pass http://web_server_pool;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $remote_addr;
        }
        error_page  500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }
    }
}

[root@node1 ~]# /usr/local/nginx/sbin/nginx -t
nginx: the configuration file /usr/local/nginx/conf/nginx.conf syntax is ok

nginx: configuration file /usr/local/nginx/conf/nginx.conf test is successful

Start the service

[root@node1 ~]# /usr/local/nginx/sbin/nginx
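
Whenever you change nginx.conf later, you can apply it without restarting the process. These are standard nginx switches rather than steps from the original walkthrough, so treat the following as a small convenience sketch:

[root@node1 ~]# /usr/local/nginx/sbin/nginx -t          # re-check the syntax first
[root@node1 ~]# /usr/local/nginx/sbin/nginx -s reload   # graceful reload of the running instance
[root@node1 ~]# netstat -nltp | grep :80                # confirm nginx is still listening on port 80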

3. Install and configure the web servers
Web server configuration (web2 is the same as web1; just change the relevant parts to web2):
[root@localhost ~]# hostname web1
[root@web1 ~]# vim /etc/hosts
192.168.56.120 node1
192.168.56.121 node2
192.168.56.113 web1
192.168.56.114 web2
[root@web1 ~]# yum install -y httpd
[root@web1 ~]# service httpd start
[root@web1 ~]# echo "<h1>welcome to web1</h1>" > /var/www/html/index.html
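
Before testing through the load balancer, it may be worth checking each backend directly from node1. This is just a sanity check I'm adding, assuming the web servers' firewalls allow port 80; the expected output follows from the index pages created above:

[root@node1 ~]# curl http://192.168.56.113/
<h1>welcome to web1</h1>
[root@node1 ~]# curl http://192.168.56.114/
<h1>welcome to web2</h1>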

4. Test nginx load balancing
With the firewall and SELinux turned off on node1, open a browser and visit the node's IP (http://192.168.56.120).

You can also test with curl:
[root@node1 ~]# yum -y install curl
[root@node1 ~]# curl http://192.168.56.120/
<h1>welcome to web1</h1>
[root@node1 ~]# curl http://192.168.56.120/
<h1>welcome to web2</h1>
[root@node1 ~]# curl http://192.168.56.120/
<h1>welcome to web1</h1>
[root@node1 ~]# curl http://192.168.56.120/
<h1>welcome to web2</h1>
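
To see the round-robin distribution more clearly, a small loop like the following (just a convenience, not from the original post) counts how many responses come from each backend; with both servers at weight=4 the counts should be roughly even:

[root@node1 ~]# for i in $(seq 1 10); do curl -s http://192.168.56.120/; done | sort | uniq -c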

II. Install and configure keepalived (node2's configuration follows node1's and is largely the same)
1. Install keepalived
[root@node1 src]# yum -y install popt popt-devel popt-static openssl-devel kernel-devel libnl libnl-devel
[root@node1 src]# wget http://www.keepalived.org/software/keepalived-1.2.4.tar.gz
[root@node1 src]# tar xf keepalived-1.2.4.tar.gz
[root@node1 src]# cd keepalived-1.2.4
[root@node1 keepalived-1.2.4]# ./configure --prefix=/usr/local/keepalived

[root@node1 keepalived-1.2.4]# make && make install

[root@node1 keepalived-1.2.4]# cp /usr/local/keepalived/etc/rc.d/init.d/keepalived /etc/init.d/
[root@node1 keepalived-1.2.4]# cp /usr/local/keepalived/sbin/keepalived /usr/sbin/
[root@node1 keepalived-1.2.4]# cp /usr/local/keepalived/etc/sysconfig/keepalived /etc/sysconfig/
[root@node1 keepalived-1.2.4]# mkdir -p /etc/keepalived
[root@node1 keepalived-1.2.4]# cp /usr/local/keepalived/etc/keepalived/keepalived.conf /etc/keepalived/

[root@node1 keepalived-1.2.4]# chmod +x /etc/init.d/keepalived
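
Optionally (this is not in the original steps), you can register the init script so keepalived starts automatically at boot on this CentOS-style system:

[root@node1 keepalived-1.2.4]# chkconfig --add keepalived
[root@node1 keepalived-1.2.4]# chkconfig keepalived on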

Configure keepalived
[root@node1 keepalived-1.2.4]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    notification_email {
        pmghong@163.com
    }
    notification_email_from pmghong@163.com
    smtp_server 192.168.56.120        # on node2, change to 192.168.56.121
    smtp_connect_timeout 30
    router_id LVS_DEVEL
}

vrrp_script chk_nginx {               # define the health-check script
    script "/etc/keepalived/checkNginx.sh"
    interval 2
    weight 2
}

vrrp_instance VI_1 {
    state MASTER                      # on node2, change to BACKUP
    interface eth0
    virtual_router_id 51
    priority 120                      # node2's value must be lower than this, e.g. 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.56.130
    }
    track_script {
        # run the health-check script; this block must be included,
        # otherwise the script defined above will never be called
        chk_nginx
    }
}
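
Since node2's file differs from node1's only in the lines flagged by the comments above, one way to produce it (a convenience sketch, assuming you first copied node1's /etc/keepalived/keepalived.conf over to node2) is:

[root@node2 ~]# sed -i -e 's/smtp_server 192.168.56.120/smtp_server 192.168.56.121/' \
                       -e 's/state MASTER/state BACKUP/' \
                       -e 's/priority 120/priority 100/' \
                       /etc/keepalived/keepalived.conf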

2. Create the check script
[root@node1 keepalived-1.2.4]# vim /etc/keepalived/checkNginx.sh
#!/bin/bash
# Automatically check the nginx process; if it has died,
# stop keepalived so the VIP fails over to the backup node.
killall -0 nginx
if [[ $? -ne 0 ]]; then
    /etc/init.d/keepalived stop
fi

[root@node1 keepalived-1.2.4]# chmod +x /etc/keepalived/checkNginx.sh
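
A slightly more forgiving variant of the check script (my own tweak, not from the original post) first tries to restart nginx once and only stops keepalived, and thus gives up the VIP, if that restart fails:

#!/bin/bash
# Try to bring nginx back once before handing the VIP to the backup.
if ! killall -0 nginx 2>/dev/null; then
    /usr/local/nginx/sbin/nginx       # attempt an automatic restart
    sleep 2
    if ! killall -0 nginx 2>/dev/null; then
        /etc/init.d/keepalived stop   # still down: let the backup take over
    fi
fi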

3. Start the services
[root@node1 ~]# service keepalived start
Starting keepalived: [ OK ]
[root@node2 ~]# service keepalived start
Starting keepalived: [ OK ]
[root@node1 ~]# ip addr
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 08:00:27:95:99:b7 brd ff:ff:ff:ff:ff:ff
inet 192.168.56.120/24 brd 192.168.56.255 scope global eth0
inet 192.168.56.130/32 scope global eth0
inet6 fe80::a00:27ff:fe95:99b7/64 scope link

valid_lft forever preferred_lft forever
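
To confirm that the two nodes are actually exchanging VRRP advertisements, an optional check (not part of the original steps) is to watch the traffic on either node:

[root@node1 ~]# tcpdump -i eth0 -nn vrrp

The current master should advertise vrid 51 roughly once per second, matching advert_int 1 in the configuration.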

4. Test
(1) Open a browser and test access.

Stop the nginx service on node1 and observe the result.

PS: Many tutorials online test this step by stopping the keepalived service on node1 and then checking whether failover works. I think that is not quite right: in a real environment, the probability of keepalived itself failing is very low compared with the load balancer or the web servers. The key point of this experiment is whether, when nginx on node1 stops because of a fault, node2 can take over the load-balancing role through keepalived so that the site stays accessible without interruption. So what should be stopped here is nginx on node1, not keepalived.

[root@node1 keepalived]# netstat -nultp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name

tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 3786/nginx

[root@node1 keepalived]# kill 3786

(2) From an unrelated machine, keep pinging the VIP.

In this test only a single packet was lost.
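
If you want to watch the failover window in more detail, a rough monitor run from that unrelated machine (just a sketch, not from the original post) polls the VIP once per second and prints the HTTP status code, so you can see exactly how long the site is unreachable:

while true; do
    curl -s -o /dev/null -m 2 -w "%{http_code}\n" http://192.168.56.130/
    sleep 1
done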

(3) Test page access.
[root@node2 ~]# ip addr
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 08:00:27:7f:4b:aa brd ff:ff:ff:ff:ff:ff
inet 192.168.56.121/24 brd 192.168.56.255 scope global eth0
inet 192.168.56.130/32 scope global eth0
inet6 fe80::a00:27ff:fe7f:4baa/64 scope link
valid_lft forever preferred_lft forever

As shown above, the VIP has moved over to node2.

The website is still accessible:
[root@node2 ~]# curl http://192.168.56.130
<h1>welcome to web1</h1>
[root@node2 ~]# curl http://192.168.56.130
<h1>welcome to web2</h1>
[root@node2 ~]# curl http://192.168.56.130
<h1>welcome to web1</h1>
[root@node2 ~]# curl http://192.168.56.130
<h1>welcome to web2</h1>
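
Finally, to bring node1 back into service and confirm that the VIP fails back (the MASTER has the higher priority and keepalived preempts by default), a closing check along these lines should work:

[root@node1 ~]# /usr/local/nginx/sbin/nginx                 # restore nginx on node1
[root@node1 ~]# service keepalived start                    # restart keepalived, which the check script had stopped
[root@node1 ~]# ip addr show eth0 | grep 192.168.56.130     # the VIP should reappear on node1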