一、配置说明
1、虚拟服务器配置参数:
virtual server (虚拟服务)的定义: virtual_server IP port#定义虚拟主机IP地址及其端口 virtual_server fwmark int#ipvs的防火墙打标,实现基于防火墙的负载均衡集群 virtual_server group string#将多个虚拟服务器定义成组,将组定义成虚拟服务
virtual_server IP port {
...
real_server {
...
}
...
}
delay_loop :检查后端服务器的时间间隔 lb_algo rr|wrr|lc|wlc|lblc|sh|dh:定义调度方法 lb_kind NAT|DR|TUN:集群的类型 persistence_timeout :持久连接时长 protocol TCP|UDP|SCTP:指定服务协议 sorry_server :所有RS故障时,备用服务器地址
real_server <IPADDR> <PORT> {
weight <INT> RS权重
notify_up <STRING>|<QUOTED-STRING> RS上线通知脚本
notify_down <STRING>|<QUOTED-STRING> RS下线通知脚本
HTTP_GET|SSL_GET|TCP_CHECK|SMTP_CHECK|MISC_CHECK { ... }:定义当前主机的健康状态检测方法
}
2、 应用层监测
HTTP_GET|SSL_GET:应用层检测
HTTP_GET|SSL_GET {
url {
path <URL_PATH>:定义要监控的URL
status_code <INT>:判断上述检测机制为健康状态的响应码
}
connect_timeout <INTEGER>:客户端请求的超时时长, 等于haproxy的timeout server
nb_get_retry <INT>:重试次数
delay_before_retry <INT>:重试之前的延迟时长
connect_ip <IP ADDRESS>:向当前RS哪个IP地址发起健康状态检测请求
connect_port <PORT>:向当前RS的哪个PORT发起健康状态检测请求
bindto <IP ADDRESS>:发出健康状态检测请求时使用的源地址
bind_port <PORT>:发出健康状态检测请求时使用的源端口
}
3、TCP监测
TCP_CHECK 传输层检测
TCP_CHECK {
connect_ip <IP ADDRESS>:向当前RS的哪个IP地址发起健康状态检测请求
connect_port <PORT>:向当前RS的哪个PORT发起健康状态检测请求
bindto <IP ADDRESS>:发出健康状态检测请求时使用的源地址
bind_port <PORT>:发出健康状态检测请求时使用的源端口
connect_timeout <INTEGER>:客户端请求的超时时长, 等于haproxy的timeout server
}
二、测试验证
1、规划
| IP | 主机名 | 服务 |
| --- | --- | --- |
| 10.10.100.106(vip 10.10.100.110) | node1 | Keepalived + LVS |
| 10.10.100.107(vip 10.10.100.110) | node2 | Keepalived + LVS |
| 10.10.100.102(vip 10.10.100.110) | node3 | nginx |
| 10.10.100.103(vip 10.10.100.110) | node4 | nginx |
2、实现LVS-DR模式
2.1 node3,node4 测试页面
[root@node1 ~]
web01 10.10.100.102
[root@node1 ~]
web02 10.10.100.103
2.2 node3,node4 绑定VIP到lo网卡
[root@node3 ~]
#!/bin/bash
# Bind the LVS VIP to lo:0 on a DR-mode real server and tune kernel ARP
# parameters so that only the director answers ARP requests for the VIP.
# Usage: script start|stop
vip=10.10.100.110
mask='255.255.255.255'

case "$1" in
start)
  # arp_ignore=1: reply only to ARP queries targeting an address configured
  # on the incoming interface; arp_announce=2: always use the best local
  # source address in ARP announcements. Together these stop the real
  # server from claiming the VIP on the LAN (mandatory for LVS-DR).
  echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
  echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
  echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
  echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce
  # /32 netmask + broadcast=VIP keeps the VIP strictly local to lo:0.
  ifconfig lo:0 "$vip" netmask "$mask" broadcast "$vip" up
  route add -host "$vip" dev lo:0
  ;;
stop)
  ifconfig lo:0 down
  # Restore the kernel's default ARP behaviour once the VIP is removed.
  echo 0 > /proc/sys/net/ipv4/conf/all/arp_ignore
  echo 0 > /proc/sys/net/ipv4/conf/lo/arp_ignore
  echo 0 > /proc/sys/net/ipv4/conf/all/arp_announce
  echo 0 > /proc/sys/net/ipv4/conf/lo/arp_announce
  ;;
*)
  # Diagnostics go to stderr; quote $0 in case the path contains spaces.
  echo "Usage $(basename "$0") start|stop" >&2
  exit 1
  ;;
esac
[root@node3 ~]
[root@node3 ~]
lo:0: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 10.10.100.110 netmask 255.255.255.255
loop txqueuelen 1000 (Local Loopback)
2.3 keepalived 配置
2.3.1 TCP监测
! Configuration File for keepalived
! Global settings shared by every VRRP instance / virtual server below.
global_defs {
notification_email {
root@localhost
}
notification_email_from keepalived@localhost    # From: address of alert mail
smtp_server 127.0.0.1                           # local MTA used to send alerts
smtp_connect_timeout 30                         # SMTP connect timeout (seconds)
router_id ha1.example.com                       # unique identifier of this node
vrrp_skip_check_adv_addr                        # skip address-list check on repeated adverts
vrrp_garp_interval 0                            # no rate limit on gratuitous ARP
vrrp_gna_interval 0                             # no rate limit on unsolicited NA (IPv6)
vrrp_mcast_group4 224.0.0.18                    # VRRP IPv4 multicast group
}
# VRRP instance owning VIP 10.10.100.110 on node1 (10.10.100.106).
# Both nodes start as BACKUP; the master is elected by priority (100 here,
# 90 on the peer), with VRRP adverts exchanged over unicast.
vrrp_instance VI_1 {
state BACKUP                        # initial state; election decides the master
interface eth0                      # interface carrying VRRP traffic
virtual_router_id 80                # must match on both nodes
priority 100                        # higher value wins the election
advert_int 1                        # advertisement interval (seconds)
unicast_src_ip 10.10.100.106        # this node's address
unicast_peer {
10.10.100.107
}
authentication {
auth_type PASS                      # simple password authentication
auth_pass 1111
}
virtual_ipaddress {
10.10.100.110 dev eth0 label eth0:0    # the VIP, bound as alias eth0:0
}
}
# LVS virtual service on the VIP: round-robin, DR forwarding, TCP port 80,
# with a plain TCP connect health check against each real server.
virtual_server 10.10.100.110 80 {
delay_loop 3            # health-check interval (seconds)
lb_algo rr              # round-robin scheduling
lb_kind DR              # direct-routing forwarding mode
protocol TCP
real_server 10.10.100.102 80 {
weight 1
TCP_CHECK {
connect_timeout 5       # connect timeout for the probe (seconds)
# NOTE(review): newer keepalived releases renamed nb_get_retry to
# 'retry' — confirm the keyword against the installed version.
nb_get_retry 3
delay_before_retry 3    # delay between retries (seconds)
connect_port 80         # port probed by the TCP check
}
}
real_server 10.10.100.103 80 {
weight 1
TCP_CHECK {
connect_timeout 5
nb_get_retry 3
delay_before_retry 3
connect_port 80
}
}
}
[root@node1 keepalived]
[root@node1 keepalived]
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 10.10.100.110:80 rr
-> 10.10.100.102:80 Route 1 3 4
-> 10.10.100.103:80 Route 1 0 2
[root@localhost ~]
web02 10.10.100.103
web01 10.10.100.102
web02 10.10.100.103
web01 10.10.100.102
web02 10.10.100.103
2.3.2 http监测
# HTTP-layer health-check variant: fetch /index.html from each real server
# and treat an HTTP 200 response as healthy (drop-in replacement for the
# TCP_CHECK blocks inside the virtual_server above).
real_server 10.10.100.102 80 {
weight 1
HTTP_GET {
url {
path /index.html        # URL fetched by the health check
status_code 200         # response code considered healthy
}
connect_timeout 5       # connect timeout for the probe (seconds)
nb_get_retry 3          # retries before marking the RS down
delay_before_retry 3    # delay between retries (seconds)
}
}
real_server 10.10.100.103 80 {
weight 1
HTTP_GET {
url {
path /index.html
status_code 200
}
connect_timeout 5
nb_get_retry 3
delay_before_retry 3
}
}
3、VRRP Script使用
keepalived调用外部的辅助脚本进行资源监控,并根据监控的结果状态能实现优先动态调整 vrrp_script:自定义资源监控脚本,vrrp实例根据脚本返回值进行下一步操作,脚本可被多个实例调用 track_script:调用vrrp_script定义的脚本去监控资源,定义在实例之内,调用事先定义的vrrp_script
# Syntax reference (values in <> are placeholders, not literal config).
# vrrp_script defines a reusable health check; track_script (inside a
# vrrp_instance) subscribes the instance to one or more defined scripts.
vrrp_script <SCRIPT_NAME> {
script <STRING>|<QUOTED-STRING>     # command to execute
OPTIONS
}
track_script {
SCRIPT_NAME_1
SCRIPT_NAME_2
}
# Full option list for vrrp_script:
vrrp_script <SCRIPT_NAME> {
script <STRING>|<QUOTED-STRING>     # command to execute
interval <INTEGER>                  # seconds between runs
timeout <INTEGER>                   # kill the script after this many seconds
weight <INTEGER:-254..254>          # priority delta applied on success/failure
fall <INTEGER>                      # consecutive failures before KO
rise <INTEGER>                      # consecutive successes before OK
user USERNAME [GROUPNAME]           # account the script runs as
init_fail                           # assume the script is failed at startup
}
3.1 基于第三方仲裁设备
# touch /data/keepalived/device/down #当存在此仲裁设备时发生故障迁移,适用场景:在backup服务器探测文件是否存在,当master运行正常的时候没有此文件,当master异常的时候生成此文件,然后将backup服务器的优先级设置高于master,从而将VIP接管到backup服务器
3.1.1 示例
# Arbitration-file check: succeed (exit 0) only while the witness file
# exists, granting this BACKUP node +80 priority so it preempts the VIP.
vrrp_script chk_down {
    # FIX: the '&& exit 0 || exit 7' must be INSIDE the single-quoted
    # bash -c command string; in the original it sat outside the quotes,
    # so the compound command was not what keepalived actually ran.
    script "/bin/bash -c '[[ -f /etc/keepalived/device/down ]] && exit 0 || exit 7'"
    interval 2      # run every 2 seconds
    weight 80       # add 80 to priority while the script succeeds
    fall 3          # 3 consecutive failures -> KO
    rise 3          # 3 consecutive successes -> OK
    timeout 2       # kill the script after 2 seconds
    user root       # run as root
}
# Peer VRRP instance on node2 (10.10.100.107): lower base priority (90),
# raised above the master's 100 by chk_down's +80 weight when the
# arbitration file appears, which moves the VIP to this node.
vrrp_instance VI_1 {
state BACKUP                        # initial state; election decides the master
interface eth0                      # interface carrying VRRP traffic
virtual_router_id 80                # must match the peer
priority 90                         # base priority, below the peer's 100
advert_int 1                        # advertisement interval (seconds)
unicast_src_ip 10.10.100.107        # this node's address
unicast_peer {
10.10.100.106
}
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.10.100.110 dev eth0 label eth0:0    # the VIP, bound as alias eth0:0
}
track_script {
chk_down                            # apply the arbitration-file check above
}
}
测试验证可以看到当BACKUP监测到/etc/keepalived/device/down文件存在时,会增加优先级,将vip抢占过来
4、高可用HAProxy
# Monitor the local haproxy process; while the check fails, subtract 80
# from this node's priority so the peer takes over the VIP.
vrrp_script chk_haproxy {
script "/etc/keepalived/chk_haproxy.sh"   # runs 'killall -0 haproxy' (shown below)
interval 2      # run every 2 seconds
weight -80      # negative weight: demote priority while the check fails
fall 3          # 3 consecutive failures -> KO
rise 3          # 3 consecutive successes -> OK
timeout 2       # kill the script after 2 seconds
user root       # run as root
}
vrrp_instance VI_1 {
...
track_script {
chk_haproxy
}
}
[root@node1 ]
[root@node1 keepalived]
/usr/bin/killall -0 haproxy
[root@node1 keepalived]
5、高可用Nginx
# Monitor the local nginx process; while the check fails, subtract 80
# from this node's priority so the peer takes over the VIP.
vrrp_script chk_nginx {
script "/etc/keepalived/chk_nginx.sh"   # runs 'killall -0 nginx' (shown below)
interval 2      # run every 2 seconds
weight -80      # negative weight: demote priority while the check fails
fall 3          # 3 consecutive failures -> KO
rise 3          # 3 consecutive successes -> OK
timeout 2       # kill the script after 2 seconds
user root       # run as root
}
vrrp_instance VI_1 {
...
track_script {
chk_nginx
}
}
[root@node1 keepalived]
/usr/bin/killall -0 nginx
[root@node1 keepalived]
|