1. Introduction to keepalived and VRRP
keepalived is a service used in cluster management to keep a cluster highly available; its purpose is to guard against single points of failure.
keepalived is built on VRRP, the Virtual Router Redundancy Protocol. It consists of three modules: core, check, and vrrp. The core module is the heart of keepalived: it starts and maintains the main process and loads and parses the global configuration file. The check module performs health checks and ships with the common check methods. The vrrp module implements the VRRP protocol itself.
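To watch VRRP at work, you can capture its advertisements: they are multicast packets sent to 224.0.0.18 (IP protocol 112), one per advertisement interval. A quick sketch (the interface name ens33 matches the configurations used later in this article):
[root@master ~]# tcpdump -i ens33 -nn vrrp
# if the vrrp filter keyword is not recognized, match the protocol number instead:
[root@master ~]# tcpdump -i ens33 -nn 'ip proto 112'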
2. Layer-7 load balancing with nginx + keepalived
nginx implements load balancing through its upstream module.
Install nginx on all machines, and stop the firewall and SELinux first:
systemctl stop firewalld && setenforce 0
[root@nginx-proxy ~]# cd /etc/yum.repos.d/
[root@nginx-proxy yum.repos.d]# vim nginx.repo
[nginx-stable]
name=nginx stable repo
baseurl=http://nginx.org/packages/centos/$releasever/$basearch/
gpgcheck=0
enabled=1
[root@nginx-proxy yum.repos.d]# yum install yum-utils -y
[root@nginx-proxy yum.repos.d]# yum install nginx -y
1) Pick two nginx servers to act as proxies (both use identical configuration files).
2) Install keepalived on both proxy servers to build a high-availability cluster and bring up the VIP.
3) Configure nginx load balancing (the upstream configuration file):
[root@proxy ~]# cd /etc/nginx/conf.d/
[root@proxy conf.d]# mv default.conf default.conf.bak
[root@proxy conf.d]# vim upstream.conf
upstream index {
    server 192.168.73.60:80 weight=1 max_fails=2 fail_timeout=2;
    server 192.168.73.70:80 weight=2 max_fails=2 fail_timeout=2;
}
[root@proxy conf.d]# vim proxy.conf
server {
    listen 80;
    server_name localhost;
    access_log /var/log/nginx/host.access.log main;
    location / {
        proxy_pass http://index;
        proxy_redirect default;
        proxy_set_header Host $http_host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}
Copy the nginx configuration files to the other proxy server:
[root@nginx-proxy-master conf.d]# scp proxy.conf 192.168.246.161:/etc/nginx/conf.d/
[root@nginx-proxy-master conf.d]# scp upstream.conf 192.168.246.161:/etc/nginx/conf.d/
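After copying, it is worth validating and reloading nginx on both proxies and confirming that the upstream actually alternates between the two web servers; a minimal check, assuming the two backends serve distinguishable pages:
[root@proxy conf.d]# nginx -t && systemctl reload nginx
# hit the proxy a few times; with weight=1 and weight=2, roughly one third of
# the responses should come from .60 and two thirds from .70
[root@proxy conf.d]# for i in $(seq 1 6); do curl -s http://localhost/; done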
3. Scheduler HA with keepalived
1) Install the software on both the master and backup schedulers
[root@master ~]# yum install -y keepalived
[root@slave ~]# yum install -y keepalived
[root@slave ~]# cd /etc/nginx/conf.d/
[root@slave conf.d]# mv default.conf default.conf.bak
[root@master ~]# mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
[root@master ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id directory1          # use directory2 on the backup
}
vrrp_instance VI_1 {
    state MASTER                  # MASTER on the primary, BACKUP on the standby
    interface ens33               # interface the VIP binds to
    virtual_router_id 80          # must be identical across the whole cluster
    priority 100                  # priority; set to 50 on the backup
    advert_int 1                  # heartbeat advertisement interval, 1s
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.246.16/24
    }
}
[root@slave ~]# mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
[root@slave ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id directory2
}
vrrp_instance VI_1 {
    state BACKUP                  # this node is the backup
    interface ens33
    nopreempt                     # set on the backup: do not seize the VIP (no preemption)
    virtual_router_id 80
    priority 50                   # 50 on the backup
    advert_int 1                  # advertisement interval, 1s
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.246.16/24
    }
}
2) Start keepalived (on both master and backup)
[root@master ~]# systemctl start keepalived
[root@master ~]# systemctl enable keepalived
[root@slave ~]# systemctl start keepalived
[root@slave ~]# systemctl enable keepalived
[root@master ~]# ip addr
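`ip addr` on the master should now show the VIP on ens33. A quick way to confirm where the VIP currently lives (run on both nodes; only the active master should match):
[root@master ~]# ip addr show ens33 | grep 192.168.246.16   # expected on the master
[root@slave ~]# ip addr show ens33 | grep 192.168.246.16    # should print nothing while the master is healthy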
3) Optional extension: health-check nginx on the schedulers (configure on both)
(1) The check script
[root@master ~]# vim /etc/keepalived/check_nginx_status.sh
#!/bin/bash
/usr/bin/curl -I http://localhost &>/dev/null
if [ $? -ne 0 ];then
    # /etc/init.d/keepalived stop
    systemctl stop keepalived
fi
[root@master ~]# chmod a+x /etc/keepalived/check_nginx_status.sh
(2) Configure keepalived to use the script
[root@master ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id directory1
}
vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx_status.sh"
    interval 5
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 80
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.246.16/24
    }
    track_script {
        check_nginx
    }
}
# Note: nginx must be started before keepalived.
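One way to enforce that start order at boot is a systemd drop-in making keepalived start after nginx; this override is a sketch of mine, not part of either stock package:
[root@master ~]# systemctl edit keepalived
# add these lines in the editor that opens, then save:
[Unit]
After=nginx.service
Wants=nginx.service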
4) Test access
Stop the nginx service on the keepalived master node and check whether the VIP fails over; if it moves to the backup, the setup works.
Browse to the VIP: http://192.168.246.16
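A concrete version of this drill, assuming the health-check script from step 3) is in place:
[root@master ~]# systemctl stop nginx            # within ~5s the script stops keepalived on the master
[root@master ~]# ip addr show ens33              # VIP 192.168.246.16 should be gone
[root@slave ~]# ip addr show ens33               # ...and should now appear here
# to restore the master, start nginx first, then keepalived:
[root@master ~]# systemctl start nginx && systemctl start keepalived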
4. LVS Director + keepalived
1) Install the software on both the master and backup schedulers
[root@master ~]# yum -y install ipvsadm keepalived
[root@slave ~]# yum -y install ipvsadm keepalived
2) Configure keepalived
lvs-master:
[root@master ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id lvs-keepalived-master      # use lvs-keepalived-slave on the backup
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33                      # interface the VIP binds to
    virtual_router_id 80                 # VRID; identical on master and backup within one cluster
    priority 100                         # this node's priority; 50 on the backup
    advert_int 1                         # advertisement interval, default 1s
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.73.51/24
    }
}
virtual_server 192.168.73.51 80 {        # LVS virtual server definition
    delay_loop 3                         # seconds between health-check polls
    lb_algo rr                           # LVS scheduling algorithm
    lb_kind DR                           # LVS forwarding mode (direct routing)
    nat_mask 255.255.255.0
    protocol TCP                         # protocol used for health checks
    real_server 192.168.73.70 80 {
        weight 1
        inhibit_on_failure               # on failure, set the weight to 0 instead of removing the node from IPVS
        TCP_CHECK {                      # health check
            connect_port 80              # port to check
            connect_timeout 3            # connection timeout in seconds
        }
    }
    real_server 192.168.73.88 80 {
        weight 1
        inhibit_on_failure
        TCP_CHECK {
            connect_timeout 3
            connect_port 80
        }
    }
}
[root@slave ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id lvs-keepalived-slave
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    nopreempt                            # do not preempt the VIP
    virtual_router_id 80
    priority 50
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.73.51/24
    }
}
virtual_server 192.168.73.51 80 {
    delay_loop 3
    lb_algo rr
    lb_kind DR
    nat_mask 255.255.255.0
    protocol TCP
    real_server 192.168.73.70 80 {
        weight 1
        inhibit_on_failure
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
        }
    }
    real_server 192.168.73.88 80 {
        weight 1
        inhibit_on_failure
        TCP_CHECK {
            connect_timeout 3
            connect_port 80
        }
    }
}
3) Start keepalived (on both master and backup)
[root@master ~]# systemctl start keepalived
[root@master ~]# systemctl enable keepalived
[root@master ~]# ipvsadm -Ln
[root@slave ~]# systemctl start keepalived
[root@slave ~]# systemctl enable keepalived
[root@slave ~]# ipvsadm -Ln
4) Configure all real servers (nginx1, nginx2)
[root@test-nginx1 ~]# yum install -y nginx
[root@test-nginx2 ~]# yum install -y nginx
[root@test-nginx1 ~]# ip addr add dev lo 192.168.73.51/32
[root@test-nginx2 ~]# ip addr add dev lo 192.168.73.51/32
[root@test-nginx1 ~]# echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore    # reply to ARP only for addresses on the receiving interface
[root@test-nginx1 ~]# echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce  # announce using the interface's own exact address
[root@test-nginx2 ~]# echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore    # reply to ARP only for addresses on the receiving interface
[root@test-nginx2 ~]# echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce  # announce using the interface's own exact address
[root@test-nginx1 ~]# echo "web1..." >> /usr/share/nginx/html/index.html
[root@test-nginx2 ~]# echo "web2..." >> /usr/share/nginx/html/index.html
[root@test-nginx1 ~]# systemctl start nginx
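Note that the loopback VIP and the two sysctls above are lost on reboot. One hedged way to persist them (paths assume a stock CentOS 7 layout; repeat on both real servers):
[root@test-nginx1 ~]# cat >> /etc/sysctl.conf <<'EOF'
net.ipv4.conf.all.arp_ignore = 1
net.ipv4.conf.all.arp_announce = 2
EOF
[root@test-nginx1 ~]# sysctl -p
[root@test-nginx1 ~]# echo 'ip addr add dev lo 192.168.73.51/32' >> /etc/rc.d/rc.local
[root@test-nginx1 ~]# chmod +x /etc/rc.d/rc.local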
5) Test the cluster
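A minimal test from a client machine, relying on the distinct web1/web2 pages created above:
[root@client ~]# for i in $(seq 1 4); do curl -s http://192.168.73.51/; done
# with the rr scheduler the output should alternate between web1... and web2...
# stop nginx on one RS and re-run: ipvsadm should show its weight drop to 0
[root@lvs-master ~]# ipvsadm -Ln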
5. MySQL + keepalived
Project environment:
VIP        192.168.73.51
mysql1     192.168.73.50    keepalived-master
mysql2     192.168.73.60    keepalived-slave
Install the database on both nodes:
[root@mysql-keepalived-master ~]# yum -y install mariadb-server mariadb
[root@mysql-keepalived-master ~]# systemctl start mariadb
[root@mysql-keepalived-master ~]# mysql
On node 1, create database qf1 for testing:
MariaDB [(none)]> create database qf1;
Create a user that clients can use to test connections:
MariaDB [(none)]> grant all privileges on *.* to root@'%' identified by '123456';
MariaDB [(none)]> flush privileges;
MariaDB [(none)]> quit;
[root@mysql-keepalived-slave ~]# yum -y install mariadb-server mariadb
[root@mysql-keepalived-slave ~]# systemctl start mariadb
[root@mysql-keepalived-slave ~]# mysql
On node 2, create database qf2 for testing:
MariaDB [(none)]> create database qf2;
Create a user that clients can use to test connections:
MariaDB [(none)]> grant all privileges on *.* to root@'%' identified by '123456';
MariaDB [(none)]> flush privileges;
MariaDB [(none)]> quit;
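Before layering keepalived on top, it is worth confirming that each node accepts the remote login just granted; a quick sketch from any client:
[root@client ~]# mysql -uroot -p'123456' -h 192.168.73.50 -e 'show databases;'   # should list qf1
[root@client ~]# mysql -uroot -p'123456' -h 192.168.73.60 -e 'show databases;'   # should list qf2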
Install keepalived (on both machines):
[root@mysql-keepalived-master ~]# yum -y install keepalived
[root@mysql-keepalived-slave ~]# yum -y install keepalived
keepalived master/backup configuration files
master configuration (192.168.73.50):
[root@mysql-keepalived-master ~]# mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
[root@mysql-keepalived-master ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id master
}
vrrp_script check_run {
    script "/etc/keepalived/keepalived_check_mysql.sh"
    interval 5
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 89
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.73.51/24
    }
    track_script {
        check_run
    }
}
slave configuration (192.168.73.60):
[root@mysql-keepalived-slave ~]# cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
[root@mysql-keepalived-slave ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id backup
}
vrrp_script check_run {
    script "/etc/keepalived/keepalived_check_mysql.sh"
    interval 5
}
vrrp_instance VI_1 {
    state BACKUP
    nopreempt
    interface ens33
    virtual_router_id 89
    priority 50
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.73.51/24
    }
    track_script {
        check_run
    }
}
MySQL health-check script /etc/keepalived/keepalived_check_mysql.sh (the same script on both MySQL nodes):
[root@mysql-keepalived-master ~]# vim /etc/keepalived/keepalived_check_mysql.sh
#!/bin/bash
# connect to this node's own IP (192.168.73.50 on node 1, 192.168.73.60 on node 2)
/usr/bin/mysql -h192.168.73.50 -uroot -p'123456' -e "show status" &>/dev/null
if [ $? -ne 0 ];then
    # service keepalived stop
    systemctl stop keepalived
fi
Or, checking over the local socket instead:
#!/bin/bash
/usr/bin/mysql -e "show status" &>/dev/null
if [ $? -ne 0 ];then
    # service keepalived stop
    systemctl stop keepalived
fi
[root@mysql-keepalived-master ~]# chmod +x /etc/keepalived/keepalived_check_mysql.sh
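The script can be exercised by hand before keepalived runs it; note that on failure it stops keepalived on the local node as a side effect:
[root@mysql-keepalived-master ~]# /etc/keepalived/keepalived_check_mysql.sh && echo mysql-ok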
Start keepalived on both sides:
[root@mysql-keepalived-master ~]# systemctl restart keepalived
[root@mysql-keepalived-master ~]# systemctl enable keepalived
Test:
[root@client ~]# mysql -uroot -p -h 192.168.73.51
[root@mysql-keepalived-master ~]# systemctl stop mariadb
[root@client ~]# mysql -uroot -p -h 192.168.73.51
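Since node 1 only holds qf1 and node 2 only holds qf2, the database list reveals which backend the VIP currently points at:
[root@client ~]# mysql -uroot -p'123456' -h 192.168.73.51 -e 'show databases;'
# qf1 in the output means the master is serving; qf2 means the VIP has failed over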
6. HAProxy basics
1) Preparation for layer-7 load balancing with haproxy
[root@ha-proxy-master ~]# cat /etc/hosts
127.0.0.1 localhost
192.168.73.50 ha-proxy-master
192.168.73.60 ha-proxy-slave
192.168.73.70 test-nginx1
192.168.73.88 test-nginx2
2) Real server (RS) configuration
[root@test-nginx1 ~]# yum install -y nginx
[root@test-nginx1 ~]# systemctl start nginx
[root@test-nginx1 ~]# echo "test-nginx1" >> /usr/share/nginx/html/index.html
3) Configure haproxy on the schedulers (run on both master and backup)
[root@ha-proxy-master ~]# yum -y install haproxy
[root@ha-proxy-master ~]# cp -rf /etc/haproxy/haproxy.cfg{,.bak}
[root@ha-proxy-master ~]# sed -i -r '/^[ ]*#/d;/^$/d' /etc/haproxy/haproxy.cfg
[root@ha-proxy-master ~]# vim /etc/haproxy/haproxy.cfg
global
    log 127.0.0.1 local2 info
    pidfile /var/run/haproxy.pid
    maxconn 4000                        # maximum number of connections
    user haproxy
    group haproxy
    daemon                              # run haproxy in the background
    nbproc 1                            # number of worker processes; set to the CPU core count
defaults
    mode http                           # working mode: tcp is layer 4, http is layer 7
    log global
    retries 3                           # after 3 failed connections a server is considered down (used with check below)
    option redispatch                   # redispatch requests to a healthy server when one becomes unavailable
    maxconn 4000
    contimeout 5000                     # timeout for connections from haproxy to backend servers, in ms
    clitimeout 50000                    # client-side timeout
    srvtimeout 50000                    # backend server timeout
listen stats
    bind *:81
    stats enable
    stats uri /haproxy                  # browse to http://192.168.73.50:81/haproxy to see server status
    stats auth qianfeng:123             # user authentication (not applied when the client uses the elinks browser)
frontend web
    mode http
    bind *:80                           # IP and port to listen on
    option httplog                      # use the http log format
    acl html url_reg -i \.html$         # 1. ACL named html: matches URLs ending in .html (optional)
    use_backend httpservers if html     # 2. if the html ACL matches, send the request to the httpservers backend
    default_backend httpservers         # default server group
backend httpservers                     # name must match the one referenced above
    balance roundrobin                  # load-balancing algorithm
    server http1 192.168.73.70:80 maxconn 2000 weight 1 check inter 1s rise 2 fall 2
    server http2 192.168.73.88:80 maxconn 2000 weight 1 check inter 1s rise 2 fall 2
Copy the configuration file to the slave server:
[root@ha-proxy-master ~]# scp /etc/haproxy/haproxy.cfg 192.168.73.60:/etc/haproxy/
Start haproxy on both machines and enable it at boot:
[root@ha-proxy-master ~]# systemctl start haproxy
[root@ha-proxy-master ~]# systemctl enable haproxy
4) Test master and backup (browser access):
192.168.73.50:81/haproxy
192.168.73.60:81/haproxy
If you see a bind failure, run the following:
setsebool -P haproxy_connect_any=1
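The stats page can also be queried from the command line, which is handy for scripting; appending ;csv to the stats URI returns a machine-readable view (credentials come from the stats auth line above):
[root@ha-proxy-master ~]# curl -s -u qianfeng:123 http://192.168.73.50:81/haproxy
[root@ha-proxy-master ~]# curl -s -u qianfeng:123 'http://192.168.73.50:81/haproxy;csv'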
5) Scheduler HA with keepalived
(1) Install the software on both master and backup schedulers
[root@ha-proxy-master ~]# yum install -y keepalived
[root@ha-proxy-slave ~]# yum install -y keepalived
[root@ha-proxy-master ~]# mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
[root@ha-proxy-master ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id director1
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 80
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.73.51/24
    }
}
[root@ha-proxy-slave ~]# mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
[root@ha-proxy-slave ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id directory2
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    nopreempt
    virtual_router_id 80
    priority 50
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.73.51/24
    }
}
(2) Start keepalived (on both master and backup)
[root@ha-proxy-master ~]# systemctl start keepalived
[root@ha-proxy-master ~]# systemctl enable keepalived
[root@ha-proxy-master ~]# ip a
(3) Optional extension: health-check haproxy on the schedulers
Note: haproxy must be started before keepalived.
a. The check script:
[root@ha-proxy-master ~]# cat /etc/keepalived/check_haproxy_status.sh
#!/bin/bash
/usr/bin/curl -I http://localhost &>/dev/null
if [ $? -ne 0 ];then
    # /etc/init.d/keepalived stop
    systemctl stop keepalived
fi
[root@ha-proxy-master ~]# chmod a+x /etc/keepalived/check_haproxy_status.sh
b. Configure keepalived to use the script
[root@ha-proxy-master keepalived]# cd /etc/keepalived/
[root@ha-proxy-master keepalived]# vim keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id director1
}
vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy_status.sh"
    interval 5
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 80
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.73.51/24
    }
    track_script {
        check_haproxy
    }
}
[root@ha-proxy-slave keepalived]# vim keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id directory2
}
vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy_status.sh"
    interval 5
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    nopreempt
    virtual_router_id 80
    priority 50
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.73.51/24
    }
    track_script {
        check_haproxy
    }
}
[root@ha-proxy-master keepalived]# systemctl restart keepalived
[root@ha-proxy-slave keepalived]# systemctl restart keepalived
(4) Configure haproxy logging on both machines (uncomment and add the following):
[root@ha-proxy-master ~]# vim /etc/rsyslog.conf
# Provides UDP syslog reception
# haproxy ships its logs over UDP, so enable rsyslog's UDP listener:
$ModLoad imudp
$UDPServerRun 514
Find the #### RULES #### section and add below it:
local2.*                 /var/log/haproxy.log
[root@ha-proxy-master ~]# systemctl restart rsyslog
[root@ha-proxy-master ~]# systemctl restart haproxy
[root@ha-proxy-master ~]# tail -f /var/log/haproxy.log
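If no log lines appear, generate a request through the proxy and check again; also verify that the global log line in haproxy.cfg uses local2, matching the rsyslog rule above:
[root@ha-proxy-master ~]# curl -s http://localhost/ >/dev/null
[root@ha-proxy-master ~]# tail -n 2 /var/log/haproxy.log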