Keepalived solves the high-availability problem for LVS (layer 4); layer 7 relies on haproxy.
Set up two nginx machines and write a test page on each:
[root@web1 ~]#echo web1.meng.org `hostname -I` > /var/www/html/index.html
[root@web2 ~]#echo web2.meng.org `hostname -I` > /var/www/html/index.html
Test from another machine:
[root@ubuntu2004 ~]#curl 10.0.0.8
web1.meng.org 10.0.0.8
[root@ubuntu2004 ~]#curl 10.0.0.18
web2.meng.org 10.0.0.18
When a request hits port 80 on 10.0.0.100, it is scheduled to web01 and web02 behind it.
Virtual server configuration structure
Each virtual_server is one IPVS cluster and can be implemented with the template syntax below.
Copy the template content into a config file under conf.d.
Template content:
[root@ka1 conf.d]#cat /usr/local/src/keepalived-2.2.7/doc/samples/keepalived.conf.sample
virtual_server 10.10.10.2 1358 {
    delay_loop 6
    lb_algo rr
    lb_kind NAT
    persistence_timeout 50
    protocol TCP
    sorry_server 192.168.200.200 1358
    real_server 192.168.200.2 1358 {
        weight 1
        HTTP_GET {
            url {
                path /testurl3/test.jsp
                digest 640205b7b0fc66c1ea91c463fac6334d
            }
            connect_timeout 3
            retry 3
            delay_before_retry 3
        }
    }
}
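Note that keepalived only reads /etc/keepalived/keepalived.conf by default, so files under conf.d take effect only if the main file pulls them in. A minimal sketch of that main file (the global_defs content here is an assumption, not taken from the original setup):
# /etc/keepalived/keepalived.conf -- main file pulls in the per-service configs
global_defs {
    router_id ka1                            # hypothetical router id for this node
}
include /etc/keepalived/conf.d/*.conf        # load every service config under conf.d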
Write it into the per-service config file under conf.d:
[root@ka1 conf.d]#cd /etc/keepalived/conf.d/
[root@ka1 conf.d]#ls
www.meng.com.conf www.meng.org.conf
[root@ka1 conf.d]#cat www.meng.org.conf
vrrp_instance VI_1 {
    state MASTER
    interface eth1
    virtual_router_id 66
    priority 100
    advert_int 1
    #nopreempt
    authentication {
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        10.0.0.100/24 dev eth0 label eth0:1
    }
    unicast_src_ip 192.168.10.100
    unicast_peer {
        192.168.10.101
    }
    notify_master "/etc/keepalived/notify.sh master"
    notify_backup "/etc/keepalived/notify.sh backup"
    notify_fault "/etc/keepalived/notify.sh fault"
}
virtual_server 10.0.0.100 80 {                   # the VIP must be brought up by the vrrp_instance above before this virtual_server can use it
    delay_loop 6                                 # interval between health checks of the backend servers; this is LVS's backend checking
    lb_algo rr                                   # scheduling algorithm
    lb_kind NAT                                  # cluster type; must be uppercase
    persistence_timeout 50                       # persistent-connection duration
    protocol TCP                                 # service protocol, usually TCP (TCP|UDP|SCTP)
    sorry_server 192.168.200.200 1358            # backup server address used when all RS have failed
    real_server 192.168.200.2 1358 {             # real backend server; one block per server, usually at least two
        weight 1                                 # backend server weight
        notify_up <STRING>|<QUOTED-STRING>       # script to run when this RS comes up
        notify_down <STRING>|<QUOTED-STRING>     # script to run when this RS goes down
        HTTP_GET {                               # health-check method for this host
            url {
                path /testurl3/test.jsp          # URL to monitor
                status_code 200                  # response code treated as healthy, normally 200
            }
            connect_timeout 3                    # timeout for the health-check connection to the RS (closer to haproxy's timeout connect)
            retry 3                              # number of retries
            delay_before_retry 3                 # delay before each retry
        }
    }
}
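When every RS in the pool fails its check, ipvs redirects new connections to sorry_server instead. A minimal sketch of wiring up a maintenance page for it, assuming a local nginx on the director node and a hypothetical docroot:
[root@ka1 ~]#echo "sorry, the site is under maintenance" > /usr/share/nginx/html/index.html   # hypothetical docroot
# then point sorry_server at the local listener inside the virtual_server block:
#     sorry_server 127.0.0.1 80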
HTTP_GET|SSL_GET|TCP_CHECK|SMTP_CHECK|MISC_CHECK { ... }    # health-check methods for the current host
Application-layer checks
Application-layer check: HTTP_GET|SSL_GET
HTTP_GET|SSL_GET {
    url {
        path <URL_PATH>                  # URL to monitor
        status_code <INT>                # response code treated as healthy, normally 200
    }
    connect_timeout <INTEGER>            # timeout for the health-check connection to the RS (closer to haproxy's timeout connect)
    nb_get_retry <INT>                   # number of retries (renamed to retry in keepalived 2.x)
    delay_before_retry <INT>             # delay before each retry
    connect_ip <IP ADDRESS>              # RS IP address to send the health check to
    connect_port <PORT>                  # RS port to send the health check to
    bindto <IP ADDRESS>                  # source address used for the health check
    bind_port <PORT>                     # source port used for the health check
}
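The sample template earlier used digest instead of status_code; keepalived ships a genhash utility to compute that hash for a given URL. A sketch, assuming web1's index page as the target:
[root@ka1 ~]#genhash -s 10.0.0.8 -p 80 -u /index.html
# prints the MD5SUM to paste into the digest field of the url block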
TCP check
Transport-layer check: TCP_CHECK
TCP_CHECK {
    connect_ip <IP ADDRESS>              # RS IP address to send the health check to
    connect_port <PORT>                  # RS port to send the health check to
    bindto <IP ADDRESS>                  # source address used for the health check
    bind_port <PORT>                     # source port used for the health check
    connect_timeout <INTEGER>            # timeout for the health-check connection to the RS (closer to haproxy's timeout connect)
}
Example
Step 1: configure the virtual server structure
[root@ka1 conf.d]#cat www.meng.org.conf
vrrp_instance VI_1 {
    state MASTER
    interface eth1
    virtual_router_id 66
    priority 100
    advert_int 1
    #nopreempt
    authentication {
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        10.0.0.100/24 dev eth0 label eth0:1
    }
    unicast_src_ip 192.168.10.100
    unicast_peer {
        192.168.10.101
    }
    notify_master "/etc/keepalived/notify.sh master"
    notify_backup "/etc/keepalived/notify.sh backup"
    notify_fault "/etc/keepalived/notify.sh fault"
}
virtual_server 10.0.0.100 80 {
    delay_loop 6
    lb_algo wrr
    lb_kind DR
    #persistence_timeout 50
    protocol TCP
    #sorry_server 127.0.0.1 80
    real_server 10.0.0.8 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 5
            retry 3                  # nb_get_retry is the legacy HTTP_GET spelling; keepalived 2.x uses retry
            delay_before_retry 3
            connect_port 80
        }
    }
    real_server 10.0.0.18 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 5
            retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}
Step 2: since this is the DR model, every backend nginx machine must carry the VIP and have its kernel ARP parameters adjusted so it does not answer ARP for the VIP (done by script):
[root@web1 ~]#cat lvs_dr_rs.sh
#!/bin/bash
#Author:mengfanchao
#Date:2022-10-29
vip=10.0.0.100
mask='255.255.255.255'
dev=lo:1
#rpm -q httpd &> /dev/null || yum -y install httpd &>/dev/null
#service httpd start &> /dev/null && echo "The httpd Server is Ready!"
#echo "`hostname -I`" > /var/www/html/index.html
case $1 in
start)
    # arp_ignore=1: only answer ARP requests for addresses configured on the receiving interface
    echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    # arp_announce=2: always use the best local address in ARP announcements, hiding the VIP
    echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
    echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce
    # bind the VIP to lo:1 with a /32 mask so the RS accepts traffic for it without advertising it
    ifconfig $dev $vip netmask $mask #broadcast $vip up
    #route add -host $vip dev $dev
    echo "The RS Server is Ready!"
    ;;
stop)
    ifconfig $dev down
    echo 0 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 0 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    echo 0 > /proc/sys/net/ipv4/conf/all/arp_announce
    echo 0 > /proc/sys/net/ipv4/conf/lo/arp_announce
    echo "The RS Server is Canceled!"
    ;;
*)
    echo "Usage: $(basename $0) start|stop"
    exit 1
    ;;
esac
Step 3: run the script on every RS (web1 and web2; only web1's transcript is shown) and check the configuration
[root@web1 ~]#bash lvs_dr_rs.sh start
The RS Server is Ready!
[root@web1 ~]#ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet 10.0.0.100/32 scope global lo:1
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 00:0c:29:80:d5:83 brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.100/24 brd 10.0.0.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe80:d583/64 scope link
       valid_lft forever preferred_lft forever
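To double-check the ARP kernel parameters the script set, sysctl can read them back (the values in the comments are what we expect, not captured output):
[root@web1 ~]#sysctl net.ipv4.conf.all.arp_ignore net.ipv4.conf.all.arp_announce
# expected: net.ipv4.conf.all.arp_ignore = 1
# expected: net.ipv4.conf.all.arp_announce = 2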
Restart the keepalived service and check the ipvs rules
[root@ka1 conf.d]#systemctl restart keepalived
[root@ka1 conf.d]#ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.0.0.100:80 wrr
  -> 10.0.0.8:80                  Route   1      0          0
  -> 10.0.0.18:80                 Route   1      0          0
Configure the same virtual_server block from step 1 into www.meng.org.conf on every node (ka2 included), then check that the ipvs rules are in place:
[root@ka2 conf.d]#systemctl restart keepalived
[root@ka2 conf.d]#ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.0.0.100:80 wrr
  -> 10.0.0.8:80                  Route   1      0          0
  -> 10.0.0.18:80                 Route   1      0          0
Test 10.0.0.100 from the front-end client:
[root@ubuntu2004 ~]#curl 10.0.0.100
web2.meng.org 10.0.0.18
[root@ubuntu2004 ~]#curl 10.0.0.100
web1.meng.org 10.0.0.8
Stop the keepalived service and observe the result (a loop makes this easy to watch):
[root@ubuntu2004 ~]#while true;do curl 10.0.0.100;sleep 0.5;done
web2.meng.org 10.0.0.18
web1.meng.org 10.0.0.8
web2.meng.org 10.0.0.18
web1.meng.org 10.0.0.8
Stop keepalived on ka1 and check whether the loop is affected; it is not:
[root@ka1 conf.d]#hostname -I
10.0.0.101 10.0.0.100 192.168.10.100
[root@ka1 conf.d]#systemctl stop keepalived
[root@ka1 conf.d]#hostname -I
10.0.0.101 192.168.10.100
The front-end client's access continues without any anomaly.
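The VIP should have failed over to the peer; a quick check on ka2 (assuming ka2 carries the symmetric BACKUP config on the same interfaces):
[root@ka2 conf.d]#hostname -I
# expected to now include 10.0.0.100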
If a backend nginx server is then also taken down, it stops being scheduled to. The brief "Connection refused" errors below last only until TCP_CHECK exhausts its retries and keepalived removes the dead RS from the ipvs table, after which everything goes to web2:
curl: (7) Failed to connect to 10.0.0.100 port 80: Connection refused
web2.meng.org 10.0.0.18
curl: (7) Failed to connect to 10.0.0.100 port 80: Connection refused
web2.meng.org 10.0.0.18
web2.meng.org 10.0.0.18
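On the node currently holding the VIP, the ipvs table should confirm the dead RS was removed (the expected state is described in the comment, not captured output):
[root@ka2 conf.d]#ipvsadm -Ln
# expected: only -> 10.0.0.18:80 remains under TCP 10.0.0.100:80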