
Implementing LVS high availability with keepalived



Name                        IP
node1 (LVS + keepalived)    192.168.6.152
node2 (LVS + keepalived)    192.168.6.153
rs1                         192.168.6.135
rs2                         192.168.6.154
# Install nginx on rs1 and rs2 and create a test page
[root@rs1 ~]# dnf -y install nginx
[root@rs1 ~]# echo 'rs1' > /usr/share/nginx/html/index.html
[root@rs1 ~]# systemctl enable --now nginx
Created symlink /etc/systemd/system/multi-user.target.wants/nginx.service → /usr/lib/systemd/system/nginx.service.


[root@rs2 ~]# dnf -y install nginx
[root@rs2 ~]# echo 'rs2' > /usr/share/nginx/html/index.html
[root@rs2 ~]# systemctl enable --now nginx
Created symlink /etc/systemd/system/multi-user.target.wants/nginx.service → /usr/lib/systemd/system/nginx.service.
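
Before going further, it's worth confirming that both backends answer; a quick check, run from any host on the 192.168.6.0/24 network:

# each real server should return its own page
curl http://192.168.6.135   # expected output: rs1
curl http://192.168.6.154   # expected output: rs2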

 

# Configure the VIP and DIP on the 192.168.6.152 LVS director
[root@node1 ~]# cd /etc/sysconfig/network-scripts/
[root@node1 network-scripts]# ls
ifcfg-ens160
[root@node1 network-scripts]# vim ifcfg-ens160
TYPE=Ethernet
BOOTPROTO=static
NAME=ens160
DEVICE=ens160
ONBOOT=yes
IPADDR0=192.168.6.152
NETMASK=255.255.255.0
GATEWAY=192.168.6.2
DNS1=114.114.114.114
IPADDR1=192.168.6.250   # add the 192.168.6.250 VIP as a secondary address
NETMASK1=255.255.255.0   # add its netmask
[root@node1 network-scripts]# ifdown ens160;ifup ens160  # the NIC comes back up, but it can no longer reach the Internet because there are now two gateways, so we set up a local yum repository instead
Connection 'ens160' successfully deactivated (D-Bus active path: /org/freedesktop/NetworkManager/ActiveConnection/2)
Connection successfully activated (D-Bus active path: /org/freedesktop/NetworkManager/ActiveConnection/3)
[root@node1 network-scripts]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:0c:29:d7:b0:2d brd ff:ff:ff:ff:ff:ff
    inet 192.168.6.152/24 brd 192.168.6.255 scope global noprefixroute ens160
       valid_lft forever preferred_lft forever
    inet 192.168.6.250/24 brd 192.168.6.255 scope global secondary noprefixroute ens160
       valid_lft forever preferred_lft forever
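
As an aside, on EL8 the secondary address can also be added with nmcli instead of editing the ifcfg file by hand; a minimal sketch, assuming the connection is named ens160 as above:

# equivalent nmcli approach (optional)
nmcli connection modify ens160 +ipv4.addresses 192.168.6.250/24
nmcli connection up ens160   # re-activate to apply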

# Configure a local yum repository
[root@node1 ~]# cd /etc/yum.repos.d/    
[root@node1 yum.repos.d]# ls
CentOS-Base.repo   epel.repo                  epel-testing.repo
epel-modular.repo  epel-testing-modular.repo
[root@node1 yum.repos.d]# mv * /opt/   # move the previously configured Aliyun repo files out of the way
[root@node1 yum.repos.d]# ls
[root@node1 yum.repos.d]# mount /dev/cdrom /mnt/  # mount the installation ISO
mount: /mnt: WARNING: device write-protected, mounted read-only.    
[root@node1 yum.repos.d]# vi xx.repo    # configure the local repo
[appstream]
name=appstream
baseurl=file:///mnt/AppStream
gpgcheck=0
enabled=1
[baseos]
name=baseos
baseurl=file:///mnt/BaseOS
gpgcheck=0
enabled=1

[root@node1 yum.repos.d]# yum clean all
36 files removed
[root@node1 yum.repos.d]# yum makecache
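
Note that the /mnt mount does not survive a reboot. If the local repo should persist, one option is an fstab entry; a sketch, assuming the ISO stays attached as /dev/cdrom:

# make the ISO mount persistent across reboots (optional)
echo '/dev/cdrom /mnt iso9660 defaults 0 0' >> /etc/fstab
mount -a   # verify the entry parses and mounts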
# Configure the VIP and DIP on the 192.168.6.153 LVS director
[root@node2 ~]# cd /etc/sysconfig/network-scripts/
[root@node2 network-scripts]# vim ifcfg-ens160
TYPE=Ethernet
BOOTPROTO=static
NAME=ens160
DEVICE=ens160
ONBOOT=yes
IPADDR0=192.168.6.153
NETMASK=255.255.255.0
GATEWAY=192.168.6.2
DNS1=114.114.114.114
IPADDR1=192.168.6.250
NETMASK1=255.255.255.0
[root@node2 network-scripts]# ifdown ens160;ifup ens160

[root@node2 network-scripts]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:0c:29:42:d0:bc brd ff:ff:ff:ff:ff:ff
    inet 192.168.6.153/24 brd 192.168.6.255 scope global noprefixroute ens160
       valid_lft forever preferred_lft forever
    inet 192.168.6.250/24 brd 192.168.6.255 scope global secondary noprefixroute ens160
       valid_lft forever preferred_lft forever

[root@node2 ~]# cd /etc/yum.repos.d/
[root@node2 yum.repos.d]# ls
CentOS-Base.repo   epel.repo                  epel-testing.repo
epel-modular.repo  epel-testing-modular.repo
[root@node2 yum.repos.d]# mv * /opt/
[root@node2 yum.repos.d]# ls

[root@node2 yum.repos.d]# mount /dev/cdrom /mnt/
mount: /mnt: WARNING: device write-protected, mounted read-only.
[root@node2 yum.repos.d]# vi xx.repo
[appstream]
name=appstream
baseurl=file:///mnt/AppStream
gpgcheck=0
enabled=1
[baseos]
name=baseos
baseurl=file:///mnt/BaseOS
gpgcheck=0
enabled=1

[root@node2 yum.repos.d]# yum clean all
36 files removed
[root@node2 yum.repos.d]# yum makecache
 
# Configure the RIP on rs1 and rs2
[root@rs1 ~]# vi /etc/sysconfig/network-scripts/ifcfg-ens160

TYPE=Ethernet
BOOTPROTO=static
NAME=ens160
DEVICE=ens160
ONBOOT=yes
IPADDR=192.168.6.135
NETMASK=255.255.255.0
GATEWAY=192.168.6.2
DNS1=114.114.114.114
[root@rs1 ~]# ifdown ens160;ifup ens160 

[root@rs2 ~]# vi /etc/sysconfig/network-scripts/ifcfg-ens160

TYPE=Ethernet
BOOTPROTO=static
NAME=ens160
DEVICE=ens160
ONBOOT=yes
IPADDR=192.168.6.154
NETMASK=255.255.255.0
GATEWAY=192.168.6.2
DNS1=114.114.114.114
[root@rs2 ~]# ifdown ens160;ifup ens160 
[root@rs1 ~]# vi /etc/sysctl.conf

# sysctl settings are defined through files in
# /usr/lib/sysctl.d/, /run/sysctl.d/, and /etc/sysctl.d/.
#
# Vendors settings live in /usr/lib/sysctl.d/.
# To override a whole file, create a new file with the same name in
# /etc/sysctl.d/ and put new settings there. To override
# only specific settings, add a file with a lexically later
# name in /etc/sysctl.d/ and put new settings there.
#
# For more information, see sysctl.conf(5) and sysctl.d(5).
net.ipv4.conf.all.arp_ignore = 1     # append these two lines
net.ipv4.conf.all.arp_announce = 2
[root@rs1 ~]# sysctl -p
net.ipv4.conf.all.arp_ignore = 1
net.ipv4.conf.all.arp_announce = 2

[root@rs2 ~]# vi /etc/sysctl.conf

# sysctl settings are defined through files in
# /usr/lib/sysctl.d/, /run/sysctl.d/, and /etc/sysctl.d/.
#
# Vendors settings live in /usr/lib/sysctl.d/.
# To override a whole file, create a new file with the same name in
# /etc/sysctl.d/ and put new settings there. To override
# only specific settings, add a file with a lexically later
# name in /etc/sysctl.d/ and put new settings there.
#
# For more information, see sysctl.conf(5) and sysctl.d(5).
net.ipv4.conf.all.arp_ignore = 1
net.ipv4.conf.all.arp_announce = 2
[root@rs2 ~]# sysctl -p
net.ipv4.conf.all.arp_ignore = 1
net.ipv4.conf.all.arp_announce = 2
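
These two parameters keep the real servers from answering ARP for the VIP: arp_ignore=1 replies only to ARP requests whose target address is configured on the incoming interface, and arp_announce=2 always picks the best local source address for ARP announcements. Many LVS-DR write-ups pin the same flags on lo explicitly as well; a hedged sketch (the conf.all settings above may already be sufficient):

# optional: set the flags on lo explicitly too
cat >> /etc/sysctl.conf <<'EOF'
net.ipv4.conf.lo.arp_ignore = 1
net.ipv4.conf.lo.arp_announce = 2
EOF
sysctl -p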
# Configure the VIP on rs1 and rs2
[root@rs1 ~]# yum -y install net-tools
[root@rs1 ~]# ifconfig lo:0 192.168.6.250/32 broadcast 192.168.6.250 up
[root@rs1 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet 192.168.6.250/0 brd 192.168.6.250 scope global lo:0
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:0c:29:e2:21:19 brd ff:ff:ff:ff:ff:ff
    inet 192.168.6.135/24 brd 192.168.6.255 scope global noprefixroute ens160
       valid_lft forever preferred_lft forever
[root@rs1 ~]# route add -host 192.168.6.250 dev lo
[root@rs1 ~]# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         192.168.6.2     0.0.0.0         UG    100    0        0 ens160
192.168.6.0     0.0.0.0         255.255.255.0   U     100    0        0 ens160
192.168.6.250   0.0.0.0         255.255.255.255 UH    0      0        0 lo

[root@rs2 ~]# yum -y install net-tools
[root@rs2 ~]# ifconfig lo:0 192.168.6.250/32 broadcast 192.168.6.250 up
[root@rs2 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet 192.168.6.250/0 brd 192.168.6.250 scope global lo:0
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 00:0c:29:bf:50:39 brd ff:ff:ff:ff:ff:ff
    inet 192.168.6.154/24 brd 192.168.6.255 scope global noprefixroute ens160
       valid_lft forever preferred_lft forever
[root@rs2 ~]# route add -host 192.168.6.250 dev lo
[root@rs2 ~]# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         192.168.6.2     0.0.0.0         UG    100    0        0 ens160
192.168.6.0     0.0.0.0         255.255.255.0   U     100    0        0 ens160
192.168.6.250   0.0.0.0         255.255.255.255 UH    0      0        0 lo
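
The lo:0 address and the host route added here are not persistent, so after a reboot each RS loses its DR state. A small boot-time script can re-create all of it; a sketch using the values from this article (the name rs-vip.sh is my own, not from the original):

#!/bin/bash
# rs-vip.sh -- re-create the LVS-DR real-server state (hypothetical helper)
VIP=192.168.6.250
sysctl -w net.ipv4.conf.all.arp_ignore=1      # do not answer ARP for the VIP
sysctl -w net.ipv4.conf.all.arp_announce=2    # use the best local source in ARP
ifconfig lo:0 ${VIP}/32 broadcast ${VIP} up   # put the VIP on loopback
route add -host ${VIP} dev lo                 # route VIP traffic via lo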
# Add the LVS rules on node1 and node2 and save them
[root@node1 ~]# yum -y install ipvsadm
[root@node1 ~]# ipvsadm -A -t 192.168.6.250:80 -s rr
[root@node1 ~]# ipvsadm -a -t 192.168.6.250:80 -r 192.168.6.135:80 -g
[root@node1 ~]# ipvsadm -a -t 192.168.6.250:80 -r 192.168.6.154:80 -g
[root@node1 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.6.250:80 rr
  -> 192.168.6.135:80             Route   1      0          0
  -> 192.168.6.154:80             Route   1      0          0
[root@node1 ~]# ipvsadm -Sn > /etc/sysconfig/ipvsadm
[root@node1 ~]# systemctl enable ipvsadm
Created symlink /etc/systemd/system/multi-user.target.wants/ipvsadm.service → /usr/lib/systemd/system/ipvsadm.service.
[root@node1 ~]# echo "ipvsadm -R < /etc/sysconfig/ipvsadm" >>/etc/rc.d/rc.local


[root@node2 yum.repos.d]# yum -y install ipvsadm
[root@node2 ~]# ipvsadm -A -t 192.168.6.250:80 -s rr
[root@node2 ~]# ipvsadm -a -t 192.168.6.250:80 -r 192.168.6.135:80 -g
[root@node2 ~]# ipvsadm -a -t 192.168.6.250:80 -r 192.168.6.154:80 -g
[root@node2 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.6.250:80 rr
  -> 192.168.6.135:80             Route   1      0          0
  -> 192.168.6.154:80             Route   1      0          0
[root@node2 ~]# ipvsadm -Sn > /etc/sysconfig/ipvsadm
[root@node2 ~]# echo "ipvsadm -R < /etc/sysconfig/ipvsadm" >>/etc/rc.d/rc.local
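
Two caveats worth flagging here: on EL8, /etc/rc.d/rc.local is ignored at boot unless it is executable, and node2 never ran systemctl enable ipvsadm the way node1 did. A quick fix on both nodes (sketch):

# rc.local only runs at boot when executable
chmod +x /etc/rc.d/rc.local
# node2 also needs the ipvsadm service enabled so the saved rules reload
systemctl enable ipvsadm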

 

 

Install keepalived and configure the MASTER

[root@node1 ~]# dnf -y install keepalived   # install
[root@node1 ~]# cd /etc/keepalived/
[root@node1 keepalived]# ls
keepalived.conf
[root@node1 keepalived]# cp keepalived.conf{,.aa}   # keep a renamed backup of the stock config
[root@node1 keepalived]# ls
keepalived.conf  keepalived.conf.aa
[root@node1 keepalived]# > keepalived.conf   # empty the file
[root@node1 keepalived]# vim keepalived.conf   # edit the main config
! Configuration File for keepalived

global_defs {                 # global settings
   router_id lb01
}

vrrp_instance VI_1 {          # define a VRRP instance
    state MASTER              # initial state of this keepalived node, MASTER or BACKUP
    interface ens160          # NIC the VRRP instance binds to
    virtual_router_id 51      # virtual router ID, must match across the cluster
    priority 100              # priority; the higher value wins the MASTER role
    advert_int 1              # advertisement interval between master and backup
    authentication {          # authentication settings
        auth_type PASS        # password authentication
        auth_pass 023654      # set the password
    }
    virtual_ipaddress {       # the VIP to use
        192.168.6.250
    }
}

virtual_server 192.168.6.250 80 {   # define the virtual server
    delay_loop 6              # health-check interval
    lb_algo rr                # LVS scheduling algorithm
    lb_kind DR                # LVS mode
    persistence_timeout 50    # persistence timeout, in seconds
    protocol TCP              # layer-4 protocol

    real_server 192.168.6.135 80 {   # a real server that handles requests
        weight 1              # server weight, defaults to 1
        TCP_CHECK {
            connect_port 80        # port to check
            connect_timeout 3      # connection timeout
            nb_get_retry 3         # number of retries
            delay_before_retry 3   # delay before each retry
        }
    }

    real_server 192.168.6.154 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
[root@node1 keepalived]# systemctl enable --now keepalived  # start and enable at boot
Created symlink /etc/systemd/system/multi-user.target.wants/keepalived.service → /usr/lib/systemd/system/keepalived.service.
[root@node1 keepalived]# systemctl status keepalived  # check the status
● keepalived.service - LVS and VRRP High Availability Monitor
   Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disa>
   Active: active (running) since Thu 2022-09-01 23:57:20 CST; 10s ago
  Process: 11618 ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS (code=exited, status=0/>
 Main PID: 11620 (keepalived)
    Tasks: 3 (limit: 11202)
   Memory: 2.1M
   CGroup: /system.slice/keepalived.service
           ├─11620 /usr/sbin/keepalived -D
           ├─11621 /usr/sbin/keepalived -D
           └─11622 /usr/sbin/keepalived -D
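
To confirm the instance is really advertising, the VRRP traffic can be watched on the wire (assuming tcpdump is installed):

# the MASTER should send one advertisement per second (advert_int 1)
tcpdump -i ens160 -nn vrrp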

Install keepalived and configure the BACKUP

[root@node2 ~]# dnf -y install keepalived
[root@node2 ~]# cd /etc/keepalived/
[root@node2 keepalived]# ls
keepalived.conf
[root@node2 keepalived]# cp keepalived.conf{,.aa}
[root@node2 keepalived]# > keepalived.conf
[root@node2 keepalived]# vim keepalived.conf
! Configuration File for keepalived

global_defs {
   router_id lb02
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens160
    virtual_router_id 51
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 023654
    }
    virtual_ipaddress {
        192.168.6.250
    }
}

virtual_server 192.168.6.250 80 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    persistence_timeout 50
    protocol TCP

    real_server 192.168.6.135 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }

    real_server 192.168.6.154 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

[root@node2 keepalived]# systemctl enable --now keepalived  # start and enable at boot
Created symlink /etc/systemd/system/multi-user.target.wants/keepalived.service → /usr/lib/systemd/system/keepalived.service.
[root@node2 keepalived]# systemctl status keepalived   # check the status
● keepalived.service - LVS and VRRP High Availability Monitor
   Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disa>
   Active: active (running) since Fri 2022-09-02 00:02:04 CST; 12s ago
  Process: 15241 ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS (code=exited, status=0/>
 Main PID: 15242 (keepalived)
    Tasks: 3 (limit: 11202)
   Memory: 2.4M
   CGroup: /system.slice/keepalived.service
           ├─15242 /usr/sbin/keepalived -D
           ├─15243 /usr/sbin/keepalived -D
           └─15244 /usr/sbin/keepalived -D

Check where the VIP lives

# check on the MASTER
[root@node1 keepalived]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:0c:29:d7:b0:2d brd ff:ff:ff:ff:ff:ff
    inet 192.168.6.152/24 brd 192.168.6.255 scope global noprefixroute ens160
       valid_lft forever preferred_lft forever
    inet 192.168.6.250/32 scope global ens160    # the VIP is on the master node
       valid_lft forever preferred_lft forever
    inet 192.168.6.250/24 brd 192.168.6.255 scope global secondary noprefixroute ens160
       valid_lft forever preferred_lft forever

# check on the BACKUP
[root@node2 keepalived]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:0c:29:42:d0:bc brd ff:ff:ff:ff:ff:ff
    inet 192.168.6.153/24 brd 192.168.6.255 scope global noprefixroute ens160
       valid_lft forever preferred_lft forever
    inet 192.168.6.250/24 brd 192.168.6.255 scope global secondary noprefixroute ens160
       valid_lft forever preferred_lft forever

Let keepalived manage LVS
keepalived drives the LVS rule set through a script that runs on VRRP state transitions.
Write the script on node1

[root@node1 ~]# mkdir /scripts
[root@node1 ~]# cd /scripts/
[root@node1 scripts]# vim notify.sh
#!/bin/bash
# reload or clear the LVS rules according to the VRRP state we enter

case "$1" in
  master)
          ipvsadm -R < /etc/sysconfig/ipvsadm   # restore the saved rules
  ;;
  backup)
          ipvsadm -C                            # clear the rule table
  ;;
  *)
          echo "Usage: $0 master|backup"
  ;;
esac

[root@node1 scripts]# chmod +x notify.sh
[root@node1 scripts]# ll
total 4
-rwxr-xr-x 1 root root 178 Sep 2 00:43 notify.sh
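
The script can be exercised by hand before keepalived ever calls it, since the rules were saved to /etc/sysconfig/ipvsadm earlier:

# simulate both transitions and watch the rule table follow
/scripts/notify.sh backup; ipvsadm -ln   # table should now be empty
/scripts/notify.sh master; ipvsadm -ln   # rules restored from the saved file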

 

Write the script on node2

[root@node2 keepalived]# vim /etc/keepalived/keepalived.conf
[root@node2 keepalived]# mkdir /scripts
[root@node2 keepalived]# cd /scripts/
[root@node2 scripts]# vim notify.sh

#!/bin/bash
# reload or clear the LVS rules according to the VRRP state we enter

case "$1" in
  master)
          ipvsadm -R < /etc/sysconfig/ipvsadm   # restore the saved rules
  ;;
  backup)
          ipvsadm -C                            # clear the rule table
  ;;
  *)
          echo "Usage: $0 master|backup"
  ;;
esac

[root@node2 scripts]# chmod +x notify.sh
[root@node2 scripts]# ll
total 4
-rwxr-xr-x 1 root root 178 Sep 2 00:47 notify.sh

 

Add the notify script to the keepalived configuration

Configure keepalived on node1

Add the script

 

[root@node1 scripts]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   router_id lb01
}

vrrp_instance VI_1 {
    state BACKUP        # non-preemptive setup: both nodes are BACKUP here; to re-enable preemption, delete nopreempt on both sides and set this back to MASTER on the master
    interface ens160
    virtual_router_id 51
    priority 100
    nopreempt           # do not preempt
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 023654
    }
    virtual_ipaddress {
        192.168.6.250
    }
    notify_master "/scripts/notify.sh master 192.168.6.250"
    notify_backup "/scripts/notify.sh backup 192.168.6.250"
}

virtual_server 192.168.6.250 80 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    persistence_timeout 50
    protocol TCP

    real_server 192.168.6.135 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }

    real_server 192.168.6.154 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

[root@node1 scripts]# systemctl restart keepalived
[root@node1 scripts]# systemctl status keepalived
● keepalived.service - LVS and VRRP High Availability Monitor
   Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disa>
   Active: active (running) since Fri 2022-09-02 00:57:07 CST; 9s ago
  Process: 11678 ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS (code=exited, status=0/>
 Main PID: 11679 (keepalived)
    Tasks: 3 (limit: 11202)
   Memory: 3.0M
   CGroup: /system.slice/keepalived.service
           ├─11679 /usr/sbin/keepalived -D
           ├─11680 /usr/sbin/keepalived -D
           └─11681 /usr/sbin/keepalived -D

 

Configure keepalived on node2

Add the script

[root@node2 scripts]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   router_id lb02
}

vrrp_instance VI_1 {
    state BACKUP         # both nodes must be BACKUP in non-preemptive mode
    interface ens160
    virtual_router_id 51
    priority 90
    nopreempt         # do not preempt
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 023654
    }
    virtual_ipaddress {
        192.168.6.250
    }
    notify_master "/scripts/notify.sh master 192.168.6.250"
    notify_backup "/scripts/notify.sh backup 192.168.6.250"
}

virtual_server 192.168.6.250 80 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    persistence_timeout 50
    protocol TCP

    real_server 192.168.6.135 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }

    real_server 192.168.6.154 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

[root@node2 scripts]# systemctl restart keepalived
[root@node2 scripts]# systemctl status keepalived
● keepalived.service - LVS and VRRP High Availability Monitor
   Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disa>
   Active: active (running) since Fri 2022-09-02 00:57:58 CST; 8s ago
  Process: 15273 ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS (code=exited, status=0/>
 Main PID: 15274 (keepalived)
    Tasks: 3 (limit: 11202)
   Memory: 3.2M
   CGroup: /system.slice/keepalived.service
           ├─15274 /usr/sbin/keepalived -D
           ├─15276 /usr/sbin/keepalived -D
           └─15277 /usr/sbin/keepalived -D
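
While testing the failover below, the VRRP state transitions are easiest to watch live in the journal on either node:

# follow keepalived's state changes (Entering MASTER/BACKUP STATE, etc.)
journalctl -u keepalived -f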

 

Verification: simulate a node1 failure (non-preemptive mode here)

# First check node1: the VIP is still held here
[root@node1 scripts]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:0c:29:d7:b0:2d brd ff:ff:ff:ff:ff:ff
    inet 192.168.6.152/24 brd 192.168.6.255 scope global noprefixroute ens160
       valid_lft forever preferred_lft forever
    inet 192.168.6.250/32 scope global ens160   # the VIP
       valid_lft forever preferred_lft forever
    inet 192.168.6.250/24 brd 192.168.6.255 scope global secondary noprefixroute ens160
       valid_lft forever preferred_lft forever

# Check node2: no VIP
[root@node2 scripts]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:0c:29:42:d0:bc brd ff:ff:ff:ff:ff:ff
    inet 192.168.6.153/24 brd 192.168.6.255 scope global noprefixroute ens160
       valid_lft forever preferred_lft forever
    inet 192.168.6.250/24 brd 192.168.6.255 scope global secondary noprefixroute ens160
       valid_lft forever preferred_lft forever

# Shut node1 down
[root@node1 scripts]# init 0

# Check node2: when node1 failed, it took over the VIP and became MASTER, so the service keeps running; this is the master/backup failover
[root@node2 scripts]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:0c:29:42:d0:bc brd ff:ff:ff:ff:ff:ff
    inet 192.168.6.153/24 brd 192.168.6.255 scope global noprefixroute ens160
       valid_lft forever preferred_lft forever
    inet 192.168.6.250/32 scope global ens160   # the VIP is here now
       valid_lft forever preferred_lft forever
    inet 192.168.6.250/24 brd 192.168.6.255 scope global secondary noprefixroute ens160
       valid_lft forever preferred_lft forever
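
From a client, the failover can also be verified end to end by polling the VIP while node1 goes down; with the rr scheduler the replies should alternate between rs1 and rs2 and keep coming throughout (note that persistence_timeout 50 in the config may pin a single client to one RS):

# poll the VIP once a second; output should continue across the failover
while true; do curl -s --connect-timeout 1 http://192.168.6.250; sleep 1; done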

Source: https://www.cnblogs.com/sunyiming023654/p/16646582.html