Deploying keepalived + LVS for a highly available multi-master Kubernetes cluster
Minimum spec per node: 2 CPU cores, 2 GB RAM
192.168.1.11 master1    192.168.1.12 master2
192.168.1.13 master3    192.168.1.20 node1
I. Initialize the lab environment (run on every node)
1. Set the hostname and configure the hosts file, yum repos, firewall, SELinux, time sync, and swap
[root@master1 ~]# hostnamectl set-hostname master1
[root@master1 ~]# vim /etc/hosts
192.168.1.11 master1
192.168.1.12 master2
192.168.1.13 master3
192.168.1.20 node1
[root@master1 ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
[root@master1 ~]# curl -o /etc/yum.repos.d/docker-ce.repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@master1 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
EOF
[root@master1 ~]# yum clean all
[root@master1 ~]# yum makecache fast
[root@master1 ~]# systemctl stop firewalld && systemctl disable firewalld
[root@master1 ~]# ntpdate cn.pool.ntp.org
[root@master1 ~]# crontab -e
0 */1 * * * /usr/sbin/ntpdate cn.pool.ntp.org
[root@master1 ~]# systemctl restart crond
[root@master1 ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
[root@master1 ~]# swapoff -a
[root@master1 ~]# sed -i '/swap/s/^/#/g' /etc/fstab
[root@master1 ~]# reboot -f
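After the reboot, a quick sanity check (not part of the original steps; standard CentOS 7 commands) confirms that SELinux, swap and the firewall are really off:
[root@master1 ~]# getenforce                      # should print Disabled
[root@master1 ~]# free -m | grep -i swap          # the Swap total should be 0
[root@master1 ~]# systemctl is-active firewalld   # should print inactive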
2. Adjust kernel parameters and configure Docker
[root@master1 ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
[root@master1 ~]# modprobe br_netfilter
[root@master1 ~]# sysctl --system
[root@master1 ~]# mkdir -p /etc/docker
[root@master1 ~]# cat > /etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2",
"storage-opts": [
"overlay2.override_kernel_check=true"
]
}
EOF
[root@master1 ~]# yum install -y docker-ce-19.03.7-3.el7
[root@master1 ~]# systemctl enable docker && systemctl start docker
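To confirm that the daemon.json above was picked up, Docker should report the systemd cgroup driver (otherwise kubelet and Docker will disagree):
[root@master1 ~]# docker info 2>/dev/null | grep -i 'cgroup driver'    # expected: Cgroup Driver: systemd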
3. Make the bridge settings persistent and enable IPVS
[root@master1 ~]# echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables
[root@master1 ~]# echo 1 >/proc/sys/net/bridge/bridge-nf-call-ip6tables
[root@master1 ~]# echo """
vm.swappiness = 0
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
""" > /etc/sysctl.conf
[root@master1 ~]# sysctl -p
[root@master1 ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
/sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
if [ $? -eq 0 ]; then
/sbin/modprobe \${kernel_module}
fi
done
EOF
[root@master1 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
[root@master1 ~]# lsmod | grep ip_vs
nf_nat 26583 4 ip_vs_ftp,nf_nat_ipv4,nf_nat_ipv6,nf_nat_masquerade_ipv4
ip_vs_sed 12519 0
ip_vs_nq 12516 0
ip_vs_sh 12688 0
ip_vs_dh 12688 0
ip_vs_lblcr 12922 0
ip_vs_lblc 12819 0
ip_vs_wrr 12697 0
ip_vs_rr 12600 35
ip_vs_wlc 12519 0
ip_vs_lc 12516 0
ip_vs 145458 59 ip_vs_dh,ip_vs_lc,ip_vs_nq,ip_vs_rr,ip_vs_sh,ip_vs_ftp,ip_vs_sed,ip_vs_wlc,ip_vs_wrr,ip_vs_lblcr,ip_vs_lblc
nf_conntrack 139264 9 ip_vs,nf_nat,nf_nat_ipv4,nf_nat_ipv6,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_ipv4,nf_conntrack_ipv6
libcrc32c 12644 4 xfs,ip_vs,nf_nat,nf_conntrack
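Note that /etc/sysconfig/modules/*.modules is a leftover from older CentOS releases and may not be executed at boot under systemd. If the IPVS modules should survive a reboot, one option (a sketch, in addition to the script above) is to declare them for systemd-modules-load:
[root@master1 ~]# cat <<EOF > /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF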
4. Install Kubernetes 1.18.2
[root@master1 ~]# yum -y install kubelet-1.18.2 kubeadm-1.18.2 kubectl-1.18.2
Installed:
kubeadm.x86_64 0:1.18.2-0 kubelet.x86_64 0:1.18.2-0 kubectl.x86_64 0:1.18.2-0
Dependency Installed:
cri-tools.x86_64 0:1.19.0-0 kubernetes-cni.x86_64 0:0.8.7-0
Upload the image archives to master1, master2, master3 and node1, then load them:
[root@master1 ~]# for i in `ls *.gz`;do docker load -i $i;done
[root@master1 ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
k8s.gcr.io/etcd 3.4.3-0 303ce5db0e90 2 years ago 288MB
k8s.gcr.io/kube-apiserver v1.18.2 6ed75ad404bd 20 months ago 173MB
k8s.gcr.io/kube-scheduler v1.18.2 a3099161e137 20 months ago 95.3MB
k8s.gcr.io/kube-controller-manager v1.18.2 ace0a8c17ba9 20 months ago 162MB
node1 only needs the following three images:
k8s.gcr.io/pause 3.2 80d28bedfe5d 22 months ago 683kB
k8s.gcr.io/coredns 1.6.7 67da37a9a360 22 months ago 43.8MB
k8s.gcr.io/kube-proxy v1.18.2 0d40868643c6 20 months ago 117MB
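The same archives have to be loaded on every node. If passwordless root SSH between the machines is set up, a small loop (a sketch; adjust the file names and host list to your environment) saves doing it by hand:
[root@master1 ~]# for host in master2 master3 node1; do scp *.gz $host:/root/ && ssh $host 'for i in /root/*.gz; do docker load -i $i; done'; done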
Deploy keepalived + LVS for master-node high availability (load balancing the apiserver)
Delete the entire contents of keepalived.conf and rewrite it, using 192.168.1.188 as the virtual IP:
[root@master1 ~]# yum install -y socat keepalived ipvsadm conntrack
[root@master1 ~]# systemctl enable kubelet
[root@master1 ~]# vim /etc/keepalived/keepalived.conf
global_defs {
router_id LVS_DEVEL
}
vrrp_instance VI_1 {
state BACKUP
nopreempt
interface ens33
virtual_router_id 80
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass just0kk
}
virtual_ipaddress {
192.168.1.188
}
}
virtual_server 192.168.1.188 6443 {
delay_loop 6
lb_algo rr
lb_kind DR
net_mask 255.255.255.0
persistence_timeout 0
protocol TCP
real_server 192.168.1.11 6443 {
weight 1
SSL_GET {
url {
path /healthz
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
real_server 192.168.1.12 6443 {
weight 1
SSL_GET {
url {
path /healthz
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
real_server 192.168.1.13 6443 {
weight 1
SSL_GET {
url {
path /healthz
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
}
The configuration on the other two masters differs only in:
interface ens33    # use the actual NIC name on each host
priority 100       # the weight, e.g. 100 / 110 / 90 across the three masters
[root@master2 ~]# vim /etc/keepalived/keepalived.conf
[root@master3 ~]# vim /etc/keepalived/keepalived.conf
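One way to produce the master2 and master3 files (a sketch that assumes the NIC is also ens33 on those hosts and uses the 100/110/90 weights mentioned above) is to copy the master1 file and change only the priority:
[root@master1 ~]# for host in master2 master3; do scp /etc/keepalived/keepalived.conf $host:/etc/keepalived/; done
[root@master2 ~]# sed -i 's/priority 100/priority 110/' /etc/keepalived/keepalived.conf
[root@master3 ~]# sed -i 's/priority 100/priority 90/'  /etc/keepalived/keepalived.conf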
Run the following on master1, master2, and master3 in turn:
[root@master1 ~]# systemctl enable keepalived.service && systemctl start keepalived.service
[root@master1 ~]# systemctl status keepalived.service
[root@master1 ~]# ip add
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:9d:7b:09 brd ff:ff:ff:ff:ff:ff
inet 192.168.1.11/24 brd 192.168.1.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet 192.168.1.188/32 scope global ens33
[root@master1 ~]# ping 192.168.1.188
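ipvsadm shows whether keepalived created the LVS virtual server. Until the apiservers are running on port 6443 the health checks fail, so an empty real-server list at this point is normal:
[root@master1 ~]# ipvsadm -Ln    # the TCP 192.168.1.188:6443 virtual server should be listed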
Initialize the k8s cluster on master1
[root@master1 ~]# vim kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.18.2
controlPlaneEndpoint: 192.168.1.188:6443
apiServer:
  certSANs:
  # hosts to include in the apiserver certificate
  - 192.168.1.11
  - 192.168.1.12
  - 192.168.1.13
  - 192.168.1.20
  - 192.168.1.188
networking:
  podSubnet: 10.244.0.0/16
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
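Before running the init it can be worth checking that kubeadm expects exactly the image versions loaded earlier (this works offline):
[root@master1 ~]# kubeadm config images list --config kubeadm-config.yaml    # should print the v1.18.2 components plus pause:3.2, etcd:3.4.3-0 and coredns:1.6.7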
Initialize the k8s cluster:
[root@master1 ~]# kubeadm init --config kubeadm-config.yaml
...
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
kubeadm join 192.168.1.188:6443 --token h13awv.1mkmz2majgn4gesg \
--discovery-token-ca-cert-hash sha256:32414237c63a9f7b70fa1f8e2c644a8c328a010208d909897cd1db33d45c8750 \
--control-plane
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.1.188:6443 --token h13awv.1mkmz2majgn4gesg \
--discovery-token-ca-cert-hash sha256:32414237c63a9f7b70fa1f8e2c644a8c328a010208d909897cd1db33d45c8750
Run the following on master1; only then does kubectl have permission to operate on cluster resources:
[root@master1 ~]# mkdir -p $HOME/.kube
[root@master1 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master1 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@master1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master1 NotReady master 8m11s v1.18.2
[root@master1 ~]# kubectl get pods -n kube-system
...
coredns-7ff77c879f-j48h6 0/1 Pending 0 3m16s
coredns-7ff77c879f-lrb77 0/1 Pending 0 3m16s
Because no network plugin has been installed yet, the node is still NotReady and coredns stays Pending; install Calico (or Flannel).
[root@master1 ~]# docker load -i cni.tar.gz;docker load -i calico-node.tar.gz
[root@master1 ~]# vim calico.yaml
167 value: "can-reach=192.168.1.11"    # IP autodetection: pick the interface that can reach master1
181 value: "10.244.0.0/16"             # CALICO_IPV4POOL_CIDR, must match the podSubnet above
[root@master1 ~]# kubectl apply -f calico.yaml
[root@master1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master1 Ready master 37m v1.18.2
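A quick check (not part of the original output) that the network plugin actually came up:
[root@master1 ~]# kubectl get pods -n kube-system -o wide | grep -E 'calico|coredns'    # all should be Running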
Do the same on master2 and master3.
Copy master1's certificates to master2 and master3
(1) Create the certificate directories on master2 and master3
[root@master2 ~]# cd /root && mkdir -p /etc/kubernetes/pki/etcd && mkdir -p ~/.kube/
(2) On master1, copy the certificates over to master2 and master3 (shown below for master2; repeat for master3)
[root@master1 ~]# cd /etc/kubernetes/pki/
[root@master1 pki]# for i in ca* sa* front-proxy-ca*;do scp /etc/kubernetes/pki/$i master2:/etc/kubernetes/pki/;done
ca.crt 100% 1025 452.5KB/s 00:00
ca.key 100% 1679 1.5MB/s 00:00
sa.key 100% 1679 1.0MB/s 00:00
sa.pub 100% 451 176.1KB/s 00:00
front-proxy-ca.crt 100% 1038 369.9KB/s 00:00
front-proxy-ca.key 100% 1679 1.4MB/s 00:00
[root@master1 pki]# scp /etc/kubernetes/pki/etcd/ca* master2:/etc/kubernetes/pki/etcd/
ca.crt 100% 1017 774.0KB/s 00:00
ca.key 100% 1017 774.0KB/s 00:00
(3) After the certificates are copied, run the following on master2 and master3
(the token and hash below were printed during the master1 init)
[root@master2~]# kubeadm join 192.168.1.188:6443 --token h13awv.1mkmz2majgn4gesg \
--discovery-token-ca-cert-hash sha256:32414237c63a9f7b70fa1f8e2c644a8c328a010208d909897cd1db33d45c8750 \
--control-plane
[root@master2~]# mkdir -p $HOME/.kube
[root@master2~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master2~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
Join node1 to the k8s cluster (run on node1)
[root@node1~]# kubeadm join 192.168.1.188:6443 --token h13awv.1mkmz2majgn4gesg \
--discovery-token-ca-cert-hash sha256:32414237c63a9f7b70fa1f8e2c644a8c328a010208d909897cd1db33d45c8750
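The bootstrap token printed by kubeadm init is only valid for 24 hours; if it has expired when a node is added later, generate a fresh join command on master1:
[root@master1 ~]# kubeadm token create --print-join-command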
Check the cluster node status from master1
[root@master1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master1 Ready master 65m v1.18.2
master2 Ready master 5m15s v1.18.2
master3 Ready master 2m4s v1.18.2
node1 Ready <none> 29s v1.18.2
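Since the KubeProxyConfiguration above asked for ipvs mode, it is worth confirming it took effect (a quick check, assuming kube-proxy's metrics endpoint is at its default 127.0.0.1:10249):
[root@master1 ~]# curl 127.0.0.1:10249/proxyMode    # should print ipvs
[root@master1 ~]# ipvsadm -Ln | head                # service cluster IPs now appear as IPVS virtual servers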
Install traefik on every node; create the certificate on master1
[root@master1 ~]# docker load -i traefik_1_7_9.tar.gz
[root@master1 ~]# mkdir ikube/tls -p
echo """
[req]
distinguished_name = req_distinguished_name
prompt = yes
[ req_distinguished_name ]
countryName = Country Name (2 letter code)
countryName_value=CN
stateOrProvinceName = State or Province Name (full name)
stateOrProvinceName_value = Beijing
localityName= Locality Name (eg, city)
localityName_value = Haidian
organizationName = Organization Name (eg, company)
organizationName_value = Channelsoft
organizationalUnitName = Organizational Unit Name (eg, section)
organizationalUnitName_value = R & D Department
commonName = Common Name (eg, your name or your server\'s hostname)
commonName_value = *.multi.io
emailAddress = Email Address
emailAddress_value = lentil1016@gmail.com
""" > ikube/tls/openssl.cnf
[root@master1 ~]# openssl req -newkey rsa:4096 -nodes -config ~/ikube/tls/openssl.cnf -days 3650 -x509 -out ~/ikube/tls/tls.crt -keyout ~/ikube/tls/tls.key
[root@master1 ~]# kubectl create -n kube-system secret tls ssl --cert ~/ikube/tls/tls.crt --key ~/ikube/tls/tls.key
secret/ssl created
[root@master1 ~]# kubectl apply -f traefik.yaml
[root@master1 ~]# kubectl get pod -n kube-system -owide |grep traefik
traefik-ingress-controller-dqb6f 1/1 Running 0 62s 192.168.1.11 master1 <none>
traefik-ingress-controller-nwsk5 1/1 Running 0 62s 192.168.1.13 master3 <none>
traefik-ingress-controller-pwxpx 1/1 Running 0 62s 192.168.1.12 master2 <none>
traefik-ingress-controller-qxqkh 1/1 Running 0 62s 192.168.1.20 node1 <none>
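With traefik running on every node, a minimal Ingress can route a host under *.multi.io (the name the certificate above covers) to a backend Service. This is a hypothetical example: the service web-svc and host app.multi.io are placeholders, not objects created in this article, and it assumes traefik.yaml uses the ssl secret as its default certificate.
[root@master1 ~]# cat <<EOF | kubectl apply -f -
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: app-ingress
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: app.multi.io
    http:
      paths:
      - path: /
        backend:
          serviceName: web-svc
          servicePort: 80
EOF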
Install kubernetes-dashboard v2
[root@master1 ~]# docker load -i dashboard_2_0_0.tar.gz ;docker load -i metrics-scrapter-1-0-1.tar.gz
[root@master1 ~]# kubectl apply -f kubernetes-dashboard.yaml
[root@master1 ~]# kubectl get pods -n kubernetes-dashboard
NAME READY STATUS RESTARTS AGE
dashboard-metrics-scraper-694557449d-pj9cp 1/1 Running 0 16s
kubernetes-dashboard-5f98bdb684-h6c9t 1/1 Running 0 17s
[root@master1 ~]# kubectl get svc -n kubernetes-dashboard
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
dashboard-metrics-scraper ClusterIP 10.107.91.74 <none> 8000/TCP 49s
kubernetes-dashboard ClusterIP 10.99.59.88 <none> 443/TCP 50s
Change the Service type to NodePort (a ClusterIP service is only reachable from inside the cluster):
[root@master1 ~]# kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
30 type: NodePort
[root@master1 ~]# kubectl get svc -n kubernetes-dashboard
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
dashboard-metrics-scraper ClusterIP 10.107.91.74 <none> 8000/TCP 11m
kubernetes-dashboard NodePort 10.99.59.88 <none> 443:30260/TCP 11m
Visit https://192.168.1.188:30260/ (the NodePort shown above). The login page appears; shut down any one master and refresh, and the login page still loads.
Log in to the dashboard with the default token of the service account created by the yaml file
[root@master1 ~]# kubectl get secret -n kubernetes-dashboard
NAME TYPE DATA AGE
default-token-wzcwc kubernetes.io/service-account-token 3 16m
kubernetes-dashboard-certs Opaque 0 16m
kubernetes-dashboard-csrf Opaque 1 16m
kubernetes-dashboard-key-holder Opaque 2 16m
kubernetes-dashboard-token-lqws6 kubernetes.io/service-account-token 3 16m
[root@master1 ~]# kubectl describe secret kubernetes-dashboard-token-lqws6 -n kubernetes-dashboard
...
token: eyJhbGciOiJSUzI1NiIsI...    # a very long string, copy it
Go back to the login page and paste in the token.
By default this token can only see the default namespace.
To be able to view every namespace, bind cluster-admin to the dashboard service account:
[root@master1 ~]# kubectl create clusterrolebinding dashboard-cluster-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard
Refresh the page and you can now view and operate on resources in every namespace.
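If you would rather not grant cluster-admin to the dashboard's built-in service account, a dedicated admin account works too (a sketch; the name dashboard-admin is arbitrary and not part of the original steps):
[root@master1 ~]# kubectl create serviceaccount dashboard-admin -n kubernetes-dashboard
[root@master1 ~]# kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:dashboard-admin
[root@master1 ~]# kubectl describe secret -n kubernetes-dashboard $(kubectl get secret -n kubernetes-dashboard | grep dashboard-admin-token | awk '{print $1}') | grep ^token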
Install the metrics plugin; until then kubectl top does not work
[root@master1 ~]# kubectl top nodes
error: Metrics API not available    # top is not usable yet
[root@master1 ~]# docker load -i metrics-server-amd64_0_3_1.tar.gz ;docker load -i addon.tar.gz
[root@master1 ~]# kubectl apply -f metrics.yaml
[root@master1 ~]# kubectl get pod -n kube-system -owide
metrics-server-8459f8db8c-r6mrz 2/2 Running 0 43s 10.244.3.4 node1
[root@master1 ~]# kubectl top nodes
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
master1 225m 11% 1220Mi 70%
master2 171m 8% 1165Mi 67%
master3 150m 7% 1114Mi 64%
node1 97m 4% 671Mi 39%
[root@master1 ~]# kubectl top pods -n kube-system
NAME CPU(cores) MEMORY(bytes)
calico-node-5rk6g 21m 29Mi
coredns-66bff467f8-hnwkm 3m 10Mi
etcd-master1 57m 89Mi
If you want to use kubectl on a machine that is not a cluster node:
mkdir ~/.kube
then scp /root/.kube/config from a master to that directory, copy the kubectl binary over, and run
kubectl -s https://master-IP:6443 get nodes