使用HAProxy和keepalived 部署高可用负载均衡器
部署的k8s架构由3个master节点和多个worker节点组成。master节点上主要运行k8s的核心服务:etcd、apiserver、controller-manager、scheduler等,需要做高可用,以防止因单点故障导致整个系统无法使用。
1、为3个节点配置网络
节点ip分别是:10.12.70.131、10.12.70.132、10.12.70.133
虚拟的VIP:10.12.70.130
2、配置HAProxy文件
mkdir -p /etc/haproxy/
//创建haproxy.cfg配置文件
vim /etc/haproxy/haproxy.cfg
global
    # Send logs to the local syslog daemon
    log 127.0.0.1 local0
    log 127.0.0.1 local1 notice
    tune.ssl.default-dh-param 2048

defaults
    log global
    mode http
    option dontlognull
    timeout connect 5000ms
    # Long client/server timeouts so kube-apiserver watch streams are not cut
    timeout client 600000ms
    timeout server 600000ms

# Stats web UI: http://<node>:9090/haproxy_stats (admin/admin123)
listen stats
    bind :9090
    mode http
    # NOTE(review): original had a bare "balance" with no algorithm;
    # made the default explicit
    balance roundrobin
    stats uri /haproxy_stats
    stats auth admin:admin123
    stats admin if TRUE

# Front door for the kube-apiserver; keepalived VIP + this port is the HA endpoint
frontend kube-apiserver-https
    mode tcp
    bind :8443
    default_backend kube-apiserver-backend

backend kube-apiserver-backend
    mode tcp
    balance roundrobin
    # Pin each source IP to one apiserver so long-lived connections stay sticky
    stick-table type ip size 200k expire 30m
    stick on src
    server apiserver1 10.12.70.131:6443 check
    server apiserver2 10.12.70.132:6443 check
    server apiserver3 10.12.70.133:6443 check
//创建yaml执行目录
mkdir -p /etc/kubernetes/manifests
//创建haproxy.yaml文件
vim /etc/kubernetes/manifests/haproxy.yaml
# Static Pod running HAProxy on every master node (host network, port 8443).
kind: Pod
apiVersion: v1
metadata:
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ""
  labels:
    component: haproxy
    tier: control-plane
  name: kube-haproxy
  namespace: kube-system
spec:
  hostNetwork: true
  priorityClassName: system-cluster-critical
  containers:
  - name: kube-haproxy
    image: docker.io/haproxy:1.7-alpine
    # Set an explicit pull policy: without it the image may be re-pulled on
    # every restart, and a failed pull keeps haproxy (port 8443) down while
    # keepalived still holds the VIP on this node — all kubectl access then
    # fails. (Original had this remark as an invalid inline "//" comment,
    # which would have corrupted the YAML value.)
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        cpu: 100m
    volumeMounts:
    - name: haproxy-cfg
      readOnly: true
      mountPath: /usr/local/etc/haproxy/haproxy.cfg
  volumes:
  - name: haproxy-cfg
    hostPath:
      path: /etc/haproxy/haproxy.cfg
      type: FileOrCreate
3、配置keepalived
//创建配置文件目录
mkdir -p /etc/keepalived/
//创建配置文件
vim /etc/keepalived/keepalived.conf
global_defs {
    router_id LVS_1
}

# Health check: if haproxy is down this node's priority drops by 30,
# letting the next-highest-priority node take over the VIP.
# (keepalived comments use '#'/'!' — the original '//' markers and the
# misspelled keyword "scritpt" would break this section.)
vrrp_script checkhaproxy {
    script "/usr/bin/check-haproxy.sh"
    interval 3
    weight -30
}

vrrp_instance VI_1 {                # instance name must match on all nodes
    state BACKUP                    # set one node to MASTER, the rest to BACKUP
    interface enp5s0                # bind to the host's actual NIC
    virtual_router_id 51            # must be identical on all nodes
    priority 140                    # higher value wins the VRRP election
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass txgm2m85331919    # shared VRRP password
    }
    virtual_ipaddress {
        10.12.70.130                # the virtual IP (VIP)
    }
    track_script {                  # attach the health check above
        checkhaproxy
    }
}
//配置健康检查脚本(先用 vim 写入下面的脚本内容并保存,再执行 chmod 赋予可执行权限)
vim /usr/bin/check-haproxy.sh
chmod +x /usr/bin/check-haproxy.sh
#!/bin/bash
# keepalived health check: exit 0 if haproxy is listening, non-zero
# otherwise so this node's VRRP priority is lowered and the VIP fails over.
# Port 8443 must match the frontend bind port in haproxy.cfg.
# (Original used "//" inline comments — invalid in shell — plus backticks
# and an unquoted variable.)
count=$(netstat -npa | grep -c 8443)
if [ "$count" -gt 0 ]; then
  exit 0
else
  exit 1
fi
//创建keepalived.yaml文件
vim /etc/kubernetes/manifests/keepalived.yaml
# Static Pod running keepalived on every master node (needs NET_ADMIN to
# manage the VIP on the host's interface).
kind: Pod
apiVersion: v1
metadata:
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ""
  labels:
    component: keepalived
    tier: control-plane
  name: kube-keepalived
  namespace: kube-system
spec:
  hostNetwork: true
  priorityClassName: system-cluster-critical
  containers:
  - name: kube-keepalived
    image: docker.io/osixia/keepalived:stable
    # Same rationale as the haproxy pod: avoid re-pulling the image on
    # every restart.
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        cpu: 100m
    securityContext:
      privileged: true
      capabilities:
        add:
        - NET_ADMIN
    volumeMounts:
    - name: keepalived-conf
      readOnly: true
      mountPath: /usr/local/etc/keepalived/keepalived.conf
    - name: check-haproxy
      readOnly: true
      mountPath: /container/service/keepalived/assets/check-haproxy.sh
  volumes:
  - name: keepalived-conf
    hostPath:
      path: /etc/keepalived/keepalived.conf
      type: FileOrCreate
  - name: check-haproxy
    hostPath:
      path: /usr/bin/check-haproxy.sh
      # fixed typo: original had "typs:", an invalid field name that
      # would make the manifest fail validation
      type: FileOrCreate
创建keepalived.yaml和haproxy.yaml后等待kubeadm安装。