Kubernetes HA

[root@k8s-master01 lb]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.229.199 k8s-vip
192.168.229.200 k8s-master01
192.168.229.201 k8s-master02
192.168.229.202 k8s-master03
192.168.229.203 k8s-slave01 
192.168.229.204 k8s-slave02
[root@k8s-master01 lb]# 
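The same hosts file has to be present on every node; a minimal sketch for pushing it out from master01, assuming root SSH access to the other nodes:

for host in k8s-master02 k8s-master03 k8s-slave01 k8s-slave02; do
    scp /etc/hosts root@${host}:/etc/hosts   # keep name resolution identical on all nodes
done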

[root@localhost ~]# 
[root@localhost ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens33 
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
DEVICE=ens33
ONBOOT=yes
IPADDR=192.168.229.200
PREFIX=24
GATEWAY=192.168.229.2
DNS1=192.168.229.2
[root@localhost ~]# yum install epel-release -y
[root@localhost ~]# yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git
[root@localhost ~]# hostnamectl set-hostname k8s-master01
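The other nodes need matching hostnames; a sketch of the corresponding commands, each run on the node it belongs to per the hosts table above:

hostnamectl set-hostname k8s-master02   # on 192.168.229.201
hostnamectl set-hostname k8s-master03   # on 192.168.229.202
hostnamectl set-hostname k8s-slave01    # on 192.168.229.203
hostnamectl set-hostname k8s-slave02    # on 192.168.229.204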

Set the firewall to iptables and flush the rules
systemctl stop firewalld && systemctl disable firewalld
yum install -y iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save
Disable swap and SELinux
swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
setenforce 0
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
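A quick sanity check after disabling swap and SELinux (a sketch):

free -m | grep -i swap      # Swap total should now be 0
getenforce                  # prints Permissive now, Disabled after the next reboot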

Tune kernel parameters for Kubernetes
modprobe br_netfilter
cat > kubernetes.conf << EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
# do not use swap unless the system is OOM
vm.swappiness=0
# do not check whether enough physical memory is available
vm.overcommit_memory=1
# do not panic on OOM; let the OOM killer handle it
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF

cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
sysctl -p /etc/sysctl.d/kubernetes.conf
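To confirm the values were applied (a sketch):

sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward   # both should print 1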

Set the system time zone
timedatectl set-timezone Asia/Shanghai
timedatectl set-local-rtc 0
systemctl restart rsyslog
systemctl restart crond
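The ntpdate/ntp packages installed earlier can keep the node clocks in sync, which matters for etcd and certificates; a sketch, with ntp1.aliyun.com used only as an example time server:

ntpdate ntp1.aliyun.com                          # one-shot sync before starting the daemon
systemctl start ntpd && systemctl enable ntpd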

Stop unneeded services
systemctl stop postfix && systemctl disable postfix

Configure rsyslogd and systemd journald
mkdir /var/log/journal
mkdir /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf << EOF
[Journal]
# persist logs to disk
Storage=persistent

# compress archived logs
Compress=yes

SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000

# cap total disk usage at 10G
SystemMaxUse=10G

# cap a single journal file at 200M
SystemMaxFileSize=200M

# keep logs for 2 weeks
MaxRetentionSec=2week

# do not forward to syslog
ForwardToSyslog=no
EOF


systemctl restart systemd-journald
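To confirm journald is now writing to /var/log/journal and respecting the size cap (a sketch):

journalctl --disk-usage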


Upgrade the system kernel to 4.4

List all GRUB kernel menu entries
awk -F\' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg

rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
yum --enablerepo=elrepo-kernel install -y kernel-lt
grub2-set-default "CentOS Linux (4.4.198-1.el7.elrepo.x86_64) 7 (Core)" 
reboot # reboot once the default entry is set
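Note that the menu entry title passed to grub2-set-default must match the kernel actually installed (here the running kernel turns out to be 4.4.223, not 4.4.198). A sketch of selecting it by index instead, using the awk listing above and assuming the new elrepo kernel is entry 0:

grub2-set-default 0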

# check the kernel version after the reboot

[root@k8s-master01 ~]# uname -r
4.4.223-1.el7.elrepo.x86_64
[root@k8s-master01 ~]# 


Disable NUMA

cp /etc/default/grub{,.bak}
vim /etc/default/grub # add numa=off to the GRUB_CMDLINE_LINUX line, as shown below:
[root@k8s-master01 ~]# cat /etc/default/grub
GRUB_TIMEOUT=5
GRUB_DISTRIBUTOR="$(sed 's, release .*$,,g' /etc/system-release)"
GRUB_DEFAULT=saved
GRUB_DISABLE_SUBMENU=true
GRUB_TERMINAL_OUTPUT="console"
GRUB_CMDLINE_LINUX="crashkernel=auto rd.lvm.lv=centos/root rd.lvm.lv=centos/swap rhgb quiet numa=off"
GRUB_DISABLE_RECOVERY="true"
[root@k8s-master01 ~]# 


Back up the current grub.cfg and regenerate it
cp /boot/grub2/grub.cfg{,.bak}
grub2-mkconfig -o /boot/grub2/grub.cfg

reboot
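After the reboot, confirm the parameter took effect (a sketch):

cat /proc/cmdline | grep numa=off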


Deploying with kubeadm
Prerequisites for enabling IPVS in kube-proxy

modprobe br_netfilter
cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
Install Docker
# dependencies
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce
reboot

# start Docker and enable it at boot
systemctl start docker
systemctl enable docker

cat > /etc/docker/daemon.json << EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "registry-mirrors": ["https://p02s6s7i.mirror.aliyuncs.com"]
}
EOF

mkdir -p /etc/systemd/system/docker.service.d
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
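To confirm Docker picked up the systemd cgroup driver from daemon.json (a sketch):

docker info | grep -i 'cgroup driver'    # should report: systemd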

Start the HAProxy and Keepalived containers on the master nodes
Install kubeadm (masters and workers)
cat << EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum -y install kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1
systemctl enable kubelet.service
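A quick check that the pinned versions landed (a sketch):

kubeadm version -o short    # should print v1.15.1
kubelet --version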

Pre-pull the images
Because k8s.gcr.io cannot be reached from inside mainland China, load the images the cluster needs ahead of time.
# run on master01; the other nodes copy the images from master01
# create the image list file images
Load the images
[root@k8s-master01 kubernetes_3_master_ha]# docker images
REPOSITORY                              TAG                 IMAGE ID            CREATED             SIZE
quay.io/coreos/flannel                  v0.12.0-amd64       4e9f801d2217        2 months ago        52.8MB
grafana/grafana                         6.6.0               199e063bb422        3 months ago        233MB
gcr.io/kubernetes-helm/tiller           v2.14.3             2d0a693df3ba        9 months ago        94.2MB
k8s.gcr.io/kube-apiserver               v1.15.1             68c3eb07bfc3        10 months ago       207MB
k8s.gcr.io/kube-scheduler               v1.15.1             b0b3c4c404da        10 months ago       81.1MB
k8s.gcr.io/kube-controller-manager      v1.15.1             d75082f1d121        10 months ago       159MB
k8s.gcr.io/kube-proxy                   v1.15.1             89a062da739d        10 months ago       82.4MB
quay.io/prometheus/node-exporter        v0.18.1             e5a616e4b9cf        11 months ago       22.9MB
quay.io/coreos/flannel                  v0.11.0-amd64       ff281650a721        15 months ago       52.6MB
quay.io/coreos/kube-rbac-proxy          v0.4.1              70eeaa7791f2        15 months ago       41.3MB
k8s.gcr.io/coredns                      1.3.1               eb516548c180        16 months ago       40.3MB
k8s.gcr.io/kubernetes-dashboard-amd64   v1.10.1             f9aed6605b81        17 months ago       122MB
k8s.gcr.io/etcd                         3.3.10              2c4adeb21b4f        17 months ago       258MB
wise2c/keepalived-k8s                   latest              0ba6a7862982        23 months ago       14MB
wise2c/haproxy-k8s                      latest              fde31577093d        24 months ago       71.1MB
k8s.gcr.io/pause                        3.1                 da86e6ba6ca1        2 years ago         742kB

[root@k8s-master01 kubernetes_3_master_ha]# cat load_image.sh 
FILE="
coredns.tar
dashboard.tar
etcd.tar
flannel1.tar
flannel.tar
grafana.tar
haproxy-k8s.tar
keepalived.tar
kube-apiserver.tar
kube-controller-manager.tar
kube-proxy.tar
pause.tar
prometheus.tar
rbac.tar
scheduler.tar
tiller.tar
"
for i in $FILE
do
    docker load <  $i
done
[root@k8s-master01 kubernetes_3_master_ha]# ./load_image.sh 
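The tar files loaded above were presumably produced with docker save on a machine that can reach k8s.gcr.io and quay.io; a sketch of how they can be generated, with image names and tags taken from the docker images listing above (the dashboard, grafana, prometheus, rbac-proxy and tiller images are saved the same way):

docker save k8s.gcr.io/kube-apiserver:v1.15.1          -o kube-apiserver.tar
docker save k8s.gcr.io/kube-controller-manager:v1.15.1 -o kube-controller-manager.tar
docker save k8s.gcr.io/kube-scheduler:v1.15.1          -o scheduler.tar
docker save k8s.gcr.io/kube-proxy:v1.15.1              -o kube-proxy.tar
docker save k8s.gcr.io/etcd:3.3.10                     -o etcd.tar
docker save k8s.gcr.io/coredns:1.3.1                   -o coredns.tar
docker save k8s.gcr.io/pause:3.1                       -o pause.tar
docker save quay.io/coreos/flannel:v0.12.0-amd64       -o flannel.tar
docker save wise2c/haproxy-k8s:latest                  -o haproxy-k8s.tar
docker save wise2c/keepalived-k8s:latest               -o keepalived.tar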

At this point, shut down the VM and clone it for the other nodes.
[root@k8s-master01 kubernetes_3_master_ha]# cp -r data /data

Reboot the VM, then:
mkdir -p /etc/systemd/system/docker.service.d
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
mkdir /usr/local/kubernetes
cd !$
cd /usr/local/kubernetes

[root@k8s-master01 kubernetes]# cd /usr/local/kubernetes
[root@k8s-master01 kubernetes]# cd /data/lb
[root@k8s-master01 lb]# ls
etc  kubeadm-config.yaml  start-haproxy.sh  start-keepalived.sh
[root@k8s-master01 lb]# ls etc/
haproxy.cfg
[root@k8s-master01 lb]# 
[root@k8s-master01 lb]# tail -f etc/haproxy.cfg 
  log global
  balance roundrobin
  server rancher01 192.168.229.200:6443


[root@k8s-master01 lb]# cat start-haproxy.sh 
#!/bin/bash
MasterIP1=192.168.229.200
MasterIP2=192.168.229.201
MasterIP3=192.168.229.202
MasterPort=6443

docker run -d --restart=always --name HAProxy-K8S -p 6444:6444 \
        -e MasterIP1=$MasterIP1 \
        -e MasterIP2=$MasterIP2 \
        -e MasterIP3=$MasterIP3 \
        -e MasterPort=$MasterPort \
        -v /data/lb/etc/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg \
        wise2c/haproxy-k8s
[root@k8s-master01 lb]# 
[root@k8s-master01 lb]# chmod 777 start-haproxy.sh 
[root@k8s-master01 lb]# ./start-haproxy.sh 
[root@k8s-master01 lb]# netstat -anpt | grep :6444
tcp6       0      0 :::6444                 :::*                    LISTEN      2003/docker-proxy   

[root@k8s-master01 lb]# cat start-keepalived.sh 
#!/bin/bash
VIRTUAL_IP=192.168.229.199
INTERFACE=ens33
NETMASK_BIT=24
CHECK_PORT=6444
RID=10
VRID=160
MCAST_GROUP=224.0.0.18

docker run -itd --restart=always --name=Keepalived-K8S \
        --net=host --cap-add=NET_ADMIN \
        -e VIRTUAL_IP=$VIRTUAL_IP \
        -e INTERFACE=$INTERFACE \
        -e CHECK_PORT=$CHECK_PORT \
        -e RID=$RID \
        -e VRID=$VRID \
        -e NETMASK_BIT=$NETMASK_BIT \
        -e MCAST_GROUP=$MCAST_GROUP \
        wise2c/keepalived-k8s

[root@k8s-master01 lb]# chmod 777 ./start-keepalived.sh 
[root@k8s-master01 lb]# ./start-keepalived.sh 

[root@k8s-master01 lb]# ip addr show | grep 192.168.229
    inet 192.168.229.200/24 brd 192.168.229.255 scope global noprefixroute ens33
    inet 192.168.229.199/24 scope global secondary ens33
[root@k8s-master01 lb]# 
[root@k8s-master01 lb]# cat /etc/yum.repos.d/kubernetes.repo 
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
[root@k8s-master01 lb]# 
yum install -y kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1
systemctl enable kubelet.service
[root@k8s-master01 ~]# kubeadm config print init-defaults > kubeadm-config.yaml
[root@k8s-master01 ~]# vi kubeadm-config.yaml   # change/add the following (numbers are line positions in the generated file):
     12   advertiseAddress: 192.168.229.200
     26 controlPlaneEndpoint: "192.168.229.199:6444"
     35 kubernetesVersion: v1.15.1
     36 networking:
     37   dnsDomain: cluster.local
     38   podSubnet: "10.244.0.0/16"
     39   serviceSubnet: 10.96.0.0/12
     40 scheduler: {}
     41 ---
     42 apiVersion: kubeproxy.config.k8s.io/v1alpha1
     43 kind: KubeProxyConfiguration
     44 featureGates:
     45   SupportIPVSProxyMode: true
     46 mode: ipvs

[root@k8s-master01 install]# 
[root@k8s-master01 install]# vi kubeadm-config.yaml 
[root@k8s-master01 ~]# kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.229.199:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:5101d70fcb3c84345801e892fec6deed60b7cac83be2cc358dac4a9de482c6c3 \
    --control-plane --certificate-key 43220eb7d1b8de372f8ec308dc1601ebdb527cf0ce6baf0b38dded519e2ab9fc

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use 
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.229.199:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:5101d70fcb3c84345801e892fec6deed60b7cac83be2cc358dac4a9de482c6c3 

[root@k8s-master01 ~]# 
[root@k8s-master01 ~]# vi .kube/config

For now keep everything pointing at 192.168.229.200 only and do not change anything yet; otherwise requests could be routed to a master that has not been started.
[root@k8s-master01 ~]# scp -r /data root@k8s-master02:/
[root@k8s-master01 ~]# scp -r /data root@k8s-master03:/

[root@k8s-master02 ~]# cd /data/lb/
[root@k8s-master02 lb]# ls
etc  kubeadm-config.yaml  start-haproxy.sh  start-keepalived.sh
[root@k8s-master02 lb]# ./start-haproxy.sh 
[root@k8s-master02 lb]# ./start-keepalived.sh 
[root@k8s-master02 lb]# 
kubeadm join 192.168.229.199:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:6ecf25b8332b493b271a1bd5fca19c95f8fd1a12f65c3ad9664685b61722f396 \
    --control-plane --certificate-key e0df04746ea328085164b18c530547243fd371859e6287c308d2ce9a8aac7481


 [root@k8s-master02 ~]# mkdir -p $HOME/.kube   
 [root@k8s-master02 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
 [root@k8s-master02 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
 [root@k8s-master02 ~]# kubectl get nodes
NAME           STATUS     ROLES    AGE     VERSION
k8s-master01   NotReady   master   16m     v1.15.1
k8s-master02   NotReady   master   2m17s   v1.15.1
[root@k8s-master02 ~]# 
[root@k8s-master02 .kube]# cat config | grep server
    server: https://192.168.229.199:6444
[root@k8s-master02 .kube]# 
[root@k8s-master03 lb]# ./start-haproxy.sh 


[root@k8s-master03 lb]# systemctl enable kubelet.service
[root@k8s-master03 lb]# kubeadm join 192.168.229.199:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:6ecf25b8332b493b271a1bd5fca19c95f8fd1a12f65c3ad9664685b61722f396 \
    --control-plane --certificate-key e0df04746ea328085164b18c530547243fd371859e6287c308d2ce9a8aac7481

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config    

[root@k8s-master01 ~]# docker ps| grep HAProxy-K8S
1fe211bad592        wise2c/haproxy-k8s      "/docker-entrypoint.…"   3 hours ago         Up 3 hours          0.0.0.0:6444->6444/tcp   HAProxy-K8S
[root@k8s-master01 ~]# 
[root@k8s-master01 ~]# cd /data/lb/
[root@k8s-master01 lb]# vi etc/haproxy.cfg 
[root@k8s-master01 lb]# cat etc/haproxy.cfg 
  server rancher01 192.168.229.200:6443
  server rancher02 192.168.229.201:6443
  server rancher03 192.168.229.202:6443
[root@k8s-master01 lb]# docker rm -f HAProxy-K8S && bash /data/lb/start-haproxy.sh

[root@k8s-master01 lb]# scp etc/haproxy.cfg root@k8s-master02:/data/lb/etc/haproxy.cfg 
[root@k8s-master01 lb]# scp etc/haproxy.cfg root@k8s-master03:/data/lb/etc/haproxy.cfg   
[root@k8s-master02 ~]# docker rm -f HAProxy-K8S && bash /data/lb/start-haproxy.sh
[root@k8s-master03 ~]# docker rm -f HAProxy-K8S && bash /data/lb/start-haproxy.sh
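With all three backends in haproxy.cfg and the proxies restarted, the VIP should answer on 6444; a sketch of a quick check (on a default kubeadm cluster the /version endpoint should be readable without credentials):

curl -k https://192.168.229.199:6444/version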


Install flannel
The RBAC resources must be created first, otherwise the deployment fails:
https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml

Download the kube-flannel.yml file from:
https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel.yml

[root@k8s-master01 ~]# kubectl apply -f kube-flannel.yml 
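Once the flannel DaemonSet pods are running, the masters switch from NotReady to Ready; a sketch of the check:

kubectl get pods -n kube-system -o wide | grep flannel
kubectl get nodes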


HA is still not fully achieved at this point: change the server address in each master's .kube/config to that node's own IP and port 6443, not 6444.
[root@k8s-master02 ~]# cat .kube/config | grep server
    server: https://192.168.229.201:6443
[root@k8s-master03 ~]# cat .kube/config | grep server
    server: https://192.168.229.202:6443
[root@k8s-master01 ~]# cat .kube/config | grep server
    server: https://192.168.229.200:6443
[root@k8s-master01 ~]#     
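The same edit as a one-liner, a sketch to run on each master with that node's own IP substituted:

sed -i 's#server: https://192.168.229.199:6444#server: https://192.168.229.200:6443#' $HOME/.kube/config   # example for master01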

Check the cluster state
Check which node kube-controller-manager is currently the leader on
[root@k8s-master01 ~]#  kubectl get endpoints kube-controller-manager --namespace=kube-system -o yaml
apiVersion: v1
kind: Endpoints
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"k8s-master01_92c8d2d4-4d75-4460-ac0e-a13b78006f5c","leaseDurationSeconds":15,"acquireTime":"2020-05-20T05:38:22Z","renewTime":"2020-05-20T05:52:28Z","leaderTransitions":1}'
  creationTimestamp: "2020-05-20T05:36:31Z"
  name: kube-controller-manager
  namespace: kube-system
  resourceVersion: "2103"
  selfLink: /api/v1/namespaces/kube-system/endpoints/kube-controller-manager
  uid: 6252f1e8-0b47-4391-a894-d21b6370317c

Check which node kube-scheduler is currently the leader on
[root@k8s-master01 ~]#  kubectl get endpoints kube-scheduler --namespace=kube-system -o yaml
apiVersion: v1
kind: Endpoints
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"k8s-master02_f86541e5-e83a-4845-98d1-7abe25b9af16","leaseDurationSeconds":15,"acquireTime":"2020-05-20T05:38:23Z","renewTime":"2020-05-20T05:52:56Z","leaderTransitions":1}'
  creationTimestamp: "2020-05-20T05:36:31Z"
  name: kube-scheduler
  namespace: kube-system
  resourceVersion: "2152"
  selfLink: /api/v1/namespaces/kube-system/endpoints/kube-scheduler
  uid: ee36393a-b022-4d72-864b-17489bd32127
[root@k8s-master01 ~]# 

kube-apiserver has no such leader to look up, because all apiserver instances are active behind the HA endpoint.

[root@k8s-master01 ~]# kubectl -n kube-system exec etcd-k8s-master01 -- etcdctl --endpoints=https://192.168.229.200:2379 --ca-file=/etc/kubernetes/pki/etcd/ca.crt --cert-file=/etc/kubernetes/pki/etcd/server.crt --key-file=/etc/kubernetes/pki/etcd/server.key cluster-health
member 3cef022d23e1fa8c is healthy: got healthy result from https://192.168.229.202:2379
member 90e6e89202e61ff6 is healthy: got healthy result from https://192.168.229.201:2379
member 9b94aa8de130cb6d is healthy: got healthy result from https://192.168.229.200:2379
cluster is healthy
[root@k8s-master01 ~]# 
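The control-plane components can also be checked through the API (a sketch; on v1.15 this still lists the scheduler, controller-manager and etcd):

kubectl get componentstatuses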


Join the worker (slave) nodes

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.229.199:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:5101d70fcb3c84345801e892fec6deed60b7cac83be2cc358dac4a9de482c6c3 \
    --control-plane --certificate-key 43220eb7d1b8de372f8ec308dc1601ebdb527cf0ce6baf0b38dded519e2ab9fc

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use 
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.229.199:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:5101d70fcb3c84345801e892fec6deed60b7cac83be2cc358dac4a9de482c6c3 
According to this output from the install log, joining a worker node only requires running:
kubeadm join 192.168.229.199:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:5101d70fcb3c84345801e892fec6deed60b7cac83be2cc358dac4a9de482c6c3 
and nothing more.
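Back on any master, confirm the workers registered (a sketch):

kubectl get nodes -o wide    # k8s-slave01 and k8s-slave02 should appear and turn Ready once flannel starts on them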


Appendix

[root@k8s-master01 lb]# tree
.
├── etc
│   └── haproxy.cfg
├── kubeadm-config.yaml
├── start-haproxy.sh
└── start-keepalived.sh

1 directory, 4 files
[root@k8s-master01 lb]# cat etc/haproxy.cfg 
global
log 127.0.0.1 local0
log 127.0.0.1 local1 notice
maxconn 4096
#chroot /usr/share/haproxy
#user haproxy
#group haproxy
daemon

defaults
    log     global
    mode    http
    option  httplog
    option  dontlognull
    retries 3
    option redispatch
    timeout connect  5000
    timeout client  50000
    timeout server  50000

frontend stats-front
  bind *:8081
  mode http
  default_backend stats-back

frontend fe_k8s_6444
  bind *:6444
  mode tcp
  timeout client 1h
  log global
  option tcplog
  default_backend be_k8s_6443
  acl is_websocket hdr(Upgrade) -i WebSocket
  acl is_websocket hdr_beg(Host) -i ws

backend stats-back
  mode http
  balance roundrobin
  stats uri /haproxy/stats
  stats auth pxcstats:secret

backend be_k8s_6443
  mode tcp
  timeout queue 1h
  timeout server 1h
  timeout connect 1h
  log global
  balance roundrobin
  server rancher01 192.168.229.200:6443
  server rancher02 192.168.229.201:6443
  server rancher03 192.168.229.202:6443
[root@k8s-master01 lb]# ls
etc  kubeadm-config.yaml  start-haproxy.sh  start-keepalived.sh
[root@k8s-master01 lb]# cat start-haproxy.sh 
#!/bin/bash
MasterIP1=192.168.229.200
MasterIP2=192.168.229.201
MasterIP3=192.168.229.202
MasterPort=6443

docker run -d --restart=always --name HAProxy-K8S -p 6444:6444 \
        -e MasterIP1=$MasterIP1 \
        -e MasterIP2=$MasterIP2 \
        -e MasterIP3=$MasterIP3 \
        -e MasterPort=$MasterPort \
        -v /data/lb/etc/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg \
        wise2c/haproxy-k8s
[root@k8s-master01 lb]# cat start-keepalived.sh 
#!/bin/bash
VIRTUAL_IP=192.168.229.199
INTERFACE=ens33
NETMASK_BIT=24
CHECK_PORT=6444
RID=10
VRID=160
MCAST_GROUP=224.0.0.18

docker run -itd --restart=always --name=Keepalived-K8S \
        --net=host --cap-add=NET_ADMIN \
        -e VIRTUAL_IP=$VIRTUAL_IP \
        -e INTERFACE=$INTERFACE \
        -e CHECK_PORT=$CHECK_PORT \
        -e RID=$RID \
        -e VRID=$VRID \
        -e NETMASK_BIT=$NETMASK_BIT \
        -e MCAST_GROUP=$MCAST_GROUP \
        wise2c/keepalived-k8s
[root@k8s-master01 lb]# cd
[root@k8s-master01 ~]# ls
anaconda-ks.cfg  haproxy.cfg  kubeadm-config.yaml  kubeadm-init.log  kube-flannel.yml  kubernetes_3_master_ha  kubernetes.conf
[root@k8s-master01 ~]# cat kubeadm-config.yaml 
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.229.200
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "192.168.229.199:6444"
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.15.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
[root@k8s-master01 ~]# 
