CH01_Installation_Method_B_kubeadm_03_control-plane_node_20190917

Static pods on each control-plane node:
kube-apiserver-k8s-master01
etcd-k8s-master01
kube-controller-manager-k8s-master01
kube-scheduler-k8s-master01

Node-level components:
kubelet (systemd service)
kube-proxy-lcgr4 (DaemonSet pod)

Principle: master-node high availability is mainly a matter of making the API server highly available (keepalived/heartbeat + nginx/haproxy).
Plan: Wise2C (睿云) Breeze-style keepalived + haproxy containers. A quick endpoint check follows the file list below.
Five scripts and configuration files are involved:

  1. /data/lb/etc/haproxy.cfg
  2. /data/lb/start-haproxy.sh
  3. /data/lb/start-keepalived.sh
  4. kubeadm-config.yaml
  5. kube-flannel.yml (wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml)
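
Once the whole stack is up, the HA endpoint can be smoke-tested from any host. A minimal sketch; the VIP 192.168.43.100 and port 6444 come from the keepalived/haproxy configuration below, and /version should be served to unauthenticated clients on a default kubeadm cluster:

  # expect a JSON version banner if the VIP -> haproxy -> apiserver chain is healthy
  curl -k https://192.168.43.100:6444/version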

1. Start the HAProxy and Keepalived containers on the master node (this is later repeated on every master)

1.1 Start the HAProxy container

  1. Import the images
    $ scp -r 2_haproxy-keepalived root@k8s-ha-master01:~/k8s-install
    [root@k8s-ha-master01 k8s-install]# docker load -i haproxy.tar && docker load -i keepalived.tar
  2. Edit the HAProxy configuration file
    [root@k8s-ha-master01 etc]# vim /data/lb/etc/haproxy.cfg
    global
    log 127.0.0.1 local0
    log 127.0.0.1 local1 notice
    maxconn 4096
    #chroot /usr/share/haproxy
    #user haproxy
    #group haproxy
    daemon
    
    defaults
        log     global
        mode    http
        option  httplog
        option  dontlognull
        retries 3
        option redispatch
        timeout connect  5000
        timeout client  50000
        timeout server  50000
    
    frontend stats-front
      bind *:8081
      mode http
      default_backend stats-back
    
    # keypoint_01 bind *:6444
    frontend fe_k8s_6444
      bind *:6444
      mode tcp
      timeout client 1h
      log global
      option tcplog
      default_backend be_k8s_6443
      acl is_websocket hdr(Upgrade) -i WebSocket
      acl is_websocket hdr_beg(Host) -i ws
    
    backend stats-back
      mode http
      balance roundrobin
      stats uri /haproxy/stats
      stats auth pxcstats:secret
    
    backend be_k8s_6443
      mode tcp
      timeout queue 1h
      timeout server 1h
      timeout connect 1h
      log global
      balance roundrobin
      # keypoint_02 IP of the first node to be initialized; the other two stay commented out until step 5
      server rancher01 192.168.43.110:6443
    #  server rancher02 192.168.43.120:6443
    #  server rancher03 192.168.43.130:6443
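
Before starting the container, the edited file can be syntax-checked with a stock haproxy image. A hedged sketch: haproxy's -c flag only validates the configuration, and the haproxy:1.8 image tag is an assumption, not the wise2c image:

    docker run --rm \
        -v /data/lb/etc/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro \
        haproxy:1.8 haproxy -c -f /usr/local/etc/haproxy/haproxy.cfg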

  3. Edit the HAProxy container start script
    [root@k8s-ha-master01 lb]# vim /data/lb/start-haproxy.sh

    #!/bin/bash
    MasterIP1=192.168.43.110
    MasterIP2=192.168.43.120
    MasterIP3=192.168.43.130
    MasterPort=6443
    
    docker run -d --restart=always --name HAProxy-K8S -p 6444:6444 \
            -e MasterIP1=$MasterIP1 \
            -e MasterIP2=$MasterIP2 \
            -e MasterIP3=$MasterIP3 \
            -e MasterPort=$MasterPort \
            -v /data/lb/etc/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg \
            wise2c/haproxy-k8s

  4. Run the HAProxy container start script
    [root@k8s-ha-master01 lb]# bash /data/lb/start-haproxy.sh
    [root@k8s-ha-master01 lb]# netstat -antpu | grep 6444
    tcp6 0 0 :::6444 :::* LISTEN 1795/docker-proxy
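
The stats frontend defined in haproxy.cfg gives a quick view of backend health; the port 8081, URI /haproxy/stats and the pxcstats:secret credentials all come from the configuration above:

    [root@k8s-ha-master01 lb]# curl -u pxcstats:secret http://127.0.0.1:8081/haproxy/stats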

1.2 Start the Keepalived container

  1. Edit the Keepalived container start script
    [root@k8s-ha-master01 lb]# vim /data/lb/start-keepalived.sh
    #!/bin/bash
    # keypoint_01 VIP, interface, and VRRP parameters
    VIRTUAL_IP=192.168.43.100
    INTERFACE=ens34
    NETMASK_BIT=24
    CHECK_PORT=6444
    RID=10
    VRID=160
    MCAST_GROUP=224.0.0.18
    
    docker run -itd --restart=always --name=Keepalived-K8S \
            --net=host --cap-add=NET_ADMIN \
            -e VIRTUAL_IP=$VIRTUAL_IP \
            -e INTERFACE=$INTERFACE \
            -e CHECK_PORT=$CHECK_PORT \
            -e RID=$RID \
            -e VRID=$VRID \
            -e NETMASK_BIT=$NETMASK_BIT \
            -e MCAST_GROUP=$MCAST_GROUP \
            wise2c/keepalived-k8s

  2. Run the script
    [root@k8s-ha-master01 lb]# ./start-keepalived.sh
    [root@k8s-ha-master01 lb]# ip addr show
    3: ens34: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:50:56:2c:0b:2f brd ff:ff:ff:ff:ff:ff
    inet 192.168.43.110/24 brd 192.168.43.255 scope global noprefixroute ens34
    valid_lft forever preferred_lft forever
    inet 192.168.43.100/24 scope global secondary ens34
    valid_lft forever preferred_lft forever
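
The VIP 192.168.43.100 now rides on ens34 as a secondary address. Once all three masters run both containers (step 3 below), failover can be exercised. A sketch: keepalived health-checks CHECK_PORT=6444, so stopping haproxy should move the VIP to another master within a few seconds:

    [root@k8s-ha-master01 lb]# docker stop HAProxy-K8S    # the :6444 check starts failing here
    [root@k8s-ha-master02 ~]# ip addr show ens34 | grep 192.168.43.100   # VIP should appear here
    [root@k8s-ha-master01 lb]# docker start HAProxy-K8S   # restore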

2. Initialize the first master node (note: run kubeadm init on one master only, k8s-ha-master01)

  1. Confirm that NICs outside the Kubernetes working network are down
    [root@k8s-ha-master01 ~]# ip a
  2. Generate the default kubeadm init configuration file
    [root@k8s-ha-master01 ~]# cd /root/k8s_install
    [root@k8s-ha-master01 ~]# kubeadm config print init-defaults > kubeadm-config.yaml
  3. Edit the configuration
    [root@k8s-ha-master01 ~]# vim kubeadm-config.yaml
    apiVersion: kubeadm.k8s.io/v1beta2
    bootstrapTokens:
    - groups:
      - system:bootstrappers:kubeadm:default-node-token
      token: abcdef.0123456789abcdef
      ttl: 24h0m0s
      usages:
      - signing
      - authentication
    kind: InitConfiguration
    localAPIEndpoint:
    #keypoint_01 IP of this host
      advertiseAddress: 192.168.43.110
      bindPort: 6443
    nodeRegistration:
      criSocket: /var/run/dockershim.sock
    #keypoint_02 name of this node
      name: k8s-ha-master01
      taints:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
    ---
    apiServer:
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta2
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    #keypoint_03 HA endpoint: the VIP with the HAProxy port 6444
    controlPlaneEndpoint: "192.168.43.100:6444"
    controllerManager: {}
    dns:
      type: CoreDNS
    etcd:
      local:
        dataDir: /var/lib/etcd
    #keypoint_04 change the image repository
    #imageRepository: k8s.gcr.io
    imageRepository: registry.aliyuncs.com/google_containers
    kind: ClusterConfiguration
    #keypoint_05 Kubernetes version to install
    kubernetesVersion: v1.15.1
    networking:
      dnsDomain: cluster.local
    #keypoint_06 pod subnet expected by the flannel network plugin
      podSubnet: "10.244.0.0/16"
      serviceSubnet: 10.96.0.0/12
    scheduler: {}
    #keypoint_07 use IPVS for kube-proxy forwarding
    ---
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    kind: KubeProxyConfiguration
    featureGates:
      SupportIPVSProxyMode: true
    mode: ipvs

    Single-master variant (for reference; note there is no controlPlaneEndpoint here)

    apiVersion: kubeadm.k8s.io/v1beta2
    bootstrapTokens:
    - groups:
      - system:bootstrappers:kubeadm:default-node-token
      token: abcdef.0123456789abcdef
      ttl: 24h0m0s
      usages:
      - signing
      - authentication
    kind: InitConfiguration
    localAPIEndpoint:
    #keypoint_01 IP of this host
      advertiseAddress: 192.168.43.101
      bindPort: 6443
    nodeRegistration:
      criSocket: /var/run/dockershim.sock
    #keypoint_02 name of this node
      name: master01
      taints:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
    ---
    apiServer:
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta2
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    controllerManager: {}
    dns:
      type: CoreDNS
    etcd:
      local:
        dataDir: /var/lib/etcd
    #keypoint_03 change the image repository
    #imageRepository: k8s.gcr.io
    imageRepository: registry.aliyuncs.com/google_containers
    kind: ClusterConfiguration
    #keypoint_04 Kubernetes version to install
    kubernetesVersion: v1.15.1
    networking:
      dnsDomain: cluster.local
    #keypoint_05 pod subnet expected by the flannel network plugin
      podSubnet: "10.244.0.0/16"
      serviceSubnet: 10.96.0.0/12
    scheduler: {}
    #keypoint_06 use IPVS for kube-proxy forwarding
    ---
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    kind: KubeProxyConfiguration
    featureGates:
      SupportIPVSProxyMode: true
    mode: ipvs
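
Before running kubeadm init in the next step, the images referenced by the config can be listed and pre-pulled; kubeadm config images list/pull are standard subcommands, and pre-pulling surfaces registry problems early:

    [root@k8s-ha-master01 ~]# kubeadm config images list --config kubeadm-config.yaml
    [root@k8s-ha-master01 ~]# kubeadm config images pull --config kubeadm-config.yaml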
    

  4. Initialize the master node
    [root@k8s-ha-master01 ~]# kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log
    # note: newer kubeadm versions rename this flag to --upload-certs

  5. Run the follow-up commands suggested in kubeadm-init.log
    [root@k8s-ha-master01 ~]# su - k8s
    [k8s@k8s-ha-master01 ~]$ mkdir -p $HOME/.kube
    [k8s@k8s-ha-master01 ~]$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    [k8s@k8s-ha-master01 ~]$ sudo chown $(id -u):$(id -g) $HOME/.kube/config
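
    Alternatively, for the root user kubeadm's init output offers the environment-variable form:
    [root@k8s-ha-master01 ~]# export KUBECONFIG=/etc/kubernetes/admin.conf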

  6. Verify
     
    [k8s@k8s-ha-master01 ~]$ vim $HOME/.kube/config
    server: https://192.168.43.110:6443
    
    [k8s@k8s-ha-master01 ~]$ kubectl edit configmaps -n kube-system kubeadm-config
    [k8s@k8s-ha-master01 ~]$ kubectl get node
    NAME       STATUS     ROLES    AGE   VERSION
    k8s-ha-master01   NotReady   master   20m   v1.15.1
    [k8s@master01 ~]$ kubectl get pod --all-namespaces
    NAMESPACE     NAME                               READY   STATUS    RESTARTS   AGE
    kube-system   coredns-bccdc95cf-9pcq6            0/1     Pending   0          14m
    kube-system   coredns-bccdc95cf-jsnk6            0/1     Pending   0          14m
    kube-system   etcd-master01                      1/1     Running   0          13m
    kube-system   kube-apiserver-master01            1/1     Running   0          13m
    kube-system   kube-controller-manager-master01   1/1     Running   0          13m
    kube-system   kube-proxy-kcn9p                   1/1     Running   0          14m
    kube-system   kube-scheduler-master01            1/1     Running   0          13m
    [k8s@k8s-ha-master01 ~]$ kubectl get svc --all-namespaces
    NAMESPACE     NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
    default       kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP                  16m
    kube-system   kube-dns     ClusterIP   10.96.0.10   <none>        53/UDP,53/TCP,9153/TCP   16m

3. Join the k8s-ha-master02 node to the cluster

# From kubeadm v1.18 on, the certificates need to be copied over manually
scp -rp /etc/kubernetes/pki/ca.* master02:/etc/kubernetes/pki
scp -rp /etc/kubernetes/pki/sa.* master02:/etc/kubernetes/pki
scp -rp /etc/kubernetes/pki/front-proxy-ca.* master02:/etc/kubernetes/pki
scp -rp /etc/kubernetes/pki/etcd/ca.* master02:/etc/kubernetes/pki/etcd
scp -rp /etc/kubernetes/admin.conf master02:/etc/kubernetes
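
The same copy can be written as a loop over the extra masters; a sketch assuming passwordless SSH from master01:

for host in master02 master03; do
  ssh root@$host "mkdir -p /etc/kubernetes/pki/etcd"
  scp -rp /etc/kubernetes/pki/ca.* /etc/kubernetes/pki/sa.* \
      /etc/kubernetes/pki/front-proxy-ca.* root@$host:/etc/kubernetes/pki/
  scp -rp /etc/kubernetes/pki/etcd/ca.* root@$host:/etc/kubernetes/pki/etcd/
  scp -rp /etc/kubernetes/admin.conf root@$host:/etc/kubernetes/
done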
  1. On k8s-ha-master02, complete step 1 above: start the HAProxy and Keepalived containers
  2. Confirm that NICs outside the Kubernetes network are down
    [root@k8s-ha-master02 ~]# ip a
  3. Check the hosts file; it must contain an entry for this host
    [root@k8s-ha-master02 ~]# cat /etc/hosts
  4. Join the cluster with the command recorded in kubeadm-init.log
    [root@k8s-ha-master02 ~]# kubeadm join 192.168.43.100:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:59c2072cf06f03d0bf9992987416309146632f8124115ccf171eca1cbe87df39 \
    --control-plane --certificate-key 49c93125e0a23c4e0323b0590abe043b51ce9d0555b42a03da92925b49b36d1a
  5. Run the suggested follow-up commands
    [root@k8s-ha-master02 ~]# mkdir -p $HOME/.kube
    [root@k8s-ha-master02 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    [root@k8s-ha-master02 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
  6. Verify
    [root@k8s-ha-master02 k8s-install]# vim $HOME/.kube/config
    server: https://192.168.43.120:6443
    [root@k8s-ha-master02 ~]# kubectl get node
    [root@k8s-ha-master02 ~]# kubectl get pod -n kube-system
    [root@k8s-ha-master02 ~]# kubectl edit configmaps -n kube-system kubeadm-config
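
Note: the bootstrap token expires after 24h and the uploaded certificate key after 2h. If either has lapsed, regenerate them on master01 before joining; a sketch (on this kubeadm version the upload flag may still be spelled --experimental-upload-certs):

    [root@k8s-ha-master01 ~]# kubeadm token create --print-join-command
    [root@k8s-ha-master01 ~]# kubeadm init phase upload-certs --upload-certs   # prints a fresh --certificate-key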

4. Join the k8s-ha-master03 node to the cluster (exactly the same procedure as for k8s-ha-master02)

[root@k8s-ha-master03 k8s-install]# vim $HOME/.kube/config
server: https://192.168.43.130:6443

5. Finalize the HAProxy configuration (must be done on all 3 ha-master nodes)

Lines to change: uncomment the two remaining backend servers so all three masters are load-balanced.

[root@k8s-ha-master01 k8s-install]# vim /data/lb/etc/haproxy.cfg
      *********
server rancher01 192.168.43.110:6443
server rancher02 192.168.43.120:6443
server rancher03 192.168.43.130:6443

[root@k8s-ha-master01 etc]# docker rm -f HAProxy-K8S && bash /data/lb/start-haproxy.sh
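
The same edit-and-restart must happen on every master; a loop sketch, assuming the finished haproxy.cfg is on master01 and SSH keys are in place:

for host in k8s-ha-master01 k8s-ha-master02 k8s-ha-master03; do
  scp /data/lb/etc/haproxy.cfg root@$host:/data/lb/etc/haproxy.cfg
  ssh root@$host "docker rm -f HAProxy-K8S && bash /data/lb/start-haproxy.sh"
done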

6. Deploy the flannel network

Perform the following on any one master node.

# find the image version and pull the required image
[k8s@k8s-ha-master01 ~]$ wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
[k8s@k8s-ha-master01 ~]$ cat kube-flannel.yml | grep image
[k8s@k8s-ha-master01 ~]$ sudo docker pull quay.io/coreos/flannel:v0.11.0-amd64
[k8s@k8s-ha-master01 ~]$ kubectl apply -f kube-flannel.yml
[k8s@k8s-ha-master01 ~]$ kubectl get pod -n kube-system -owide
NAME                               READY   STATUS    RESTARTS   AGE     IP             NODE       NOMINATED NODE   READINESS GATES
coredns-bccdc95cf-6w8nd            1/1     Running   0          32m     10.244.0.2     master01   <none>           <none>
coredns-bccdc95cf-nwc9h            1/1     Running   0          32m     10.244.0.3     master01   <none>           <none>
etcd-master01                      1/1     Running   0          31m     192.168.1.21   master01   <none>           <none>
etcd-master02                      1/1     Running   0          21m     192.168.1.22   master02   <none>           <none>
etcd-master03                      1/1     Running   0          22m     192.168.1.23   master03   <none>           <none>
kube-apiserver-master01            1/1     Running   0          31m     192.168.1.21   master01   <none>           <none>
kube-apiserver-master02            1/1     Running   0          21m     192.168.1.22   master02   <none>           <none>
kube-apiserver-master03            1/1     Running   0          22m     192.168.1.23   master03   <none>           <none>
kube-controller-manager-master01   1/1     Running   1          31m     192.168.1.21   master01   <none>           <none>
kube-controller-manager-master02   1/1     Running   0          21m     192.168.1.22   master02   <none>           <none>
kube-controller-manager-master03   1/1     Running   0          22m     192.168.1.23   master03   <none>           <none>
kube-flannel-ds-amd64-krz6s        1/1     Running   0          2m44s   192.168.1.21   master01   <none>           <none>
kube-flannel-ds-amd64-scdw6        1/1     Running   0          2m44s   192.168.1.22   master02   <none>           <none>
kube-flannel-ds-amd64-v4t8p        1/1     Running   0          2m44s   192.168.1.23   master03   <none>           <none>
kube-proxy-44tsk                   1/1     Running   0          21m     192.168.1.22   master02   <none>           <none>
kube-proxy-jqj95                   1/1     Running   0          22m     192.168.1.23   master03   <none>           <none>
kube-proxy-wqxrz                   1/1     Running   0          32m     192.168.1.21   master01   <none>           <none>
kube-scheduler-master01            1/1     Running   1          31m     192.168.1.21   master01   <none>           <none>
kube-scheduler-master02            1/1     Running   0          21m     192.168.1.22   master02   <none>           <none>
kube-scheduler-master03            1/1     Running   0          22m     192.168.1.23   master03   <none>           <none>
[k8s@k8s-ha-master01 ~]$ kubectl get node -owide
NAME       STATUS   ROLES    AGE   VERSION   INTERNAL-IP    EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION           CONTAINER-RUNTIME
master01   Ready    master   35m   v1.15.1   192.168.1.21   <none>        CentOS Linux 7 (Core)   3.10.0-1127.el7.x86_64   docker://18.9.0
master02   Ready    master   23m   v1.15.1   192.168.1.22   <none>        CentOS Linux 7 (Core)   3.10.0-1127.el7.x86_64   docker://18.9.0
master03   Ready    master   24m   v1.15.1   192.168.1.23   <none>        CentOS Linux 7 (Core)   3.10.0-1127.el7.x86_64   docker://18.9.0
[k8s@k8s-ha-master01 ~]$ ifconfig
flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1450
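
With flannel up and CoreDNS Running, pod networking and cluster DNS can be smoke-tested with a throwaway pod. A sketch; busybox:1.28 is an assumed image tag (later busybox tags ship a broken nslookup):

[k8s@k8s-ha-master01 ~]$ kubectl run dns-test -it --rm --restart=Never \
    --image=busybox:1.28 -- nslookup kubernetes.default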

7. Inspect the cluster:

7.1 Check the controller-manager

[k8s@k8s-ha-master01 ~]$ kubectl get endpoints kube-controller-manager --namespace=kube-system -o yaml

7.2 Check the scheduler

[k8s@k8s-ha-master01 ~]$ kubectl get endpoints kube-scheduler --namespace=kube-system -o yaml
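
controller-manager and scheduler run active/standby via leader election; the holderIdentity field in the leader-election annotation shows which master currently holds each lease:

[k8s@k8s-ha-master01 ~]$ kubectl get endpoints kube-scheduler -n kube-system -o yaml | grep holderIdentity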

7.3 Check the etcd cluster status

[k8s@k8s-ha-master01 ~]$ kubectl -n kube-system exec etcd-k8s-ha-master03 -- etcdctl --endpoints=https://192.168.43.110:2379 --ca-file=/etc/kubernetes/pki/etcd/ca.crt --cert-file=/etc/kubernetes/pki/etcd/server.crt --key-file=/etc/kubernetes/pki/etcd/server.key cluster-health

member 10260bfec73117a0 is healthy: got healthy result from https://192.168.43.110:2379
member 258562861e8b997b is healthy: got healthy result from https://192.168.43.130:2379
member 8b0e262770f25357 is healthy: got healthy result from https://192.168.43.120:2379
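
cluster-health is an etcdctl v2 command; the equivalent check through the v3 API is sketched below (--cacert/--cert/--key are the v3 flag spellings):

[k8s@k8s-ha-master01 ~]$ kubectl -n kube-system exec etcd-k8s-ha-master01 -- sh -c \
  "ETCDCTL_API=3 etcdctl --endpoints=https://192.168.43.110:2379 \
   --cacert=/etc/kubernetes/pki/etcd/ca.crt \
   --cert=/etc/kubernetes/pki/etcd/server.crt \
   --key=/etc/kubernetes/pki/etcd/server.key endpoint health"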

7.4 View the kubeadm configuration (kubeadm-config ConfigMap)

[k8s@k8s-ha-master01 ~]$ kubectl edit configmaps -n kube-system kubeadm-config

# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
apiVersion: v1
data:
  ClusterConfiguration: |
    apiServer:
      extraArgs:
        authorization-mode: Node,RBAC
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta2
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
# keypoint_01 the HA controlPlaneEndpoint (VIP:6444) recorded for the whole cluster
    controlPlaneEndpoint: 192.168.43.100:6444
    controllerManager: {}
    dns:
      type: CoreDNS
    etcd:
      local:
        dataDir: /var/lib/etcd
    imageRepository: k8s.gcr.io
    kind: ClusterConfiguration
    kubernetesVersion: v1.15.1
    networking:
      dnsDomain: cluster.local
# keypoint_02 pod subnet used by flannel
      podSubnet: 10.244.0.0/16
      serviceSubnet: 10.96.0.0/12
    scheduler: {}
  ClusterStatus: |
    apiEndpoints:
# keypoint_03 all three masters' apiEndpoints are registered
      k8s-ha-master01:
        advertiseAddress: 192.168.43.110
        bindPort: 6443
      k8s-ha-master02:
        advertiseAddress: 192.168.43.120
        bindPort: 6443
      k8s-ha-master03:
        advertiseAddress: 192.168.43.130
        bindPort: 6443
    apiVersion: kubeadm.k8s.io/v1beta2
    kind: ClusterStatus
kind: ConfigMap
metadata:
  creationTimestamp: "2020-03-11T07:38:32Z"
  name: kubeadm-config
  namespace: kube-system
  resourceVersion: "4496"
  selfLink: /api/v1/namespaces/kube-system/configmaps/kubeadm-config
  uid: c75e3ed8-c857-408c-96c8-0b7d176ddb2b
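
As a final check, the control plane should survive the loss of one master. A sketch of the test; kubectl's --server flag points the query at the HA endpoint (kubeadm includes the controlPlaneEndpoint address in the apiserver certificate SANs):

# after shutting down k8s-ha-master01:
[root@k8s-ha-master02 ~]# ip addr show ens34 | grep 192.168.43.100    # VIP should have moved here or to master03
[root@k8s-ha-master02 ~]# kubectl get nodes --server=https://192.168.43.100:6444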
