Kubernetes Cluster Deployment (Binary Deployment, Single-Node, Multi-Node)


Related packages and script files:
Link: https://pan.baidu.com/s/1LPbC5na_nhrVha2agW0NTA
Extraction code: 72m8

1. Environment Preparation

Load balancers (LB)
Nginx01: 14.0.0.77/24
Nginx02: 14.0.0.88/24
Master nodes
master: 14.0.0.11/24
master02: 14.0.0.66/24
Node (worker) nodes
node01: 14.0.0.33/24
node02: 14.0.0.55/24

VIP: 14.0.0.100

  • Official release page: https://github.com/kubernetes/kubernetes/releases?after=v1.13.1

  • etcd: uses ca.pem, server-key.pem, server.pem;

  • kube-apiserver: uses ca.pem, server-key.pem, server.pem;

  • kubelet: uses ca.pem;

  • kube-proxy: uses ca.pem, kube-proxy-key.pem, kube-proxy.pem;

  • kubectl: uses ca.pem, admin-key.pem, admin.pem;

  • kube-controller-manager: uses ca-key.pem, ca.pem

2. Single-Node K8s Deployment

2.1 Single-Node Environment

Master:14.0.0.11/24 kube-apiserver kube-controller-manager kube-scheduler etcd
Node01:14.0.0.33/24 kubelet kube-proxy docker flannel etcd
Node02:14.0.0.55/24 kubelet kube-proxy docker flannel etcd
VIP:14.0.0.100

2.2 Setting Up etcd

2.2.1 Operations on the master

hostnamectl set-hostname master
su
iptables -F
setenforce 0
mkdir k8s
cd k8s

#Upload the etcd-cert.sh and etcd.sh packages

[root@master k8s]# mkdir etcd-cert
[root@master k8s]# ls
etcd-cert  etcd-cert.sh  etcd.sh
[root@master k8s]# cd etcd-cert/

Download the certificate tools

#Upload the three binaries: cfssl, cfssl-certinfo, cfssljson

[root@master etcd-cert]# mv cfssl* /usr/local/bin/
[root@master etcd-cert]# cd /usr/local/bin/
[root@master bin]# chmod +x *

Start creating the certificates:
cfssl – the certificate generation tool
cfssljson – produces certificate files from the JSON passed in
cfssl-certinfo – displays certificate information

Define the CA certificate

[root@master bin]# cd /root/k8s/etcd-cert/

cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "www": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"     
        ]  
      } 
    }         
  }
}
EOF

[root@master etcd-cert]# ls
ca-config.json

Create the certificate signing request (CSR) for the CA

cat > ca-csr.json <<EOF 
{   
    "CN": "etcd CA",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing"
        }
    ]
}
EOF
[root@master etcd-cert]# ls
ca-config.json  ca-csr.json

Generate the CA certificate; this produces ca-key.pem and ca.pem

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

2020/09/28 16:30:41 [INFO] generating a new CA key and certificate from CSR
2020/09/28 16:30:41 [INFO] generate received request
2020/09/28 16:30:41 [INFO] received CSR
2020/09/28 16:30:41 [INFO] generating key: rsa-2048
2020/09/28 16:30:41 [INFO] encoded CSR
2020/09/28 16:30:41 [INFO] signed certificate with serial number 307381503427781940669935687801094331494181475343

Specify the three etcd node addresses so communication between them can be verified

cat > server-csr.json <<EOF
{
    "CN": "etcd",
    "hosts": [
    "14.0.0.11",
    "14.0.0.33",
    "14.0.0.55"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing"
        }
    ]
}
EOF

Generate the etcd server certificate: server-key.pem and server.pem

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server

2020/09/28 16:50:37 [INFO] generate received request
2020/09/28 16:50:37 [INFO] received CSR
2020/09/28 16:50:37 [INFO] generating key: rsa-2048
2020/09/28 16:50:38 [INFO] encoded CSR
2020/09/28 16:50:38 [INFO] signed certificate with serial number 324230946150283329636351041053909010928858705491
2020/09/28 16:50:38 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

[root@master etcd-cert]# ls
ca-config.json  ca-csr.json  ca.pem      server-csr.json  server.pem
ca.csr          ca-key.pem   server.csr  server-key.pem
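
Optionally, cfssl-certinfo (installed above) can be used to confirm that the three etcd node IPs ended up in the server certificate:

[root@master etcd-cert]# cfssl-certinfo -cert server.pem    #the "sans" field should list 14.0.0.11, 14.0.0.33 and 14.0.0.55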

[root@master etcd-cert]# cd ..
#Upload the kubernetes-server-linux-amd64.tar.gz and etcd-v3.3.10-linux-amd64.tar.gz packages (etcd-cert.sh is already here)

[root@master k8s]# ls
etcd-cert     etcd.sh                          kubernetes-server-linux-amd64.tar.gz
etcd-cert.sh  etcd-v3.3.10-linux-amd64.tar.gz

[root@master k8s]# tar zxvf etcd-v3.3.10-linux-amd64.tar.gz

[root@master k8s]# cd etcd-v3.3.10-linux-amd64/
[root@master etcd-v3.3.10-linux-amd64]# mkdir -p /opt/etcd/{cfg,bin,ssl}
[root@master etcd-v3.3.10-linux-amd64]# ls /opt/etcd
bin  cfg  ssl
[root@master etcd-v3.3.10-linux-amd64]# mv etcd etcdctl /opt/etcd/bin/
[root@master etcd-v3.3.10-linux-amd64]# ls /opt/etcd/bin/
etcd  etcdctl
[root@master etcd-v3.3.10-linux-amd64]# ls
Documentation  README-etcdctl.md  README.md  READMEv2-etcdctl.md

Copy the certificates

[root@master etcd-v3.3.10-linux-amd64]# cd ..
[root@master k8s]# cp etcd-cert/*.pem /opt/etcd/ssl/
[root@master k8s]# ls /opt/etcd/ssl/
ca-key.pem  ca.pem  server-key.pem  server.pem

The command hangs, waiting for the other nodes to join

[root@master k8s]# bash etcd.sh etcd01 14.0.0.11 etcd02=https://14.0.0.33:2380,etcd03=https://14.0.0.55:2380
Created symlink from /etc/systemd/system/multi-user.target.wants/etcd.service to /usr/lib/systemd/system/etcd.service.

Open another session and you will see that the etcd process has started

[root@master ~]# ps -ef | grep etcd
root     109088 108292  0 17:05 pts/1    00:00:00 bash etcd.sh etcd01 14.0.0.11 etcd02=https://14.0.0.33:2380,etcd03=https://14.0.0.55:2380
root     109135 109088  0 17:05 pts/1    00:00:00 systemctl restart etcd
root     109141      1  2 17:05 ?        00:00:01 /opt/etcd/bin/etcd --name=etcd01 --data-dir=/var/lib/etcd/default.etcd --listen-peer-urls=https://14.0.0.11:2380 --listen-client-urls=https://14.0.0.11:2379,http://127.0.0.1:2379 --advertise-client-urls=https://14.0.0.11:2379 --initial-advertise-peer-urls=https://14.0.0.11:2380 --initial-cluster=etcd01=https://14.0.0.11:2380,etcd02=https://14.0.0.33:2380,etcd03=https://14.0.0.55:2380 --initial-cluster-token=etcd-cluster --initial-cluster-state=new --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --peer-cert-file=/opt/etcd/ssl/server.pem --peer-key-file=/opt/etcd/ssl/server-key.pem --trusted-ca-file=/opt/etcd/ssl/ca.pem --peer-trusted-ca-file=/opt/etcd/ssl/ca.pem
root     109208 109165  0 17:06 pts/2    00:00:00 grep --color=auto etcd
[root@master ~]# systemctl status etcd
● etcd.service - Etcd Server
   Loaded: loaded (/usr/lib/systemd/system/etcd.service; enabled; vendor preset: disabled)
   Active: activating (start) since 一 2020-09-28 17:06:56 CST; 26s ago
 Main PID: 109219 (etcd)
    Tasks: 12
   CGroup: /system.slice/etcd.service
           └─109219 /opt/etcd/bin/etcd --name=etcd01 --data-dir=/var/lib/etcd/default.etcd --listen...

9月 28 17:07:20 master etcd[109219]: 8d69898c95880a34 [logterm: 1, index: 3] sent MsgVote requ...m 82
9月 28 17:07:21 master etcd[109219]: 8d69898c95880a34 is starting a new election at term 82
9月 28 17:07:21 master etcd[109219]: 8d69898c95880a34 became candidate at term 83
9月 28 17:07:21 master etcd[109219]: 8d69898c95880a34 received MsgVoteResp from 8d69898c95880a...m 83
9月 28 17:07:21 master etcd[109219]: 8d69898c95880a34 [logterm: 1, index: 3] sent MsgVote requ...m 83
9月 28 17:07:21 master etcd[109219]: 8d69898c95880a34 [logterm: 1, index: 3] sent MsgVote requ...m 83
9月 28 17:07:21 master etcd[109219]: health check for peer 9e200c4b356955d could not connect: ...GE")
9月 28 17:07:21 master etcd[109219]: health check for peer 9e200c4b356955d could not connect: ...OT")
9月 28 17:07:21 master etcd[109219]: health check for peer fe55d87c017bcbba could not connect:...OT")
9月 28 17:07:21 master etcd[109219]: health check for peer fe55d87c017bcbba could not connect:...GE")
Hint: Some lines were ellipsized, use -l to show in full.

Copy the certificates (the whole /opt/etcd directory) to the other nodes

[root@master k8s]# scp -r /opt/etcd/ root@14.0.0.33:/opt/
yes
19961207
[root@master k8s]# scp -r /opt/etcd/ root@14.0.0.55:/opt/
yes
19961207

Copy the startup (service) file to the other nodes

[root@master k8s]# scp -r /usr/lib/systemd/system/etcd.service root@14.0.0.33:/usr/lib/systemd/system/
[root@master k8s]# scp -r /usr/lib/systemd/system/etcd.service root@14.0.0.55:/usr/lib/systemd/system/ 

2.2.2 Operations on node01 and node02

On node01

[root@node01 ~]# vim /opt/etcd/cfg/etcd 
#[Member]
ETCD_NAME="etcd02"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://14.0.0.33:2380"
ETCD_LISTEN_CLIENT_URLS="https://14.0.0.33:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://14.0.0.33:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://14.0.0.33:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://14.0.0.11:2380,etcd02=https://14.0.0.33:2380,etcd03=https://14.0.0.55:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"

[root@node01 ~]# systemctl start etcd
[root@node01 ~]# systemctl status etcd

On node02

[root@node02 ~]# vim /opt/etcd/cfg/etcd 
#[Member]
ETCD_NAME="etcd03"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://14.0.0.55:2380"
ETCD_LISTEN_CLIENT_URLS="https://14.0.0.55:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://14.0.0.55:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://14.0.0.55:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://14.0.0.11:2380,etcd02=https://14.0.0.33:2380,etcd03=https://14.0.0.55:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"

[root@node02 ~]# systemctl start etcd
[root@node02 ~]# systemctl status etcd

Check the cluster health from the master node (run from /opt/etcd/ssl/ so the relative certificate paths resolve)

[root@master ssl]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://14.0.0.11:2379,https://14.0.0.33:2379,https://14.0.0.55:2379" cluster-health
member 9e200c4b356955d is healthy: got healthy result from https://14.0.0.55:2379
member 8d69898c95880a34 is healthy: got healthy result from https://14.0.0.11:2379
member fe55d87c017bcbba is healthy: got healthy result from https://14.0.0.33:2379
cluster is healthy
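
As an optional extra check, the same certificate flags can be used to list the members and see which one is currently the leader (isLeader=true in the output):

[root@master ssl]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://14.0.0.11:2379,https://14.0.0.33:2379,https://14.0.0.55:2379" member list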

2.3 Deploying the Docker Engine

Deploy the Docker engine on all node hosts.
See the Docker installation script below.

vim docker.sh
#!/bin/bash
rpm -qa | grep docker
if [ $? -eq 0 ];then
  echo "docker is already installed"
else
  echo "docker is not installed"
  read -p "Install docker now? (y/n) " b
  case $b in
  y)
    echo "Installing docker ===================="
    #install dependency packages
    yum install -y yum-utils device-mapper-persistent-data lvm2
    #configure the Aliyun docker-ce repository
    yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    #install docker-ce
    yum install -y docker-ce
    #stop the firewall and disable SELinux
    systemctl stop firewalld
    systemctl disable firewalld.service
    setenforce 0
    sed -i "/^SELINUX=/s/enforcing/disabled/" /etc/selinux/config
    #configure a registry mirror (the heredoc body and EOF must not be indented, otherwise the terminator is not recognized)
    mkdir -p /etc/docker
    tee /etc/docker/daemon.json <<-'EOF'
{
"registry-mirrors": ["https://8i185852.mirror.aliyuncs.com"]
}
EOF
    #reload system parameters and restart docker
    systemctl daemon-reload
    systemctl restart docker
    #network optimization: enable IP forwarding
    echo "net.ipv4.ip_forward=1" >> /etc/sysctl.conf
    sysctl -p
    service network restart
    systemctl restart docker
  ;;
  n)
    exit
  ;;
  esac
echo "Installation finished"
fi
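
Run the script on each node host; it prompts for confirmation before installing:

[root@node01 ~]# bash docker.sh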

2.4 Flannel Network Configuration

Write the allocated subnet range into etcd for flannel to use

[root@master etcd-cert]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://14.0.0.11:2379,https://14.0.0.33:2379,https://14.0.0.55:2379" set /coreos.com/network/config '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}'

	{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}

View the information that was written

[root@master etcd-cert]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://14.0.0.11:2379,https://14.0.0.33:2379,https://14.0.0.55:2379" get /coreos.com/network/config

	{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}

##Upload the flannel-v0.10.0-linux-amd64.tar.gz package to the home directory on both nodes

Flannel only needs to be deployed on the node hosts.
Extract the archive on all node hosts:

[root@node01 ~]# tar zxvf flannel-v0.10.0-linux-amd64.tar.gz
flanneld
mk-docker-opts.sh
README.md

[root@node02 ~]# tar zxvf flannel-v0.10.0-linux-amd64.tar.gz
flanneld
mk-docker-opts.sh
README.md

Create the k8s working directory (on all node hosts)

[root@node01 ~]# mkdir /opt/kubernetes/{cfg,bin,ssl} -p
[root@node01 ~]# mv mk-docker-opts.sh flanneld /opt/kubernetes/bin/

[root@node02 ~]# mkdir /opt/kubernetes/{cfg,bin,ssl} -p
[root@node02 ~]# mv mk-docker-opts.sh flanneld /opt/kubernetes/bin/


[root@node01 ~]# vim flannel.sh

#!/bin/bash

ETCD_ENDPOINTS=${1:-"http://127.0.0.1:2379"}

cat <<EOF >/opt/kubernetes/cfg/flanneld

FLANNEL_OPTIONS="--etcd-endpoints=${ETCD_ENDPOINTS} \
-etcd-cafile=/opt/etcd/ssl/ca.pem \
-etcd-certfile=/opt/etcd/ssl/server.pem \
-etcd-keyfile=/opt/etcd/ssl/server-key.pem"

EOF

cat <<EOF >/usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/opt/kubernetes/cfg/flanneld
ExecStart=/opt/kubernetes/bin/flanneld --ip-masq \$FLANNEL_OPTIONS
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure

[Install]
WantedBy=multi-user.target

EOF

systemctl daemon-reload
systemctl enable flanneld
systemctl restart flanneld


[root@node02 ~]# vim flannel.sh    #create the same flannel.sh script on node02, with exactly the same content as on node01 above

Enable the flannel network

[root@node01 ~]# bash flannel.sh https://14.0.0.11:2379,https://14.0.0.33:2379,https://14.0.0.55:2379
Created symlink from /etc/systemd/system/multi-user.target.wants/flanneld.service to /usr/lib/systemd/system/flanneld.service.

[root@node02 ~]# bash flannel.sh https://14.0.0.11:2379,https://14.0.0.33:2379,https://14.0.0.55:2379
Created symlink from /etc/systemd/system/multi-user.target.wants/flanneld.service to /usr/lib/systemd/system/flanneld.service.

Configure Docker to use flannel

[root@node01 ~]# vim /usr/lib/systemd/system/docker.service
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always


[root@node02 ~]# vim /usr/lib/systemd/system/docker.service

[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker 
EnvironmentFile=/run/flannel/subnet.env        #add this line
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS -H fd:// --containerd=/run/containerd/containerd.sock       #add $DOCKER_NETWORK_OPTIONS in the middle
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always


[root@node01 ~]# cat /run/flannel/subnet.env
DOCKER_OPT_BIP="--bip=172.17.55.1/24"
DOCKER_OPT_IPMASQ="--ip-masq=false"
DOCKER_OPT_MTU="--mtu=1450"
DOCKER_NETWORK_OPTIONS=" --bip=172.17.55.1/24 --ip-masq=false --mtu=1450"    //Note: bip specifies the subnet Docker uses at startup

[root@node02 ~]# cat /run/flannel/subnet.env
DOCKER_OPT_BIP="--bip=172.17.41.1/24"
DOCKER_OPT_IPMASQ="--ip-masq=false"
DOCKER_OPT_MTU="--mtu=1450"
DOCKER_NETWORK_OPTIONS=" --bip=172.17.41.1/24 --ip-masq=false --mtu=1450"
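
To see how the two subnets are stitched together, you can also look at the routes flannel installs (run on either node; with the vxlan backend there should be a route to the other node's 172.17.x.0/24 subnet via the flannel.1 interface):

[root@node01 ~]# ip route | grep 172.17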

Restart the Docker service

[root@node01 ~]#  systemctl daemon-reload
[root@node01 ~]# systemctl restart docker

[root@node02 ~]#  systemctl daemon-reload
[root@node02 ~]# systemctl restart docker

View the flannel network interface

[root@node01 ~]# ifconfig
flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 172.17.55.0  netmask 255.255.255.255  broadcast 0.0.0.0
        inet6 fe80::90fd:cfff:fec4:c75e  prefixlen 64  scopeid 0x20<link>
        ether 92:fd:cf:c4:c7:5e  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 30 overruns 0  carrier 0  collisions 0

[root@node02 ~]# ifconfig
flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 172.17.41.0  netmask 255.255.255.255  broadcast 0.0.0.0
        inet6 fe80::2898:eeff:fe20:ca83  prefixlen 64  scopeid 0x20<link>
        ether 2a:98:ee:20:ca:83  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 37 overruns 0  carrier 0  collisions 0

Ping the other node's docker0 address to verify that flannel provides routing between the nodes

[root@node01 ~]# ping 172.17.41.1
PING 172.17.41.1 (172.17.41.1) 56(84) bytes of data.
64 bytes from 172.17.41.1: icmp_seq=1 ttl=64 time=0.290 ms
64 bytes from 172.17.41.1: icmp_seq=2 ttl=64 time=0.340 ms

[root@node01 ~]# docker run -it centos:7 /bin/bash
[root@c867dff9bac7 /]# yum install net-tools -y
[root@c867dff9bac7 /]# ifconfig 
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 172.17.55.2  netmask 255.255.255.0  broadcast 172.17.55.255
        ether 02:42:ac:11:37:02  txqueuelen 0  (Ethernet)
        RX packets 16334  bytes 12483842 (11.9 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 8168  bytes 444665 (434.2 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        loop  txqueuelen 1000  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0


[root@node02 ~]# docker run -it centos:7 /bin/bash
[root@00e2e0f83ea6 /]# yum install net-tools -y
[root@00e2e0f83ea6 /]# ifconfig 
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 172.17.41.2  netmask 255.255.255.0  broadcast 172.17.41.255
        ether 02:42:ac:11:29:02  txqueuelen 0  (Ethernet)
        RX packets 16319  bytes 12481422 (11.9 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 8079  bytes 439796 (429.4 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        loop  txqueuelen 1000  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

Now verify that the centos:7 containers on the two nodes can ping each other

[root@c867dff9bac7 /]# ping 172.17.41.2
PING 172.17.41.2 (172.17.41.2) 56(84) bytes of data.
64 bytes from 172.17.41.2: icmp_seq=1 ttl=62 time=0.321 ms
64 bytes from 172.17.41.2: icmp_seq=2 ttl=62 time=0.246 ms
64 bytes from 172.17.41.2: icmp_seq=3 ttl=62 time=0.366 ms

2.5 Deploying the Master Components (kube-apiserver, kube-controller-manager, kube-scheduler)

The apiserver is the entry point for managing the k8s cluster.
The etcd certificates are needed so the master can talk to the etcd cluster.
An etcd service must be running locally on the master.

On the master, generate the certificates for the apiserver

[root@master ~]# cd k8s
[root@master k8s]# mkdir master
[root@master k8s]# cd master/

#Upload the master.zip package

[root@master master]# unzip master.zip 
[root@master master]# chmod +x controller-manager.sh 
[root@master master]# mkdir /opt/kubernetes/{cfg,bin,ssl} -p
[root@master master]# cd ..
[root@master k8s]# mkdir k8s-cert
[root@master k8s]# cd k8s-cert/

#Upload the k8s-cert.sh script

Generate the CA certificate:

cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

cat > ca-csr.json <<EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
      	    "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

Server certificate (the // annotations in the hosts list below are explanatory only; remove them from the actual file, since JSON does not allow comments):

cat > server-csr.json <<EOF
{
    "CN": "kubernetes",
    "hosts": [
      "10.0.0.1",
      "127.0.0.1", 
      "14.0.0.11",           //master1
      "14.0.0.66",          //master2
      "14.0.0.100",         //vip
      "14.0.0.77",          //lb (master)
      "14.0.0.88",          //lb (backup)
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server

Generate the admin (administrator) certificate:

cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

Generate the kube-proxy certificate:

cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

[root@master k8s-cert]# ls *pem
admin-key.pem  ca-key.pem  kube-proxy-key.pem  server-key.pem
admin.pem      ca.pem      kube-proxy.pem      server.pem

Copy the CA and server certificates into the ssl directory

[root@master k8s-cert]# cp ca*pem server*pem /opt/kubernetes/ssl/

Extract the kubernetes archive

[root@master k8s-cert]# cd ..
[root@master k8s]# tar zxvf kubernetes-server-linux-amd64.tar.gz

Copy the key command binaries

[root@master k8s]# cd /root/k8s/kubernetes/server/bin/
[root@master bin]# cp kube-apiserver kubectl kube-controller-manager kube-scheduler /opt/kubernetes/bin/

Next, create the token. A token is a credential bound to a specific role in the k8s ecosystem: a user who logs in with this token is granted that role and its permissions, which is why the token must be bound to a role.

head -c 16 /dev/urandom | od -An -t x | tr -d ' ' generates a random serial number, for example:
e3b411965405ea246dce036761372b89

[root@master bin]# vim /opt/kubernetes/cfg/token.csv

e3b411965405ea246dce036761372b89,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
(serial number, user name, uid, group/role)
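
Equivalently, the token can be generated and the file written in one step, mirroring the commented-out block of kubeconfig.sh shown later (a small sketch; adjust the path if needed):

BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')    #random 16-byte hex token
cat > /opt/kubernetes/cfg/token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF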

With the binaries, token, and certificates ready, start the apiserver (in k8s the HTTPS port is 6443; RBAC is the k8s security framework)

[root@master bin]# cd /root/k8s/master/
[root@master master]# bash apiserver.sh 14.0.0.11 https://14.0.0.11:2379,https://14.0.0.33:2379,https://14.0.0.55:2379
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.

Check that the process started successfully

[root@master master]# ps aux | grep kube
root     115504  6.1  8.0 400140 312008 ?       Ssl  19:39   0:09 /opt/kubernetes/bin/kube-apiserver --logtostderr=true --v=4 --etcd-servers=https://14.0.0.11:2379,https://14.0.0.33:2379,https://14.0.0.55:2379 --bind-address=14.0.0.11 --secure-port=6443 --advertise-address=14.0.0.11 --allow-privileged=true --service-cluster-ip-range=10.0.0.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --kubelet-https=true --enable-bootstrap-token-auth --token-auth-file=/opt/kubernetes/cfg/token.csv --service-node-port-range=30000-50000 --tls-cert-file=/opt/kubernetes/ssl/server.pem --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem --etcd-cafile=/opt/etcd/ssl/ca.pem --etcd-certfile=/opt/etcd/ssl/server.pem --etcd-keyfile=/opt/etcd/ssl/server-key.pem
root     115568  0.0  0.0 112724   984 pts/1    S+   19:41   0:00 grep --color=auto kube

View the generated configuration file

[root@master master]# cat /opt/kubernetes/cfg/kube-apiserver 

KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--etcd-servers=https://14.0.0.11:2379,https://14.0.0.33:2379,https://14.0.0.55:2379 \
--bind-address=14.0.0.11 \
--secure-port=6443 \
--advertise-address=14.0.0.11 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--kubelet-https=true \
--enable-bootstrap-token-auth \
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/opt/kubernetes/ssl/server.pem  \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem"

Check the listening ports

[root@master master]# netstat -ntap | grep 6443              ##HTTPS port
tcp        0      0 14.0.0.11:6443          0.0.0.0:*               LISTEN      115504/kube-apiserv 
tcp        0      0 14.0.0.11:60500         14.0.0.11:6443          ESTABLISHED 115504/kube-apiserv 
tcp        0      0 14.0.0.11:6443          14.0.0.11:60500         ESTABLISHED 115504/kube-apiserv 

[root@master master]# netstat -ntap | grep 8080             ##HTTP port
tcp        0      0 127.0.0.1:8080          0.0.0.0:*               LISTEN      115504/kube-apiserv 

Start the scheduler service

[root@master master]# ./scheduler.sh 127.0.0.1
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.

Check that the process started successfully

[root@master master]# ps aux | grep ku
postfix  114828  0.0  0.1  91732  4088 ?        S    18:43   0:00 pickup -l -t unix -u
root     115504  2.3  5.9 400140 227992 ?       Ssl  19:39   0:24 /opt/kubernetes/bin/kube-apiserver --logtostderr=true --v=4 --etcd-servers=https://14.0.0.11:2379,https://14.0.0.33:2379,https://14.0.0.55:2379 --bind-address=14.0.0.11 --secure-port=6443 --advertise-address=14.0.0.11 --allow-privileged=true --service-cluster-ip-range=10.0.0.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --kubelet-https=true --enable-bootstrap-token-auth --token-auth-file=/opt/kubernetes/cfg/token.csv --service-node-port-range=30000-50000 --tls-cert-file=/opt/kubernetes/ssl/server.pem --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem --etcd-cafile=/opt/etcd/ssl/ca.pem --etcd-certfile=/opt/etcd/ssl/server.pem --etcd-keyfile=/opt/etcd/ssl/server-key.pem
root     115765  1.4  0.5  46128 19516 ?        Ssl  19:56   0:00 /opt/kubernetes/bin/kube-scheduler --logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect
root     115787  0.0  0.0 112728   988 pts/1    S+   19:56   0:00 grep --color=auto ku

Start the controller-manager service

[root@master master]# ./controller-manager.sh 127.0.0.1
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.

Check that the process started successfully

[root@master master]# ps aux | grep ku
postfix  114828  0.0  0.1  91732  4088 ?        S    18:43   0:00 pickup -l -t unix -u
root     115504  2.2  6.2 400140 242476 ?       Ssl  19:39   0:27 /opt/kubernetes/bin/kube-apiserver --logtostderr=true --v=4 --etcd-servers=https://14.0.0.11:2379,https://14.0.0.33:2379,https://14.0.0.55:2379 --bind-address=14.0.0.11 --secure-port=6443 --advertise-address=14.0.0.11 --allow-privileged=true --service-cluster-ip-range=10.0.0.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --kubelet-https=true --enable-bootstrap-token-auth --token-auth-file=/opt/kubernetes/cfg/token.csv --service-node-port-range=30000-50000 --tls-cert-file=/opt/kubernetes/ssl/server.pem --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem --etcd-cafile=/opt/etcd/ssl/ca.pem --etcd-certfile=/opt/etcd/ssl/server.pem --etcd-keyfile=/opt/etcd/ssl/server-key.pem
root     115765  1.2  0.5  46128 20704 ?        Ssl  19:56   0:02 /opt/kubernetes/bin/kube-scheduler --logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect
root     115861  5.0  1.5 140732 60996 ?        Ssl  19:59   0:02 /opt/kubernetes/bin/kube-controller-manager --logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect=true --address=127.0.0.1 --service-cluster-ip-range=10.0.0.0/24 --cluster-name=kubernetes --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem --root-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem --experimental-cluster-signing-duration=87600h0m0s
root     115882  0.0  0.0 112728   988 pts/1    S+   19:59   0:00 grep --color=auto ku

Check the status of the master components

[root@master master]# /opt/kubernetes/bin/kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-2               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"}   
etcd-1               Healthy   {"health":"true"}   

2.6 Deploying node01

On the master, copy the kubelet and kube-proxy binaries to the node hosts

[root@master master]# cd /root/k8s/kubernetes/server/bin/

[root@master bin]# scp kubelet kube-proxy root@14.0.0.33:/opt/kubernetes/bin/
root@14.0.0.33's password: 
kubelet                                                              100%  168MB  90.2MB/s   00:01    
kube-proxy                                                           100%   48MB  87.6MB/s   00:00    
[root@master bin]# scp kubelet kube-proxy root@14.0.0.55:/opt/kubernetes/bin/
root@14.0.0.55's password: 
kubelet                                                              100%  168MB  71.8MB/s   00:02    
kube-proxy                                                           100%   48MB 130.2MB/s   00:00    

On node01 (copy node.zip to the /root directory and extract it)
##Upload the node.zip package

Extract node.zip to obtain kubelet.sh and proxy.sh

[root@node01 ~]# unzip node.zip 
Archive:  node.zip
  inflating: proxy.sh                
  inflating: kubelet.sh       

On node02 (copy node.zip to the /root directory and extract it)
##Upload the node.zip package
Extract node.zip to obtain kubelet.sh and proxy.sh

[root@node02 ~]# unzip node.zip 

On the master
Create the kubeconfig files to prepare for adding the node hosts to the k8s cluster

[root@master bin]# cd /root/k8s/
[root@master k8s]# mkdir kubeconfig
[root@master k8s]# cd kubeconfig/

Upload the kubeconfig.sh script and rename it to kubeconfig

[root@master kubeconfig]# mv kubeconfig.sh kubeconfig
[root@master kubeconfig]# vim kubeconfig
	#Delete the following section (lines 1-10 of the script)
  1 # Create the TLS Bootstrapping Token
  2 #BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
  3 BOOTSTRAP_TOKEN=0fb61c46f8991b718eb38d27b605b008
  4 
  5 cat > token.csv <<EOF
  6 ${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
  7 EOF
  8 
  9 #----------------------
 10   

Get the token (copy the ID generated earlier)

[root@master kubeconfig]# cat /opt/kubernetes/cfg/token.csv
e3b411965405ea246dce036761372b89,kubelet-bootstrap,10001,"system:kubelet-bootstrap"

In the kubeconfig script, change the token to this ID: --token=e3b411965405ea246dce036761372b89

[root@master kubeconfig]# vim kubeconfig 
# Set the client authentication parameters
kubectl config set-credentials kubelet-bootstrap \
  --token=e3b411965405ea246dce036761372b89 \
  --kubeconfig=bootstrap.kubeconfig

Set the environment variable (append it to /etc/profile)

[root@master kubeconfig]# vim /etc/profile     #append at the end

export PATH=$PATH:/opt/kubernetes/bin/

Apply the change:

[root@master kubeconfig]# source /etc/profile
[root@master kubeconfig]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok                  
scheduler            Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   
etcd-1               Healthy   {"health":"true"}   
etcd-2               Healthy   {"health":"true"}   

Generate the kubeconfig files

[root@localhost kubeconfig]# bash kubeconfig 14.0.0.11 /root/k8s/k8s-cert/
Cluster "kubernetes" set.
User "kubelet-bootstrap" set.
Context "default" created.
Switched to context "default".
Cluster "kubernetes" set.
User "kube-proxy" set.
Context "default" created.
Switched to context "default".

[root@master kubeconfig]# ls     
bootstrap.kubeconfig  kubeconfig  kube-proxy.kubeconfig
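
Before copying them out, you can optionally check the apiserver address and credentials embedded in the generated files (add --raw to show the full, unredacted content):

[root@master kubeconfig]# kubectl config view --kubeconfig=bootstrap.kubeconfig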

Copy the configuration files to the node hosts

[root@master kubeconfig]# scp bootstrap.kubeconfig kube-proxy.kubeconfig root@14.0.0.33:/opt/kubernetes/cfg/
root@14.0.0.33's password: 
bootstrap.kubeconfig                                                  100% 2163     2.5MB/s   00:00    
kube-proxy.kubeconfig                                                 100% 6265     7.7MB/s   00:00    
[root@master kubeconfig]# scp bootstrap.kubeconfig kube-proxy.kubeconfig root@14.0.0.55:/opt/kubernetes/cfg/
root@14.0.0.55's password: 
bootstrap.kubeconfig                                                  100% 2163     2.4MB/s   00:00    
kube-proxy.kubeconfig                                                 100% 6265     8.7MB/s   00:00    

Create the bootstrap role binding that grants permission to connect to the apiserver and request certificate signing (this step is critical).
(Every node initially talks to the master using the kubelet-bootstrap identity.)

[root@master kubeconfig]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
	
	clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created
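
If needed, the binding can be verified afterwards:

[root@master kubeconfig]# kubectl describe clusterrolebinding kubelet-bootstrap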

On node01

[root@node01 ~]# bash kubelet.sh 14.0.0.33
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.

Check that the kubelet service started

[root@node01 ~]# ps aux | grep kube
root      57995  8.6  1.1 405340 42748 ?        Ssl  09:12   0:00 /opt/kubernetes/bin/kubelet --logtostderr=true --v=4 --hostname-override=14.0.0.33 --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig --bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig --config=/opt/kubernetes/cfg/kubelet.config --cert-dir=/opt/kubernetes/ssl --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0
root      58014  0.0  0.0 112724   988 pts/1    S+   09:13   0:00 grep --color=auto kube

On the master
Check the request from node01 (view the unapproved CSR). Pending means the node is waiting for the cluster to issue it a certificate.

[root@master kubeconfig]# kubectl get csr
NAME                                                   AGE   REQUESTOR           CONDITION
node-csr--Gfv2DId9y-RWvziMubrN96vlNp6fOOIg4O1aA-1dk4   76s   kubelet-bootstrap   Pending

Approve (issue) the certificate

[root@master kubeconfig]# kubectl certificate approve node-csr--Gfv2DId9y-RWvziMubrN96vlNp6fOOIg4O1aA-1dk4
certificatesigningrequest.certificates.k8s.io/node-csr--Gfv2DId9y-RWvziMubrN96vlNp6fOOIg4O1aA-1dk4 approved

Check the CSR status again. Approved,Issued means the node has been allowed to join the cluster.

[root@master kubeconfig]# kubectl get csr
NAME                                                   AGE   REQUESTOR           CONDITION
node-csr--Gfv2DId9y-RWvziMubrN96vlNp6fOOIg4O1aA-1dk4   10m   kubelet-bootstrap   Approved,Issued

View the cluster nodes; node01 has joined successfully

[root@master kubeconfig]# kubectl get nodes
NAME        STATUS   ROLES    AGE   VERSION
14.0.0.33   Ready    <none>   19m   v1.12.3

Causes of a NotReady node:
1. Network problems
2. kubelet problems
If kubelet is down and will not start, check the kubeconfig immediately and re-obtain the certificate.
If the kubeconfig is fine, check whether the local node address matches.
Once kubelet is running again, re-issue the certificate and re-apply to join the cluster (a few quick checks are sketched below).
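
A few quick checks that usually narrow this down (run on the affected node):

systemctl status kubelet flanneld                     #are the node services actually running?
journalctl -u kubelet --no-pager | tail -n 20         #the last kubelet log lines usually show the reason
grep server: /opt/kubernetes/cfg/*.kubeconfig         #is the apiserver address the node uses correct?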

On node01
Start the kube-proxy service

[root@node01 ~]# bash proxy.sh 14.0.0.33
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.

[root@node01 ~]# ps aux | grep kube
root      57995  1.1  1.9 889012 75836 ?        Ssl  09:12   0:28 /opt/kubernetes/bin/kubelet --logtostderr=true --v=4 --hostname-override=14.0.0.33 --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig --bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig --config=/opt/kubernetes/cfg/kubelet.config --cert-dir=/opt/kubernetes/ssl --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0
root      65987  1.6  0.5  42292 19776 ?        Ssl  09:53   0:00 /opt/kubernetes/bin/kube-proxy --logtostderr=true --v=4 --hostname-override=14.0.0.33 --cluster-cidr=10.0.0.0/24 --proxy-mode=ipvs --kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig
root      66242  0.0  0.0 112728   988 pts/1    S+   09:54   0:00 grep --color=auto kube

[root@node01 ~]# systemctl status kube-proxy.service
● kube-proxy.service - Kubernetes Proxy
   Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
   Active: active (running) since 三 2020-09-30 09:53:47 CST; 1min 6s ago
 Main PID: 65987 (kube-proxy)
    Tasks: 0
   Memory: 7.7M
   CGroup: /system.slice/kube-proxy.service
           ‣ 65987 /opt/kubernetes/bin/kube-proxy --logtostderr=true --v=4 --hostname-override=14.0.0...

9月 30 09:54:48 node01 kube-proxy[65987]: I0930 09:54:48.012860   65987 proxier.go:1472] Bind ad....0.1
9月 30 09:54:48 node01 kube-proxy[65987]: I0930 09:54:48.029608   65987 iptables.go:391] running...ers]
9月 30 09:54:48 node01 kube-proxy[65987]: I0930 09:54:48.031304   65987 proxier.go:672] syncProx...68ms
9月 30 09:54:48 node01 kube-proxy[65987]: I0930 09:54:48.031326   65987 bounded_frequency_runner... 30s
9月 30 09:54:50 node01 kube-proxy[65987]: I0930 09:54:50.013159   65987 config.go:141] Calling h...date
9月 30 09:54:50 node01 kube-proxy[65987]: I0930 09:54:50.013192   65987 config.go:141] Calling h...date
9月 30 09:54:52 node01 kube-proxy[65987]: I0930 09:54:52.020156   65987 config.go:141] Calling h...date
9月 30 09:54:52 node01 kube-proxy[65987]: I0930 09:54:52.020339   65987 config.go:141] Calling h...date
9月 30 09:54:54 node01 kube-proxy[65987]: I0930 09:54:54.026874   65987 config.go:141] Calling h...date
9月 30 09:54:54 node01 kube-proxy[65987]: I0930 09:54:54.026904   65987 config.go:141] Calling h...date
Hint: Some lines were ellipsized, use -l to show in full.

[root@node01 ~]# systemctl status kubelet.service
● kubelet.service - Kubernetes Kubelet
   Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
   Active: active (running) since 三 2020-09-30 09:12:54 CST; 43min ago
 Main PID: 57995 (kubelet)
    Tasks: 19
   Memory: 42.0M
   CGroup: /system.slice/kubelet.service
           └─57995 /opt/kubernetes/bin/kubelet --logtostderr=true --v=4 --hostname-override=14.0.0.33...

9月 30 09:55:51 node01 kubelet[57995]: I0930 09:55:51.121404   57995 helpers.go:836] eviction ma...6737
9月 30 09:55:51 node01 kubelet[57995]: I0930 09:55:51.121414   57995 eviction_manager.go:317] ev...rved
9月 30 09:55:52 node01 kubelet[57995]: I0930 09:55:52.768378   57995 kubelet.go:2164] Container ...age:
9月 30 09:55:52 node01 kubelet[57995]: I0930 09:55:52.832906   57995 kubelet.go:1970] SyncLoop (...ing)
9月 30 09:55:54 node01 kubelet[57995]: I0930 09:55:54.784539   57995 setters.go:775] Error getti...-ebs
9月 30 09:55:54 node01 kubelet[57995]: I0930 09:55:54.784562   57995 setters.go:775] Error getti...disk
9月 30 09:55:54 node01 kubelet[57995]: I0930 09:55:54.784568   57995 setters.go:775] Error getti...e-pd
9月 30 09:55:54 node01 kubelet[57995]: I0930 09:55:54.832481   57995 kubelet.go:1970] SyncLoop (...ing)
9月 30 09:55:56 node01 kubelet[57995]: I0930 09:55:56.832175   57995 kubelet.go:1970] SyncLoop (...ing)
9月 30 09:55:57 node01 kubelet[57995]: I0930 09:55:57.775917   57995 kubelet.go:2164] Container ...age:
Hint: Some lines were ellipsized, use -l to show in full.

2.7 Deploying node02

On node01
Simply copy the existing /opt/kubernetes directory to the other node and then modify it.

[root@node01 opt]# scp -r /opt/kubernetes/ root@14.0.0.55:/opt/
The authenticity of host '14.0.0.55 (14.0.0.55)' can't be established.
ECDSA key fingerprint is SHA256:z9vHb2dw6Zd/POC6FB9aWs3R7QkKcb9dMJ6JeghBt5s.
ECDSA key fingerprint is MD5:5d:b6:f4:af:3f:1d:23:9f:2b:7b:7b:fa:cd:14:1b:fb.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '14.0.0.55' (ECDSA) to the list of known hosts.
root@14.0.0.55's password: 
flanneld                                                              100%  223   418.5KB/s   00:00    
bootstrap.kubeconfig                                                  100% 2163     3.9MB/s   00:00    
kube-proxy.kubeconfig                                                 100% 6265    10.5MB/s   00:00    
kubelet                                                               100%  373   784.8KB/s   00:00    
kubelet.config                                                        100%  263   477.0KB/s   00:00    
kubelet.kubeconfig                                                    100% 2292     3.3MB/s   00:00    
kube-proxy                                                            100%  185   304.5KB/s   00:00    
mk-docker-opts.sh                                                     100% 2139     3.8MB/s   00:00    
flanneld                                                              100%   35MB 115.3MB/s   00:00    
kubelet                                                               100%  168MB 124.0MB/s   00:01    
kube-proxy                                                            100%   48MB 126.6MB/s   00:00    
kubelet.crt                                                           100% 2165   997.3KB/s   00:00    
kubelet.key                                                           100% 1675     2.1MB/s   00:00    
kubelet-client-2020-09-30-09-22-58.pem                                100% 1269   403.7KB/s   00:00    
kubelet-client-current.pem                                            100% 1269   627.3KB/s   00:00    

##Copy the kubelet and kube-proxy service files (startup scripts) to node02
[root@node01 opt]# scp /usr/lib/systemd/system/{kubelet,kube-proxy}.service root@14.0.0.55:/usr/lib/systemd/system/
root@14.0.0.55's password: 
kubelet.service                                                       100%  264   439.8KB/s   00:00    
kube-proxy.service                                                    100%  231   347.3KB/s   00:00    

On node02, make the modifications.
First delete the copied certificates; node02 will request its own certificates later.

[root@node02 ~]# cd /opt/kubernetes/ssl/
[root@node02 ssl]# rm -rf *

Modify the three configuration files: kubelet, kubelet.config, and kube-proxy (a one-line sed alternative is shown after the edits below)

[root@node02 ssl]# cd ../cfg/
[root@node02 cfg]# vim kubelet

KUBELET_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=14.0.0.55 \                                  ##change 14.0.0.33 to 14.0.0.55
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
--config=/opt/kubernetes/cfg/kubelet.config \
--cert-dir=/opt/kubernetes/ssl \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"

[root@node02 cfg]# vim kubelet.config

kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 14.0.0.55                                                      ##change 14.0.0.33 to 14.0.0.55
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local.
failSwapOn: false
authentication:
  anonymous:
    enabled: true

[root@node02 cfg]# vim kube-proxy
KUBE_PROXY_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=14.0.0.55 \                                ##change 14.0.0.33 to 14.0.0.55
--cluster-cidr=10.0.0.0/24 \
--proxy-mode=ipvs \
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"
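
The same change can also be made in one shot with sed instead of editing each file by hand (equivalent to the three edits above):

[root@node02 cfg]# sed -i 's/14.0.0.33/14.0.0.55/g' kubelet kubelet.config kube-proxy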

Start the kubelet service on node02

[root@node02 cfg]# systemctl start kubelet.service 

On the master
View the pending request

[root@master kubeconfig]# kubectl get csr
NAME                                                   AGE   REQUESTOR           CONDITION
node-csr--Gfv2DId9y-RWvziMubrN96vlNp6fOOIg4O1aA-1dk4   68m   kubelet-bootstrap   Approved,Issued
node-csr-mzOJMJE3i_6g7GWVK27XW2DGsKGrbHwDwApQMoeMNE4   8s    kubelet-bootstrap   Pending

Approve the request to join the cluster

[root@master kubeconfig]# kubectl certificate approve node-csr-mzOJMJE3i_6g7GWVK27XW2DGsKGrbHwDwApQMoeMNE4
certificatesigningrequest.certificates.k8s.io/node-csr-mzOJMJE3i_6g7GWVK27XW2DGsKGrbHwDwApQMoeMNE4 approved

Check the CSR status again

[root@master kubeconfig]# kubectl get csr
NAME                                                   AGE     REQUESTOR           CONDITION
node-csr--Gfv2DId9y-RWvziMubrN96vlNp6fOOIg4O1aA-1dk4   71m     kubelet-bootstrap   Approved,Issued
node-csr-mzOJMJE3i_6g7GWVK27XW2DGsKGrbHwDwApQMoeMNE4   2m57s   kubelet-bootstrap   Approved,Issued

Continue on node02
Check the kubelet service status

[root@node02 cfg]# systemctl status kubelet.service 
● kubelet.service - Kubernetes Kubelet
   Loaded: loaded (/usr/lib/systemd/system/kubelet.service; disabled; vendor preset: disabled)
   Active: active (running) since 三 2020-09-30 10:21:23 CST; 5min ago
 Main PID: 11495 (kubelet)
    Tasks: 18
   Memory: 32.9M
   CGroup: /system.slice/kubelet.service
           └─11495 /opt/kubernetes/bin/kubelet --logtostderr=true --v=4 --hostname-override=14.0.0.5...

9月 30 10:26:52 node02 kubelet[11495]: I0930 10:26:52.104555   11495 helpers.go:836] eviction m...1779
9月 30 10:26:52 node02 kubelet[11495]: I0930 10:26:52.104567   11495 eviction_manager.go:317] e...rved
9月 30 10:26:52 node02 kubelet[11495]: I0930 10:26:52.198082   11495 kubelet.go:2164] Container...age:
9月 30 10:26:52 node02 kubelet[11495]: I0930 10:26:52.225391   11495 setters.go:775] Error gett...-ebs
9月 30 10:26:52 node02 kubelet[11495]: I0930 10:26:52.225418   11495 setters.go:775] Error gett...disk
9月 30 10:26:52 node02 kubelet[11495]: I0930 10:26:52.225424   11495 setters.go:775] Error gett...e-pd
9月 30 10:26:53 node02 kubelet[11495]: I0930 10:26:53.526081   11495 kubelet.go:1970] SyncLoop ...ing)
9月 30 10:26:55 node02 kubelet[11495]: I0930 10:26:55.525741   11495 kubelet.go:1970] SyncLoop ...ing)
9月 30 10:26:57 node02 kubelet[11495]: I0930 10:26:57.208741   11495 kubelet.go:2164] Container...age:
9月 30 10:26:57 node02 kubelet[11495]: I0930 10:26:57.526293   11495 kubelet.go:1970] SyncLoop ...ing)
Hint: Some lines were ellipsized, use -l to show in full.

##Enable kubelet to start at boot
[root@node02 cfg]# systemctl enable kubelet.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.

#Start the kube-proxy service
[root@node02 cfg]# systemctl start kube-proxy.service
[root@node02 cfg]# systemctl enable kube-proxy.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.

On node01
Enable the services to start at boot

[root@node01 opt]# systemctl enable kubelet.service
[root@node01 opt]# systemctl enable kube-proxy.service

On the master
View the nodes in the cluster (both nodes have joined)

[root@master kubeconfig]# kubectl get nodes
NAME        STATUS   ROLES    AGE    VERSION
14.0.0.33   Ready    <none>   68m    v1.12.3
14.0.0.55   Ready    <none>   7m3s   v1.12.3

On node02, look inside /opt/kubernetes (the certificates have been regenerated)

[root@node02 kubernetes]# tree ./
./
├── bin
│   ├── flanneld
│   ├── kubelet
│   ├── kube-proxy
│   └── mk-docker-opts.sh
├── cfg
│   ├── bootstrap.kubeconfig
│   ├── flanneld
│   ├── kubelet
│   ├── kubelet.config
│   ├── kubelet.kubeconfig
│   ├── kube-proxy
│   └── kube-proxy.kubeconfig
└── ssl
    ├── kubelet-client-2020-09-30-10-24-11.pem
    ├── kubelet-client-current.pem -> /opt/kubernetes/ssl/kubelet-client-2020-09-30-10-24-11.pem
    ├── kubelet.crt
    └── kubelet.key

3 directories, 15 files

Single-node (single-master) deployment complete!

3. Multi-Node K8s Deployment

3.1 Deploying master02

First disable the firewall and SELinux

iptables -F
setenforce 0
iptables -t nat -F

On master01

##Copy the kubernetes directory to master02
[root@master ~]# scp -r /opt/kubernetes/ root@14.0.0.66:/opt/
The authenticity of host '14.0.0.66 (14.0.0.66)' can't be established.
ECDSA key fingerprint is SHA256:y0I5TGkLMAQpI970UjGcMN+OI45x7U/UKJL5JcbmIRI.
ECDSA key fingerprint is MD5:f9:78:cd:e7:7c:9d:f4:68:57:1e:f0:64:95:10:8c:9b.
Are you sure you want to continue connecting (yes/no)? yes 
Warning: Permanently added '14.0.0.66' (ECDSA) to the list of known hosts.
root@14.0.0.66's password: 
Permission denied, please try again.
root@14.0.0.66's password: 
token.csv                                                            100%   84   148.5KB/s   00:00    
kube-apiserver                                                       100%  909     1.4MB/s   00:00    
kube-scheduler                                                       100%   94   159.9KB/s   00:00    
kube-controller-manager                                              100%  483   653.8KB/s   00:00    
kube-apiserver                                                       100%  184MB 134.4MB/s   00:01    
kubectl                                                              100%   55MB 129.6MB/s   00:00    
kube-controller-manager                                              100%  155MB 131.5MB/s   00:01    
kube-scheduler                                                       100%   55MB 125.9MB/s   00:00    
ca-key.pem                                                           100% 1679     2.3MB/s   00:00    
ca.pem                                                               100% 1359     1.9MB/s   00:00    
server-key.pem                                                       100% 1679     1.7MB/s   00:00    
server.pem                                                           100% 1643     1.7MB/s   00:00    

##Copy the three master component service files: kube-apiserver.service, kube-controller-manager.service, kube-scheduler.service

[root@master ~]# scp /usr/lib/systemd/system/{kube-apiserver,kube-controller-manager,kube-scheduler}.service root@14.0.0.66:/usr/lib/systemd/system/
root@14.0.0.66's password: 
kube-apiserver.service                                               100%  282   511.2KB/s   00:00    
kube-controller-manager.service                                      100%  317   405.2KB/s   00:00    
kube-scheduler.service                                               100%  281   317.2KB/s   00:00    

On master02
Modify the IP addresses in the kube-apiserver configuration file

[root@master02 ~]# cd /opt/kubernetes/cfg/
[root@master02 cfg]# vim kube-apiserver    ##apiserver: the entry point for managing the k8s cluster
KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--etcd-servers=https://14.0.0.11:2379,https://14.0.0.33:2379,https://14.0.0.55:2379 \
--bind-address=14.0.0.66 \
--secure-port=6443 \
--advertise-address=14.0.0.66 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--kubelet-https=true \
--enable-bootstrap-token-auth \
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/opt/kubernetes/ssl/server.pem  \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem"

Special note: master02 must have the etcd certificates.
Copy the existing etcd certificates from master01 to master02.

On master01

[root@master ~]# scp -r /opt/etcd/ root@14.0.0.66:/opt/
root@14.0.0.66's password: 
etcd                                                                   100%  481    87.1KB/s   00:00    
etcd                                                                   100%   18MB 118.8MB/s   00:00    
etcdctl                                                                100%   15MB 123.2MB/s   00:00    
ca-key.pem                                                             100% 1679     1.3MB/s   00:00    
ca.pem                                                                 100% 1265   388.7KB/s   00:00    
server-key.pem                                                         100% 1679     2.3MB/s   00:00    
server.pem                                                             100% 1338     2.0MB/s   00:00    

Start the three component services on master02

[root@master02 cfg]# systemctl start kube-apiserver 
[root@master02 cfg]# systemctl start kube-controller-manager.service 
[root@master02 cfg]# systemctl start kube-scheduler.service 

Add the environment variable

[root@master02 cfg]# vim /etc/profile
#append at the end
export PATH=$PATH:/opt/kubernetes/bin/

[root@master02 cfg]# source /etc/profile
[root@master02 cfg]# kubectl get node
NAME        STATUS   ROLES    AGE    VERSION
14.0.0.33   Ready    <none>   7d5h   v1.12.3
14.0.0.55   Ready    <none>   7d4h   v1.12.3

3.2 Operations on nginx01 and nginx02

Install the nginx service, and copy the nginx.sh and keepalived.conf files to the home directory

iptables -F
setenforce 0
systemctl stop firewalld.service

Create the nginx yum repository

[root@nginx01 ~]# vim /etc/yum.repos.d/nginx.repo
[nginx]
name=nginx repo
baseurl=http://nginx.org/packages/centos/7/$basearch/
gpgcheck=0

Install nginx

[root@nginx01 ~]# yum install nginx -y

Add layer-4 (TCP) forwarding

[root@nginx01 ~]# vim /etc/nginx/nginx.conf
##add this between the events and http blocks
events {
    worker_connections  1024;
}

stream {

   log_format  main  '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
    access_log  /var/log/nginx/k8s-access.log  main;

    upstream k8s-apiserver {
        server 14.0.0.11:6443;
        server 14.0.0.66:6443;
    }
    server {
                listen 6443;
                proxy_pass k8s-apiserver;
    }
    }
http {

[root@nginx01 ~]# systemctl start nginx
[root@nginx01 ~]# netstat -ntap | grep nginx
tcp        0      0 0.0.0.0:6443            0.0.0.0:*               LISTEN      111590/nginx: maste 
tcp        0      0 0.0.0.0:80              0.0.0.0:*               LISTEN      111590/nginx: maste 
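
A quick sanity check that the stream proxy really reaches an apiserver (the exact reply depends on the apiserver's anonymous-access and RBAC settings; any HTTP response at all confirms the forwarding path, and entries appear in the access log once node traffic is switched to the load balancer):

[root@nginx01 ~]# curl -k https://127.0.0.1:6443/version
[root@nginx01 ~]# tail /var/log/nginx/k8s-access.log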

Deploy the keepalived service

[root@nginx01 ~]# yum install keepalived -y


##Upload the keepalived.conf and nginx.sh files to both nginx01 and nginx02
###Modify the configuration file

##Note: lb01 (nginx01) is the MASTER; its configuration is as follows:
[root@nginx01 ~]# cd /etc/keepalived/
[root@nginx01 keepalived]# vim keepalived.conf

! Configuration File for keepalived 
 
global_defs { 
   # notification email recipients 
   notification_email { 
     acassen@firewall.loc 
     failover@firewall.loc 
     sysadmin@firewall.loc 
   } 
   # notification email sender address 
   notification_email_from Alexandre.Cassen@firewall.loc  
   smtp_server 127.0.0.1 
   smtp_connect_timeout 30 
   router_id NGINX_MASTER 
} 

vrrp_script check_nginx {
    script "/etc/nginx/check_nginx.sh"
}

vrrp_instance VI_1 { 
    state MASTER 
    interface ens33
    virtual_router_id 51 # VRRP router ID; each instance must be unique 
    priority 100    # priority; set to 90 on the backup server 
    advert_int 1    # VRRP advertisement (heartbeat) interval, default 1 second 
    authentication { 
        auth_type PASS      
        auth_pass 1111 
    }  
    virtual_ipaddress { 
        14.0.0.100/24 
    } 
    track_script {
        check_nginx
    } 
}


##Note: lb02 (nginx02) is the BACKUP; its configuration is as follows:
! Configuration File for keepalived 
 
global_defs { 
   # notification email recipients 
   notification_email { 
     acassen@firewall.loc 
     failover@firewall.loc 
     sysadmin@firewall.loc 
   } 
   # notification email sender address 
   notification_email_from Alexandre.Cassen@firewall.loc  
   smtp_server 127.0.0.1 
   smtp_connect_timeout 30 
   router_id NGINX_MASTER 
} 

vrrp_script check_nginx {
    script "/etc/nginx/check_nginx.sh"
}

vrrp_instance VI_1 { 
    state BACKUP 
    interface ens33
    virtual_router_id 51 # VRRP router ID; each instance must be unique 
    priority 90    # priority; the backup server is set to 90 
    advert_int 1    # VRRP advertisement (heartbeat) interval, default 1 second 
    authentication { 
        auth_type PASS      
        auth_pass 1111 
    }  
    virtual_ipaddress { 
        14.0.0.100/24 
    } 
    track_script {
        check_nginx
    } 
}

Create the nginx health-check script on both nginx servers

[root@nginx01 keepalived]# vim /etc/nginx/check_nginx.sh
#!/bin/bash
# Count running nginx processes, excluding the grep itself and this script's own PID
count=$(ps -ef |grep nginx |egrep -cv "grep|$$")

# If nginx is no longer running, stop keepalived so the VIP fails over to the backup
if [ "$count" -eq 0 ];then
    systemctl stop keepalived
fi
[root@nginx01 keepalived]# chmod +x /etc/nginx/check_nginx.sh
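
With nginx still running, the script can be executed by hand as a spot check; it should count at least one nginx process and do nothing (do not run it while nginx is stopped unless you want keepalived to be stopped and the VIP to move):

[root@nginx01 keepalived]# bash /etc/nginx/check_nginx.sh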

Start the keepalived service

[root@nginx01 keepalived]# systemctl start keepalived

Check the address information on lb01

[root@nginx01 keepalived]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:61:58:cb brd ff:ff:ff:ff:ff:ff
    inet 14.0.0.77/24 brd 14.0.0.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet 14.0.0.100/24 scope global secondary ens33     // the floating VIP is currently on lb01
       valid_lft forever preferred_lft forever
    inet6 fe80::c35c:f881:4fcc:f246/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever
3: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default qlen 1000
    link/ether 52:54:00:db:f0:f9 brd ff:ff:ff:ff:ff:ff
    inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
       valid_lft forever preferred_lft forever
4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN group default qlen 1000
    link/ether 52:54:00:db:f0:f9 brd ff:ff:ff:ff:ff:ff
5: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default 
    link/ether 02:42:83:80:42:e2 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
       valid_lft forever preferred_lft forever

Check the address information on lb02

[root@nginx02 keepalived]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:82:89:db brd ff:ff:ff:ff:ff:ff
    inet 14.0.0.88/24 brd 14.0.0.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::71c9:9f4a:a660:121a/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever
3: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default qlen 1000
    link/ether 52:54:00:d6:71:ca brd ff:ff:ff:ff:ff:ff
    inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
       valid_lft forever preferred_lft forever
4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN group default qlen 1000
    link/ether 52:54:00:d6:71:ca brd ff:ff:ff:ff:ff:ff

//Verify VIP failover: run pkill nginx on nginx01, then check with ip add on nginx02
//Recovery: on nginx01, start the nginx service first, then start keepalived
//nginx document root: /usr/share/nginx/html

## On nginx01
[root@nginx01 keepalived]# pkill nginx
[root@nginx01 keepalived]# netstat -ntap |grep nginx
[root@nginx01 keepalived]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:61:58:cb brd ff:ff:ff:ff:ff:ff
    inet 14.0.0.77/24 brd 14.0.0.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::c35c:f881:4fcc:f246/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever
3: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default qlen 1000
    link/ether 52:54:00:db:f0:f9 brd ff:ff:ff:ff:ff:ff
    inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
       valid_lft forever preferred_lft forever
4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN group default qlen 1000
    link/ether 52:54:00:db:f0:f9 brd ff:ff:ff:ff:ff:ff
5: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default 
    link/ether 02:42:83:80:42:e2 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
       valid_lft forever preferred_lft forever

## On nginx02
[root@nginx02 keepalived]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:82:89:db brd ff:ff:ff:ff:ff:ff
    inet 14.0.0.88/24 brd 14.0.0.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet 14.0.0.100/24 scope global secondary ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::71c9:9f4a:a660:121a/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever
3: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default qlen 1000
    link/ether 52:54:00:d6:71:ca brd ff:ff:ff:ff:ff:ff
    inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
       valid_lft forever preferred_lft forever
4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN group default qlen 1000
    link/ether 52:54:00:d6:71:ca brd ff:ff:ff:ff:ff:ff


[root@nginx01 keepalived]# systemctl start nginx
[root@nginx01 keepalived]# netstat -ntap |grep nginx
tcp        0      0 0.0.0.0:6443            0.0.0.0:*               LISTEN      112127/nginx: maste 
tcp        0      0 0.0.0.0:80              0.0.0.0:*               LISTEN      112127/nginx: maste 
[root@nginx01 keepalived]# systemctl start keepalived
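
Because nginx01 has the higher priority and keepalived preempts by default, the VIP should move back to nginx01 shortly after keepalived is restarted; a quick grep of the interface confirms it (assuming the interface name ens33 from the configuration above):

[root@nginx01 keepalived]# ip add show ens33 | grep 14.0.0.100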

Modify the configuration files on both node servers so they all point to the VIP (bootstrap.kubeconfig, kubelet.kubeconfig, kube-proxy.kubeconfig)

[root@node01 ~]# vim /opt/kubernetes/cfg/bootstrap.kubeconfig
[root@node01 ~]# vim /opt/kubernetes/cfg/kubelet.kubeconfig
[root@node01 ~]# vim /opt/kubernetes/cfg/kube-proxy.kubeconfig
// change the server entry in all of them to the VIP
server: https://14.0.0.100:6443
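
Instead of editing the three files by hand on each node, a single sed substitution can do the replacement; this is just a convenience sketch and assumes the files currently point at the original master, 14.0.0.11:

cd /opt/kubernetes/cfg
sed -i 's#https://14.0.0.11:6443#https://14.0.0.100:6443#' bootstrap.kubeconfig kubelet.kubeconfig kube-proxy.kubeconfig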

Restart the services

[root@node01 ~]# systemctl restart kubelet.service 
[root@node01 ~]# systemctl restart kube-proxy.service 

After the replacement, run a quick self-check

[root@node01 cfg]# grep 100 * 
bootstrap.kubeconfig:    server: https://14.0.0.100:6443
kubelet.kubeconfig:    server: https://14.0.0.100:6443
kube-proxy.kubeconfig:    server: https://14.0.0.100:6443
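
From a node you can also verify the whole path (node → VIP → nginx → apiserver) in one step; as with the earlier check, either version information or an authorization error means the request reached an apiserver through the load balancer:

[root@node01 cfg]# curl -k https://14.0.0.100:6443/version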

On nginx01, check nginx's k8s access log

[root@nginx01 keepalived]# tail /var/log/nginx/k8s-access.log
14.0.0.33 14.0.0.66:6443 - [27/Aug/2020:05:18:17 +0800] 200 1114
14.0.0.33 14.0.0.66:6443 - [27/Aug/2020:05:18:17 +0800] 200 1114
14.0.0.55 14.0.0.66:6443 - [27/Aug/2020:05:18:47 +0800] 200 1566
14.0.0.33 14.0.0.66:6443 - [27/Aug/2020:05:18:47 +0800] 200 1566
14.0.0.55 14.0.0.11:6443 - [27/Aug/2020:05:19:45 +0800] 200 1115
14.0.0.55 14.0.0.66:6443 - [27/Aug/2020:05:19:45 +0800] 200 1116
14.0.0.33 14.0.0.66:6443 - [27/Aug/2020:05:19:45 +0800] 200 1116
14.0.0.33 14.0.0.11:6443 - [27/Aug/2020:05:19:45 +0800] 200 1115
14.0.0.33 14.0.0.66:6443 - [27/Aug/2020:05:20:09 +0800] 200 1566
14.0.0.55 14.0.0.66:6443 - [27/Aug/2020:05:20:09 +0800] 200 1566

Multi-node deployment is complete!

3.3 Verification

Operate on master01
Test creating a pod

[root@master ~]# kubectl run nginx --image=nginx
kubectl run --generator=deployment/apps.v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl create instead.
deployment.apps/nginx created

(To delete it later: kubectl delete deploy/nginx)

//check the status
[root@master ~]# kubectl get pods
NAME                    READY   STATUS              RESTARTS   AGE
nginx-dbddb74b8-t45hv   0/1     ContainerCreating   0          26s     ##still being created
[root@master ~]# kubectl get pods
NAME                    READY   STATUS    RESTARTS   AGE
nginx-dbddb74b8-t45hv   1/1     Running   0          34s  //creation complete, running

Note the log-access problem

[root@master ~]# kubectl logs nginx-dbddb74b8-t45hv
Error from server (Forbidden): Forbidden (user=system:anonymous, verb=get, resource=nodes, subresource=proxy) ( pods/log nginx-dbddb74b8-t45hv)

Grant administrator privileges to anonymous users

[root@master ~]# kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous
clusterrolebinding.rbac.authorization.k8s.io/cluster-system-anonymous created
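
Binding cluster-admin to system:anonymous is only acceptable in a lab environment; once testing is done, the binding can be removed again:

[root@master ~]# kubectl delete clusterrolebinding cluster-system-anonymous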

[root@master ~]# kubectl logs nginx-dbddb74b8-t45hv
/docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration
/docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/
/docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh
10-listen-on-ipv6-by-default.sh: Getting the checksum of /etc/nginx/conf.d/default.conf
10-listen-on-ipv6-by-default.sh: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf
/docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh
/docker-entrypoint.sh: Configuration complete; ready for start up

### The logs can also be viewed on master02

Check the pod network

[root@master ~]# kubectl get pods -o wide
NAME                    READY   STATUS    RESTARTS   AGE   IP            NODE        NOMINATED NODE
nginx-dbddb74b8-t45hv   1/1     Running   0          20m   172.17.55.2   14.0.0.33   <none>

The pod can be accessed directly from the node on the corresponding subnet

[root@node01 cfg]# curl 172.17.55.2
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>

Each access generates a log entry.
Go back to master01:

[root@master ~]# kubectl logs nginx-dbddb74b8-t45hv
/docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration
/docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/
/docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh
10-listen-on-ipv6-by-default.sh: Getting the checksum of /etc/nginx/conf.d/default.conf
10-listen-on-ipv6-by-default.sh: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf
/docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh
/docker-entrypoint.sh: Configuration complete; ready for start up
172.17.55.1 - - [07/Oct/2020:17:53:44 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0" "-"

##after accessing the pod a few more times from the node
[root@master ~]# kubectl logs nginx-dbddb74b8-t45hv
/docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration
/docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/
/docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh
10-listen-on-ipv6-by-default.sh: Getting the checksum of /etc/nginx/conf.d/default.conf
10-listen-on-ipv6-by-default.sh: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf
/docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh
/docker-entrypoint.sh: Configuration complete; ready for start up
172.17.55.1 - - [07/Oct/2020:17:53:44 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0" "-"
172.17.55.1 - - [07/Oct/2020:17:54:54 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0" "-"
172.17.55.1 - - [07/Oct/2020:17:54:55 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0" "-"