k8s 1.16 Deployment
I. Initialize the cluster servers (run on every node)
1. Disable firewalld and SELinux
systemctl stop firewalld && systemctl disable firewalld
sed -i '/SELINUX=/s/enforcing/disabled/' /etc/selinux/config
setenforce 0
2. Disable swap
sed -i '/swap/s/^/#/' /etc/fstab
swapoff -a
3. Synchronize time
ntpdate domsrvbj20.hna.net
4. Set hostnames and distribute a common hosts file so every node resolves the others
II. Create self-signed certificates with cfssl
1. Create working directories for the certificate files
mkdir -pv /opt/ssl/etcd /opt/ssl/k8s
2. Install the cfssl certificate tools
tar -xf cfssl.tar.gz
cp cfssl /usr/bin/cfssl
cp cfssl-certinfo /usr/bin/cfssl-certinfo
cp cfssljson /usr/bin/cfssljson
III. Deploy the etcd cluster
Prepare the etcd certificate request files in the /opt/ssl/etcd directory
CN = commonName
O  = organizationName
OU = organizationalUnitName
L  = localityName
ST = stateOrProvinceName
C  = countryName
cat > ca-csr.json <<EOF
{
  "CN": "etcd CA",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing"
    }
  ]
}
EOF
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "www": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF
cat > server-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "192.168.10.20",
    "192.168.10.22",
    "192.168.10.23"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing"
    }
  ]
}
EOF
Generate the etcd CA and issue the etcd server certificate
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
Check the generated certificate files
[root@master1 etcd]# ls /opt/ssl/etcd/
ca-config.json ca.csr ca-csr.json ca-key.pem ca.pem server.csr server-csr.json server-key.pem server.pem
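If you want to double-check a certificate's SANs and expiry, cfssl-certinfo (installed above) can decode it:
cfssl-certinfo -cert /opt/ssl/etcd/server.pem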
Create the working directory on each of the three etcd nodes
mkdir -pv /opt/etcd/{bin,cfg,ssl}
Unpack the etcd release and distribute the binaries and certificates to the three servers
tar -xf etcd-v3.3.13-linux-amd64.tar.gz
cp etcd etcdctl /opt/etcd/bin/
cp /opt/ssl/etcd/{ca,server,server-key}.pem /opt/etcd/ssl/
Create the etcd configuration file
cat > /opt/etcd/cfg/etcd.conf <<EOF
#[Member]
# Name of this member within the etcd cluster
ETCD_NAME="etcd-1"
# Data directory
ETCD_DATA_DIR="/opt/etcd/data"
# Local node IP (change on etcd-2 / etcd-3)
ETCD_LISTEN_PEER_URLS="https://192.168.10.20:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.10.20:2379"
#[Clustering]
# Local node IP (change on etcd-2 / etcd-3)
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.10.20:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.10.20:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.10.20:2380,etcd-2=https://192.168.10.22:2380,etcd-3=https://192.168.10.23:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
Note: on etcd-2 and etcd-3, change ETCD_NAME and the listen/advertise addresses to the local node's values.
Create the etcd systemd service file
cat > /usr/lib/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/opt/etcd/cfg/etcd.conf
ExecStart=/opt/etcd/bin/etcd --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --peer-cert-file=/opt/etcd/ssl/server.pem --peer-key-file=/opt/etcd/ssl/server-key.pem --trusted-ca-file=/opt/etcd/ssl/ca.pem --peer-trusted-ca-file=/opt/etcd/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
Start etcd and enable it at boot
systemctl start etcd && systemctl enable etcd
Check etcd cluster health
/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.10.20:2379,https://192.168.10.22:2379,https://192.168.10.23:2379" cluster-health
The output should show every member and the cluster as "healthy":
member 1778dce7e1e4850 is healthy: got healthy result from https://192.168.10.23:2379
member 22f15f5d46aec4a0 is healthy: got healthy result from https://192.168.10.22:2379
member be3fc3d5e1dfe2ce is healthy: got healthy result from https://192.168.10.20:2379
cluster is healthy
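The same check can also be run against the v3 API (etcdctl from etcd 3.3 switches to v3 mode when ETCDCTL_API=3 is set; the flag names differ from the v2 ones above):
ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.10.20:2379,https://192.168.10.22:2379,https://192.168.10.23:2379" endpoint health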
IV. Install Docker on all nodes
# Unpack the package
tar -xf docker-18.09.6.tgz
# Copy the binaries
cp docker/* /usr/bin/
# Create the systemd service file
vim /usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
[Service]
Type=notify
ExecStart=/usr/bin/dockerd
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
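Optionally, a /etc/docker/daemon.json can be created before starting the service to control the data directory and log rotation (this file and its values are an optional addition, not part of the original packages):
mkdir -p /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
  "data-root": "/var/lib/docker",
  "log-driver": "json-file",
  "log-opts": { "max-size": "100m", "max-file": "3" }
}
EOF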
# Start docker and enable it at boot
systemctl start docker.service && systemctl enable docker.service
Running docker info may show the following warnings:
WARNING: bridge-nf-call-iptables is disabled
WARNING: bridge-nf-call-ip6tables is disabled
Fix:
vim /etc/sysctl.conf   # add the following settings
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
sysctl -p
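If sysctl -p complains that the net.bridge.* keys are unknown, the br_netfilter kernel module is not loaded; load it, persist it across reboots, then re-run sysctl -p:
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
sysctl -p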
V. Deploy the k8s master
Generate the API server certificates in the /opt/ssl/k8s directory
Script that generates the certificates:
vim k8s-cert.sh   # remove the inline // comments before running
# CA signing configuration
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF
# CA certificate signing request
cat > ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
# Generate ca.pem and ca-key.pem from the CSR
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
# API server certificate signing request
# Note: the master2 and load-balancer IPs are added in advance for the later multi-master deployment
cat > server-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "10.0.0.1",
    "127.0.0.1",
    "192.168.10.20",    // master1 node, delete this comment before running
    "192.168.10.21",    // master2 node, delete this comment before running
    "192.168.10.100",   // VIP, delete this comment before running
    "192.168.10.101",   // load balancer (master), delete this comment before running
    "192.168.10.102",   // load balancer (backup), delete this comment before running
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
# Generate the server certificate from the files above
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
# Admin (kubectl) client certificate signing request
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
# kube-proxy certificate signing request
cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
Check the generated certificate files
# ls
admin.csr admin-key.pem ca-config.json ca-csr.json ca.pem kube-proxy.csr kube-proxy-key.pem server-csr.json
admin-csr.json admin.pem ca.csr ca-key.pem k8s-cert.sh kube-proxy-csr.json kube-proxy.pem
Create the kubernetes working directory on the master
mkdir -pv /opt/kubernetes/{cfg,ssl,bin,logs}
Copy the certificates into /opt/kubernetes/ssl
# distribute the k8s certificates
cp ca*pem server*pem /opt/kubernetes/ssl/
Unpack the kubernetes server binaries and copy them to the target directories
cp kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler} /opt/kubernetes/bin/
cp kubernetes/server/bin/kubectl /usr/bin/
Create and distribute the token file
# head -c 16 /dev/urandom | od -An -t x | tr -d ' '   # generate a token and write it into token.csv
5215984ab589158ffaf315249da4c0c9
# vim /opt/kubernetes/cfg/token.csv   # format: token,user,uid,group
5215984ab589158ffaf315249da4c0c9,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
Create the k8s component configuration files
1. Create apiserver.sh and deploy kube-apiserver
vim apiserver.sh
#!/bin/bash
MASTER_ADDRESS=$1
ETCD_SERVERS=$2
# Generate the kube-apiserver configuration file in the k8s working directory
cat <<EOF >/opt/kubernetes/cfg/kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=true \\
--v=4 \\
--etcd-servers=${ETCD_SERVERS} \\
--bind-address=${MASTER_ADDRESS} \\
--secure-port=6443 \\
--advertise-address=${MASTER_ADDRESS} \\
--allow-privileged=true \\
--service-cluster-ip-range=10.0.0.0/24 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--kubelet-https=true \\
--enable-bootstrap-token-auth \\
--token-auth-file=/opt/kubernetes/cfg/token.csv \\
--service-node-port-range=30000-50000 \\
--tls-cert-file=/opt/kubernetes/ssl/server.pem \\
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/opt/etcd/ssl/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/server.pem \\
--etcd-keyfile=/opt/etcd/ssl/server-key.pem"
EOF
# Generate the systemd unit file
cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
# Start the apiserver
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl restart kube-apiserver
Run the script to generate the kube-apiserver files
bash apiserver.sh 192.168.10.20 https://192.168.10.20:2379,https://192.168.10.22:2379,https://192.168.10.23:2379
Check that the listening ports are up
[root@master1 k8s]# netstat -natp | egrep "6443|8080"
tcp 0 0 192.168.10.20:6443 0.0.0.0:* LISTEN 8614/kube-apiserver
tcp 0 0 127.0.0.1:8080 0.0.0.0:* LISTEN 8614/kube-apiserver
tcp 0 0 192.168.10.20:40278 192.168.10.20:6443 ESTABLISHED 8614/kube-apiserver
tcp 0 0 192.168.10.20:6443 192.168.10.20:40278 ESTABLISHED 8614/kube-apiserver
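As an additional sanity check (assuming the default insecure port on 127.0.0.1:8080 is enabled, as the netstat output above shows), the health endpoint should return ok:
curl http://127.0.0.1:8080/healthz   # expected output: ok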
2. Create scheduler.sh and deploy kube-scheduler
vim scheduler.sh
#!/bin/bash
MASTER_ADDRESS=$1
cat <<EOF >/opt/kubernetes/cfg/kube-scheduler
KUBE_SCHEDULER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect"
EOF
cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl restart kube-scheduler
Run the script to generate the kube-scheduler files
bash scheduler.sh 127.0.0.1
3. Create controller-manager.sh and deploy kube-controller-manager
vim controller-manager.sh
#!/bin/bash
MASTER_ADDRESS=$1
cat <<EOF >/opt/kubernetes/cfg/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect=true \\
--address=127.0.0.1 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s"
EOF
cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl restart kube-controller-manager
Run the script to generate the kube-controller-manager files
bash controller-manager.sh 127.0.0.1
Check the cluster component status
[root@master1 k8s]# kubectl get cs
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-0 Healthy {"health":"true"}
etcd-2 Healthy {"health":"true"}
etcd-1 Healthy {"health":"true"}
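kubectl cluster-info additionally shows which apiserver endpoint kubectl is talking to:
kubectl cluster-info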
VI. Deploy the k8s nodes
1. Create the working directory
mkdir -pv /opt/kubernetes/{cfg,ssl,bin,logs}
2. Copy kubelet and kube-proxy (taken from the unpacked kubernetes server tarball) into the working directory
cp kubelet kube-proxy /opt/kubernetes/bin/
3. Copy the certificate files
cp kube-proxy-key.pem kube-proxy.pem /opt/kubernetes/ssl/
4. Create the kubeconfig script
vim kubeconfig.sh
# Create the TLS bootstrapping token
#BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
# Must match the token in /opt/kubernetes/cfg/token.csv
BOOTSTRAP_TOKEN=5215984ab589158ffaf315249da4c0c9
cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
#----------------------
APISERVER=$1
SSL_DIR=$2
# Create the kubelet bootstrapping kubeconfig
export KUBE_APISERVER="https://$APISERVER:6443"
# Set cluster parameters
kubectl config set-cluster kubernetes \
--certificate-authority=$SSL_DIR/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=bootstrap.kubeconfig
# Set client credentials
kubectl config set-credentials kubelet-bootstrap \
--token=${BOOTSTRAP_TOKEN} \
--kubeconfig=bootstrap.kubeconfig
# Set the context
kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=bootstrap.kubeconfig
# Use the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
#----------------------
# Create the kube-proxy kubeconfig
kubectl config set-cluster kubernetes \
--certificate-authority=$SSL_DIR/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
--client-certificate=$SSL_DIR/kube-proxy.pem \
--client-key=$SSL_DIR/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
5. Run the script to generate the kubeconfig files
bash kubeconfig.sh 192.168.10.20 /opt/kubernetes/ssl
6. Distribute the generated files
cp bootstrap.kubeconfig kube-proxy.kubeconfig /opt/kubernetes/cfg/
7. Create the bootstrap role binding so kubelets can request certificate signing from the apiserver (important)
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
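A quick check that the binding was created:
kubectl get clusterrolebinding kubelet-bootstrap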
8. Deploy the kubelet
Write the kubelet.sh script:
vim kubelet.sh
#!/bin/bash
NODE_ADDRESS=$1
DNS_SERVER_IP=${2:-"10.0.0.2"}
cat <<EOF >/opt/kubernetes/cfg/kubelet
KUBELET_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--config=/opt/kubernetes/cfg/kubelet.config \\
--cert-dir=/opt/kubernetes/ssl \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
EOF
cat <<EOF >/opt/kubernetes/cfg/kubelet.config
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: ${NODE_ADDRESS}
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- ${DNS_SERVER_IP}
clusterDomain: cluster.local.
failSwapOn: false
authentication:
  anonymous:
    enabled: true
EOF
cat <<EOF >/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet
Run the script; the kubelet starts and sends a certificate signing request to the master
bash kubelet.sh 192.168.10.22
9. Deploy kube-proxy
Write the proxy.sh script:
vim proxy.sh
#!/bin/bash
NODE_ADDRESS=$1
cat <<EOF >/opt/kubernetes/cfg/kube-proxy
KUBE_PROXY_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--cluster-cidr=10.0.0.0/24 \\
--proxy-mode=ipvs \\
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"
EOF
cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy
On the node, run the script to start kube-proxy and check that the service is healthy
bash proxy.sh 192.168.10.22
On the master, check for the node's certificate signing request
kubectl get csr
NAME AGE SIGNERNAME REQUESTOR CONDITION
node-csr-hi3DDOtGe_-9sSAeQg0xHTd8eH2Q_unRtzsgTw8DIDA 2m42s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Pending
Approve the request on the master so the certificate is issued, then confirm the node has joined
# kubectl certificate approve node-csr-hi3DDOtGe_-9sSAeQg0xHTd8eH2Q_unRtzsgTw8DIDA
certificatesigningrequest.certificates.k8s.io/node-csr-hi3DDOtGe_-9sSAeQg0xHTd8eH2Q_unRtzsgTw8DIDA approved
# kubectl get nodes
NAME STATUS ROLES AGE VERSION
192.168.10.22 NotReady <none> 6s v1.18.4
VII. Deploy the node network components: CNI, CoreDNS, flannel
1. Deploy the CNI plugins
mkdir -p /opt/cni/bin /etc/cni/net.d
tar zxvf cni-plugins-linux-amd64-v0.8.2.tgz -C /opt/cni/bin
# Enable CNI in the kubelet: the flag must be part of the quoted KUBELET_OPTS string, so edit the file rather than appending a new line
vim /opt/kubernetes/cfg/kubelet   # add --network-plugin=cni inside KUBELET_OPTS
systemctl restart kubelet
2. Deploy flannel from the binary release
Write the flannel network configuration into etcd
/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.10.20:2379,https://192.168.10.22:2379,https://192.168.10.23:2379" set /coreos.com/network/config '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}'
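You can read the key back to confirm the configuration was stored:
/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.10.20:2379,https://192.168.10.22:2379,https://192.168.10.23:2379" get /coreos.com/network/config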
Download the flannel binary release:
https://github.com/coreos/flannel/releases
Deploy and configure flannel by running the script below, which generates the configuration file and the systemd units
bash flannel.sh https://192.168.10.20:2379,https://192.168.10.22:2379,https://192.168.10.23:2379   # etcd cluster endpoints
cat flannel.sh
#!/bin/bash
ETCD_ENDPOINTS=${1:-"http://127.0.0.1:2379"}
cat <<EOF >/opt/kubernetes/cfg/flanneld
FLANNEL_OPTIONS="--etcd-endpoints=${ETCD_ENDPOINTS} \
-etcd-cafile=/opt/etcd/ssl/ca.pem \
-etcd-certfile=/opt/etcd/ssl/server.pem \
-etcd-keyfile=/opt/etcd/ssl/server-key.pem"
EOF
cat <<EOF >/usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service
[Service]
Type=notify
EnvironmentFile=/opt/kubernetes/cfg/flanneld
ExecStart=/opt/kubernetes/bin/flanneld --ip-masq \$FLANNEL_OPTIONS
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
cat <<EOF >/usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd \$DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP \$MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable flanneld
systemctl restart flanneld
systemctl restart docker
Check that the network was configured successfully
ps -ef|grep docker
root 23303 1 0 16:42 ? 00:00:01 /usr/bin/dockerd --bip=172.17.87.1/24 --ip-masq=false --mtu=1450
ifconfig should show the flannel interface and docker0 on the same subnet
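The docker network options derived from the flannel subnet can also be inspected directly (the file path configured in the service units above):
cat /run/flannel/subnet.env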
3. Secure kubelet access (disable anonymous access, enable webhook/x509 authentication)
This is a node-side kubelet configuration, not a cluster API object, so it is written to each node and picked up by restarting the kubelet rather than being applied with kubectl. On every node, replace /opt/kubernetes/cfg/kubelet.config with:
cat > /opt/kubernetes/cfg/kubelet.config <<EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /opt/kubernetes/ssl/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF
systemctl restart kubelet
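With webhook authorization turned on, the identity the apiserver presents when calling the kubelet (here the CN "kubernetes", assuming the apiserver is also given --kubelet-client-certificate/--kubelet-client-key pointing at a certificate signed by the same CA) needs RBAC permission on the kubelet API, otherwise kubectl logs/exec will be refused. A minimal sketch of such a binding, run once on the master; the role name and resource list are assumptions, adjust as needed:
cat > apiserver-to-kubelet-rbac.yaml <<EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:kube-apiserver-to-kubelet
rules:
- apiGroups: [""]
  resources:
  - nodes/proxy
  - nodes/stats
  - nodes/log
  - nodes/spec
  - nodes/metrics
  verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: kubernetes
EOF
kubectl apply -f apiserver-to-kubelet-rbac.yaml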