Kubernetes Installation and Deployment
I. Environment Preparation
1. Eight servers are prepared: three masters, three nodes, and two servers running an nginx + Keepalived high-availability cluster with virtual IP 172.21.74.10.
2. Set the hostname
hostnamectl set-hostname <hostname>
3. Update the hosts file
Append the following entries to /etc/hosts on every node:
172.21.74.2 k8s-master01-sit
172.21.74.3 k8s-master02-sit
172.21.74.4 k8s-master03-sit
172.21.74.5 k8s-node01-sit
172.21.74.6 k8s-node02-sit
172.21.74.7 k8s-node03-sit
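To avoid editing each node by hand, the same entries can be appended on all six machines in one pass. A minimal sketch, assuming the passwordless root SSH from step 7 below is already in place:
# push the same /etc/hosts entries to every node
for host in 172.21.74.{2..7}; do
ssh root@${host} 'cat >> /etc/hosts' <<'HOSTS'
172.21.74.2 k8s-master01-sit
172.21.74.3 k8s-master02-sit
172.21.74.4 k8s-master03-sit
172.21.74.5 k8s-node01-sit
172.21.74.6 k8s-node02-sit
172.21.74.7 k8s-node03-sit
HOSTS
done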
4. Install dependencies
yum install -y socat conntrack ipvsadm ipset jq sysstat curl iptables libseccomp yum-utils
5. Disable the firewall, SELinux, and swap; reset iptables
Disable SELinux:
setenforce 0
sed -i '/SELINUX/s/enforcing/disabled/' /etc/selinux/config
Stop and disable the firewall:
systemctl stop firewalld && systemctl disable firewalld
Reset the iptables rules:
iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat && iptables -P FORWARD ACCEPT
Disable swap (also comment out the swap entry in /etc/fstab so it stays off after a reboot):
swapoff -a && free -h
Stop dnsmasq (otherwise containers may fail to resolve domain names):
service dnsmasq stop && systemctl disable dnsmasq
6. Kernel parameters for Kubernetes
Create the configuration file:
cat > /etc/sysctl.d/kubernetes.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
vm.overcommit_memory = 1
EOF
Apply the settings:
sysctl -p /etc/sysctl.d/kubernetes.conf
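If sysctl -p fails with "No such file or directory" for the net.bridge.* keys, the br_netfilter module is not loaded yet. A small pre-step worth running on every node:
# load the bridge netfilter module now and on every boot
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
sysctl -p /etc/sysctl.d/kubernetes.conf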
7. Set up passwordless SSH login (to simplify distributing certificates and binaries)
Check whether an RSA public key already exists:
cat ~/.ssh/id_rsa.pub
If it does not exist, generate a new one:
ssh-keygen -t rsa
Append the contents of id_rsa.pub to the authorized_keys file on each of the other machines:
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
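As an alternative sketch, ssh-copy-id automates the append and sets the right permissions, assuming the hostnames from /etc/hosts above and root logins:
# push the local public key to every other node
for host in k8s-master0{1..3}-sit k8s-node0{1..3}-sit; do
ssh-copy-id root@${host}
done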
8. Download the packages
cd /data/services/k8s
export VERSION=v1.20.2
Download the master (control plane) components:
wget https://storage.googleapis.com/kubernetes-release/release/${VERSION}/bin/linux/amd64/kube-apiserver
wget https://storage.googleapis.com/kubernetes-release/release/${VERSION}/bin/linux/amd64/kube-controller-manager
wget https://storage.googleapis.com/kubernetes-release/release/${VERSION}/bin/linux/amd64/kube-scheduler
wget https://storage.googleapis.com/kubernetes-release/release/${VERSION}/bin/linux/amd64/kubectl
Download the worker node components:
wget https://storage.googleapis.com/kubernetes-release/release/${VERSION}/bin/linux/amd64/kube-proxy
wget https://storage.googleapis.com/kubernetes-release/release/${VERSION}/bin/linux/amd64/kubelet
Download etcd:
wget https://github.com/etcd-io/etcd/releases/download/v3.4.10/etcd-v3.4.10-linux-amd64.tar.gz
tar -xvf etcd-v3.4.10-linux-amd64.tar.gz
mv etcd-v3.4.10-linux-amd64/etcd* .
rm -fr etcd-v3.4.10-linux-amd64*
Make all the binaries executable:
chmod +x kube*
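Before distributing the binaries it is worth checking their integrity. A hedged sketch: it assumes the release bucket publishes a .sha256 file (containing the bare hex digest) alongside each binary at the same path:
# verify each downloaded binary against its published checksum
for bin in kube-apiserver kube-controller-manager kube-scheduler kubectl kube-proxy kubelet; do
echo "$(curl -sL https://storage.googleapis.com/kubernetes-release/release/${VERSION}/bin/linux/amd64/${bin}.sha256)  ${bin}" | sha256sum --check
done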
9. Distribute the packages
Distribute the master components to the master nodes:
MASTERS=(k8s-master01-sit k8s-master02-sit k8s-master03-sit)
for instance in ${MASTERS[@]}; do
scp kube-apiserver kube-controller-manager kube-scheduler kubectl root@${instance}:/usr/local/bin/
done
Distribute the worker components to the worker nodes:
WORKERS=(k8s-node01-sit k8s-node02-sit k8s-node03-sit)
for instance in ${WORKERS[@]}; do
scp kubelet kube-proxy root@${instance}:/usr/local/bin/
done
Distribute the etcd binaries to the etcd nodes:
ETCDS=(k8s-master01-sit k8s-master02-sit k8s-master03-sit)
for instance in ${ETCDS[@]}; do
scp etcd etcdctl root@${instance}:/usr/local/bin/
done
II. Generating Certificates
Download cfssl:
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/local/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O /usr/local/bin/cfssljson
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson
cfssl version
Meaning of the CSR fields (CN, C, ST, L, O, OU):
CN: Common Name; the apiserver extracts this field as the request's user name. Do not change it.
C: Country
ST: State or province
L: Locality (city)
O: Organization Name; the apiserver extracts this field as the group the requesting user belongs to. Do not change it.
OU: Organization Unit Name (department within the company)
Generate the certificates with the generate_certificate.sh script:
generate_certificate.sh
#!/bin/bash
#Generate the Kubernetes certificates and distribute them to the masters and nodes
#1. Generate the root CA certificate
#Write the CA configuration file
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": ["signing", "key encipherment", "server auth", "client auth"],
        "expiry": "876000h"
      }
    }
  }
}
EOF
cat > ca-csr.json <<EOF
{
  "CN": "Kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "Kubernetes",
      "OU": "CA",
      "ST": "Oregon"
    }
  ]
}
EOF
#Generate the CA certificate and private key
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
#2. admin client certificate
#admin client certificate CSR file
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:masters",
      "OU": "seven"
    }
  ]
}
EOF
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
admin-csr.json | cfssljson -bare admin
#3. kubelet client certificates
#Generate a client certificate and private key for each worker node
WORKERS=(k8s-node01-sit k8s-node02-sit k8s-node03-sit)
WORKER_IPS=(172.21.74.5 172.21.74.6 172.21.74.7)
for ((i=0;i<${#WORKERS[@]};i++)); do
cat > ${WORKERS[$i]}-csr.json <<EOF
{
  "CN": "system:node:${WORKERS[$i]}",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "O": "system:nodes",
      "OU": "seven",
      "ST": "Beijing"
    }
  ]
}
EOF
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-hostname=${WORKERS[$i]},${WORKER_IPS[$i]} \
-profile=kubernetes \
${WORKERS[$i]}-csr.json | cfssljson -bare ${WORKERS[$i]}
done
#4. kube-controller-manager client certificate
#kube-controller-manager client certificate CSR file
cat > kube-controller-manager-csr.json <<EOF
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:kube-controller-manager",
      "OU": "seven"
    }
  ]
}
EOF
#Generate the kube-controller-manager client certificate
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
#5. kube-proxy client certificate
#kube-proxy client certificate CSR file
cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "seven"
    }
  ]
}
EOF
#Generate the kube-proxy client certificate
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
kube-proxy-csr.json | cfssljson -bare kube-proxy
#6. kube-scheduler client certificate
#kube-scheduler client certificate CSR file
cat > kube-scheduler-csr.json <<EOF
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:kube-scheduler",
      "OU": "seven"
    }
  ]
}
EOF
#Generate the kube-scheduler client certificate
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
kube-scheduler-csr.json | cfssljson -bare kube-scheduler
#7. kube-apiserver server certificate
#kube-apiserver server certificate CSR file
cat > kubernetes-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "seven"
    }
  ]
}
EOF
# apiserver service IP (usually the first IP in the service CIDR)
KUBERNETES_SVC_IP=10.10.0.1
# Internal IPs of all masters, comma-separated (in a cloud environment you can add the masters' public IPs to allow access over the public network)
MASTER_IPS=172.21.74.2,172.21.74.3,172.21.74.4
# Generate the certificate
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-hostname=${KUBERNETES_SVC_IP},${MASTER_IPS},127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local \
-profile=kubernetes \
kubernetes-csr.json | cfssljson -bare kubernetes
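A quick sanity check that every expected name and IP made it into the server certificate's SANs:
# inspect the Subject Alternative Name extension
openssl x509 -in kubernetes.pem -noout -text | grep -A1 'Subject Alternative Name'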
#8. Service Account certificate
#CSR file
cat > service-account-csr.json <<EOF
{
  "CN": "service-accounts",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "seven"
    }
  ]
}
EOF
#Generate the certificate
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
service-account-csr.json | cfssljson -bare service-account
#9. proxy-client certificate
#CSR file
cat > proxy-client-csr.json <<EOF
{
  "CN": "aggregator",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "seven"
    }
  ]
}
EOF
#Generate the certificate
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
proxy-client-csr.json | cfssljson -bare proxy-client
#10. Distribute the client and server certificates
#Distribute the certificates and private keys needed by the worker nodes
WORKERS=(k8s-node01-sit k8s-node02-sit k8s-node03-sit)
for instance in ${WORKERS[@]}; do
scp ca.pem ca-key.pem ${instance}-key.pem ${instance}.pem root@${instance}:~/
done
#Distribute the certificates and private keys needed by the master nodes
MASTER_IPS=172.21.74.2,172.21.74.3,172.21.74.4
OIFS=$IFS
IFS=','
for instance in ${MASTER_IPS}; do
scp ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem \
service-account-key.pem service-account.pem proxy-client.pem proxy-client-key.pem root@${instance}:~/
done
IFS=$OIFS
III. Authentication Configuration (kubeconfig) for the Kubernetes Components
1. kubelet
cd /data/services/k8s/ssl
WORKERS=(k8s-node01-sit k8s-node02-sit k8s-node03-sit)
for instance in ${WORKERS[@]}; do
kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:6443 \
--kubeconfig=${instance}.kubeconfig
kubectl config set-credentials system:node:${instance} \
--client-certificate=${instance}.pem \
--client-key=${instance}-key.pem \
--embed-certs=true \
--kubeconfig=${instance}.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=system:node:${instance} \
--kubeconfig=${instance}.kubeconfig
kubectl config use-context default --kubeconfig=${instance}.kubeconfig
done
2. kube-proxy
kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:6443 \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials system:kube-proxy \
--client-certificate=kube-proxy.pem \
--client-key=kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=system:kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
3. kube-controller-manager
kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:6443 \
--kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-credentials system:kube-controller-manager \
--client-certificate=kube-controller-manager.pem \
--client-key=kube-controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=kube-controller-manager.kubeconfig
kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig
4. kube-scheduler
kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:6443 \
--kubeconfig=kube-scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler \
--client-certificate=kube-scheduler.pem \
--client-key=kube-scheduler-key.pem \
--embed-certs=true \
--kubeconfig=kube-scheduler.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=kube-scheduler.kubeconfig
kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig
5. admin user configuration
kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:6443 \
--kubeconfig=admin.kubeconfig
kubectl config set-credentials admin \
--client-certificate=admin.pem \
--client-key=admin-key.pem \
--embed-certs=true \
--kubeconfig=admin.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=admin \
--kubeconfig=admin.kubeconfig
kubectl config use-context default --kubeconfig=admin.kubeconfig
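Each generated kubeconfig can be inspected before it is distributed, for example:
# embedded certificates are shown as REDACTED/DATA+OMITTED
kubectl config view --kubeconfig=admin.kubeconfig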
6. Distribute the kubeconfig files
WORKERS=(k8s-node01-sit k8s-node02-sit k8s-node03-sit)
for instance in ${WORKERS[@]}; do
scp ${instance}.kubeconfig kube-proxy.kubeconfig ${instance}:~/
done
MASTERS=(k8s-master01-sit k8s-master02-sit k8s-master03-sit)
for instance in ${MASTERS[@]}; do
scp admin.kubeconfig kube-controller-manager.kubeconfig kube-scheduler.kubeconfig ${instance}:~/
done
IV. Deploying the etcd Cluster
1. Copy the required certificate files
Run on all three masters:
mkdir -p /etc/etcd /var/lib/etcd
chmod 700 /var/lib/etcd
Distribute the certificates:
MASTERS=(k8s-master01-sit k8s-master02-sit k8s-master03-sit)
for instance in ${MASTERS[@]}; do
scp ca.pem kubernetes-key.pem kubernetes.pem ${instance}:/etc/etcd/
done
Configure the etcd.service unit file on all three masters:
ETCD_NAME=$(hostname -s)
#Set ETCD_IP to the local node's IP when running on each master (ETCD_IP=172.21.74.2/3/4)
ETCD_IP=172.21.74.2
# Names and IPs of all etcd nodes
ETCD_NAMES=(k8s-master01-sit k8s-master02-sit k8s-master03-sit)
ETCD_IPS=(172.21.74.2 172.21.74.3 172.21.74.4)
cat <<EOF > /etc/systemd/system/etcd.service
[Unit]
Description=etcd
Documentation=https://github.com/coreos
[Service]
Type=notify
ExecStart=/usr/local/bin/etcd \\
--name ${ETCD_NAME} \\
--cert-file=/etc/etcd/kubernetes.pem \\
--key-file=/etc/etcd/kubernetes-key.pem \\
--peer-cert-file=/etc/etcd/kubernetes.pem \\
--peer-key-file=/etc/etcd/kubernetes-key.pem \\
--trusted-ca-file=/etc/etcd/ca.pem \\
--peer-trusted-ca-file=/etc/etcd/ca.pem \\
--peer-client-cert-auth \\
--client-cert-auth \\
--initial-advertise-peer-urls https://${ETCD_IP}:2380 \\
--listen-peer-urls https://${ETCD_IP}:2380 \\
--listen-client-urls https://${ETCD_IP}:2379,https://127.0.0.1:2379 \\
--advertise-client-urls https://${ETCD_IP}:2379 \\
--initial-cluster-token etcd-cluster-0 \\
--initial-cluster ${ETCD_NAMES[0]}=https://${ETCD_IPS[0]}:2380,${ETCD_NAMES[1]}=https://${ETCD_IPS[1]}:2380,${ETCD_NAMES[2]}=https://${ETCD_IPS[2]}:2380 \\
--initial-cluster-state new \\
--data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
2. Start the etcd cluster
Once etcd.service is configured on every etcd node, start the cluster:
systemctl daemon-reload && systemctl enable etcd && systemctl restart etcd
3. Verify the etcd cluster status
ETCDCTL_API=3 etcdctl member list \
--endpoints=https://127.0.0.1:2379 \
--cacert=/etc/etcd/ca.pem \
--cert=/etc/etcd/kubernetes.pem \
--key=/etc/etcd/kubernetes-key.pem
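member list only proves membership was configured; endpoint status additionally shows which member is the leader and that each endpoint answers over TLS. A sketch run against all three endpoints:
ETCDCTL_API=3 etcdctl endpoint status --write-out=table \
  --endpoints=https://172.21.74.2:2379,https://172.21.74.3:2379,https://172.21.74.4:2379 \
  --cacert=/etc/etcd/ca.pem \
  --cert=/etc/etcd/kubernetes.pem \
  --key=/etc/etcd/kubernetes-key.pem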
V. Deploying the Kubernetes Control Plane
1. Configure the API Server
# Create the required Kubernetes directories on all 3 masters
mkdir -p /etc/kubernetes/ssl
# Stage the certificate files
MASTERS=(k8s-master01-sit k8s-master02-sit k8s-master03-sit)
for instance in ${MASTERS[@]}; do
scp ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem \
service-account-key.pem service-account.pem \
proxy-client.pem proxy-client-key.pem \
${instance}:/etc/kubernetes/ssl
done
# Configure kube-apiserver.service
# Run on each of the 3 masters; IP is that node's internal IP (172.21.74.2/3/4)
IP=172.21.74.2
# Number of apiserver instances, and the etcd nodes
APISERVER_COUNT=3
ETCD_ENDPOINTS=(172.21.74.2 172.21.74.3 172.21.74.4)
# Create the apiserver service unit
cat <<EOF > /etc/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
--advertise-address=${IP} \\
--allow-privileged=true \\
--apiserver-count=${APISERVER_COUNT} \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/var/log/audit.log \\
--authorization-mode=Node,RBAC \\
--bind-address=0.0.0.0 \\
--client-ca-file=/etc/kubernetes/ssl/ca.pem \\
--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \\
--etcd-cafile=/etc/kubernetes/ssl/ca.pem \\
--etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \\
--etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \\
--etcd-servers=https://${ETCD_ENDPOINTS[0]}:2379,https://${ETCD_ENDPOINTS[1]}:2379,https://${ETCD_ENDPOINTS[2]}:2379 \\
--event-ttl=1h \\
--kubelet-certificate-authority=/etc/kubernetes/ssl/ca.pem \\
--kubelet-client-certificate=/etc/kubernetes/ssl/kubernetes.pem \\
--kubelet-client-key=/etc/kubernetes/ssl/kubernetes-key.pem \\
--service-account-issuer=api \\
--service-account-key-file=/etc/kubernetes/ssl/service-account.pem \\
--service-account-signing-key-file=/etc/kubernetes/ssl/service-account-key.pem \\
--api-audiences=api,vault,factors \\
--service-cluster-ip-range=10.10.0.0/16 \\
--service-node-port-range=30000-32767 \\
--proxy-client-cert-file=/etc/kubernetes/ssl/proxy-client.pem \\
--proxy-client-key-file=/etc/kubernetes/ssl/proxy-client-key.pem \\
--runtime-config=api/all=true \\
--requestheader-client-ca-file=/etc/kubernetes/ssl/ca.pem \\
--requestheader-allowed-names=aggregator \\
--requestheader-extra-headers-prefix=X-Remote-Extra- \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-username-headers=X-Remote-User \\
--tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \\
--tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \\
--v=1
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
2. Configure kube-controller-manager
# Stage the kubeconfig file
MASTERS=(k8s-master01-sit k8s-master02-sit k8s-master03-sit)
for instance in ${MASTERS[@]}; do
scp kube-controller-manager.kubeconfig ${instance}:/etc/kubernetes/
done
# Create kube-controller-manager.service
cat <<EOF > /etc/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-controller-manager \\
--bind-address=0.0.0.0 \\
--cluster-cidr=10.11.0.0/16 \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--cluster-signing-duration=876000h0m0s \\
--kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \\
--leader-elect=true \\
--root-ca-file=/etc/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/etc/kubernetes/ssl/service-account-key.pem \\
--service-cluster-ip-range=10.10.0.0/16 \\
--use-service-account-credentials=true \\
--v=1
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
3. Configure kube-scheduler
# Stage the kubeconfig file
MASTERS=(k8s-master01-sit k8s-master02-sit k8s-master03-sit)
for instance in ${MASTERS[@]}; do
scp kube-scheduler.kubeconfig ${instance}:/etc/kubernetes
done
# Create the scheduler service unit file
cat <<EOF > /etc/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-scheduler \\
--authentication-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \\
--authorization-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \\
--kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \\
--leader-elect=true \\
--bind-address=0.0.0.0 \\
--port=0 \\
--v=1
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
4. Start the services
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl enable kube-controller-manager
systemctl enable kube-scheduler
systemctl restart kube-apiserver
systemctl restart kube-controller-manager
systemctl restart kube-scheduler
5. Verify the services
Check the listening ports:
netstat -ntlp
# Check the system logs for errors from the components
journalctl -f
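The apiserver also exposes health endpoints directly. With the default RBAC bootstrap, the system:public-info-viewer cluster role permits unauthenticated access to /healthz, so a local check should work even before kubectl is configured:
curl -k https://127.0.0.1:6443/healthz
# once kubectl is configured in the next step, the classic overview (deprecated but still functional in v1.20):
kubectl get componentstatuses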
6. Configure kubectl
#kubectl is the client tool used to manage the Kubernetes cluster; we already downloaded it to all the master nodes. Now we configure it so it can be used.
# Create kubectl's config directory on each master and put the admin kubeconfig in the default location
MASTERS=(k8s-master01-sit k8s-master02-sit k8s-master03-sit)
for instance in ${MASTERS[@]}; do
ssh ${instance} "mkdir -p ~/.kube"
scp ~/admin.kubeconfig ${instance}:~/.kube/config
done
# Test it
kubectl get nodes
#When kubectl exec, run, logs, etc. are executed, the apiserver forwards the request to the kubelet. The RBAC rule below authorizes the apiserver to call the kubelet API.
kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes
VI. Deploying the Kubernetes Worker Nodes
On each worker node we deploy kubelet, kube-proxy, the container runtime (containerd), CNI, and nginx-proxy.
1. Install containerd
This test environment uses containerd instead of Docker.
Prerequisites: load the overlay and br_netfilter kernel modules (persisted across reboots via modules-load.d):
cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter
1.1 Download
mkdir /data/services/containerd
cd /data/services/containerd
# Set the containerd version
VERSION=1.4.3
# Download the tarball
wget https://github.com/containerd/containerd/releases/download/v${VERSION}/cri-containerd-cni-${VERSION}-linux-amd64.tar.gz
tar xf cri-containerd-cni-${VERSION}-linux-amd64.tar.gz
1.2 Copy the bundled files into the system directories
cp -r etc/ /
cp -r usr/ /
containerd config default > /etc/containerd/config.toml
1.3 Start containerd
systemctl restart containerd.service
systemctl status containerd.service
systemctl enable containerd.service
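The cri-containerd bundle also ships crictl, which makes a quick functional test possible. A small sketch; the socket path matches the --container-runtime-endpoint passed to kubelet below:
# point crictl at containerd's CRI socket, then query the runtime status
cat <<EOF > /etc/crictl.yaml
runtime-endpoint: unix:///var/run/containerd/containerd.sock
EOF
crictl info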
2. Configure kubelet
mkdir -p /etc/kubernetes/ssl/
mv ${HOSTNAME}-key.pem ${HOSTNAME}.pem ca.pem ca-key.pem /etc/kubernetes/ssl/
mv ${HOSTNAME}.kubeconfig /etc/kubernetes/kubeconfig
#IP = this node's IP (172.21.74.5/6/7)
IP=172.21.74.5
# Write the kubelet configuration file
cat <<EOF > /etc/kubernetes/kubelet-config.yaml
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    enabled: true
  x509:
    clientCAFile: "/etc/kubernetes/ssl/ca.pem"
authorization:
  mode: Webhook
clusterDomain: "cluster.local"
clusterDNS:
  - "10.10.0.10"
podCIDR: "10.11.0.0/16"
address: ${IP}
readOnlyPort: 0
staticPodPath: /etc/kubernetes/manifests
healthzPort: 10248
healthzBindAddress: 127.0.0.1
kubeletCgroups: /systemd/system.slice
resolvConf: "/etc/resolv.conf"
runtimeRequestTimeout: "15m"
kubeReserved:
  cpu: 200m
  memory: 512M
tlsCertFile: "/etc/kubernetes/ssl/${HOSTNAME}.pem"
tlsPrivateKeyFile: "/etc/kubernetes/ssl/${HOSTNAME}-key.pem"
EOF
Create the kubelet service unit:
cat <<EOF > /etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service
[Service]
ExecStart=/usr/local/bin/kubelet \\
--config=/etc/kubernetes/kubelet-config.yaml \\
--container-runtime=remote \\
--container-runtime-endpoint=unix:///var/run/containerd/containerd.sock \\
--image-pull-progress-deadline=2m \\
--kubeconfig=/etc/kubernetes/kubeconfig \\
--network-plugin=cni \\
--node-ip=${IP} \\
--register-node=true \\
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
3. Configure nginx-proxy
nginx-proxy is a local proxy through which worker nodes reach the apiserver, and an elegant high-availability scheme for it: launched as a kubelet static pod on every worker, it load-balances requests across all apiserver instances, neatly replacing the virtual-IP approach to apiserver access.
Tip: nginx-proxy only needs to be deployed on nodes that do not run an apiserver.
3.1 nginx configuration file
mkdir -p /etc/nginx
MASTER_IPS=(172.21.74.2 172.21.74.3 172.21.74.4)
# Copy this file first and adjust the 'server' entries in the upstream block before running
cat <<EOF > /etc/nginx/nginx.conf
error_log stderr notice;
worker_processes 2;
worker_rlimit_nofile 130048;
worker_shutdown_timeout 10s;
events {
  multi_accept on;
  use epoll;
  worker_connections 16384;
}
stream {
  upstream kube_apiserver {
    least_conn;
    server ${MASTER_IPS[0]}:6443;
    server ${MASTER_IPS[1]}:6443;
    server ${MASTER_IPS[2]}:6443;
  }
  server {
    listen 127.0.0.1:6443;
    proxy_pass kube_apiserver;
    proxy_timeout 10m;
    proxy_connect_timeout 1s;
  }
}
http {
  aio threads;
  aio_write on;
  tcp_nopush on;
  tcp_nodelay on;
  keepalive_timeout 5m;
  keepalive_requests 100;
  reset_timedout_connection on;
  server_tokens off;
  autoindex off;
  server {
    listen 8081;
    location /healthz {
      access_log off;
      return 200;
    }
    location /stub_status {
      stub_status on;
      access_log off;
    }
  }
}
EOF
3.2 nginx-proxy manifest
mkdir -p /etc/kubernetes/manifests/
cat <<EOF > /etc/kubernetes/manifests/nginx-proxy.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx-proxy
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: kube-nginx
spec:
  hostNetwork: true
  dnsPolicy: ClusterFirstWithHostNet
  nodeSelector:
    kubernetes.io/os: linux
  priorityClassName: system-node-critical
  containers:
  - name: nginx-proxy
    image: docker.io/library/nginx:1.19
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        cpu: 25m
        memory: 32M
    securityContext:
      privileged: true
    livenessProbe:
      httpGet:
        path: /healthz
        port: 8081
    readinessProbe:
      httpGet:
        path: /healthz
        port: 8081
    volumeMounts:
    - mountPath: /etc/nginx
      name: etc-nginx
      readOnly: true
  volumes:
  - name: etc-nginx
    hostPath:
      path: /etc/nginx
EOF
4. Configure kube-proxy
4.1 Configuration file
mv kube-proxy.kubeconfig /etc/kubernetes/
# Create kube-proxy-config.yaml
cat <<EOF > /etc/kubernetes/kube-proxy-config.yaml
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
bindAddress: 0.0.0.0
clientConnection:
  kubeconfig: "/etc/kubernetes/kube-proxy.kubeconfig"
clusterCIDR: "10.11.0.0/16"
mode: ipvs
EOF
4.2 kube-proxy service unit file
cat <<EOF > /etc/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-proxy \\
--config=/etc/kubernetes/kube-proxy-config.yaml
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
5. Start the services
systemctl daemon-reload
systemctl enable kubelet kube-proxy
systemctl restart kubelet kube-proxy
journalctl -f -u kubelet
journalctl -f -u kube-proxy
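Because kube-proxy runs in ipvs mode, the cluster's services should appear as IPVS virtual servers once they exist (ipvsadm was installed with the dependencies in part I):
# list the IPVS rules kube-proxy has programmed
ipvsadm -Ln
# confirm the ip_vs kernel modules are loaded
lsmod | grep ip_vs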
6. Manually pull images (for servers without internet access)
Pull the pause image on every worker node:
crictl pull registry.cn-hangzhou.aliyuncs.com/kubernetes-kubespray/pause:3.2
ctr -n k8s.io i tag registry.cn-hangzhou.aliyuncs.com/kubernetes-kubespray/pause:3.2 k8s.gcr.io/pause:3.2
VII. Network Plugin: Calico
Reference: https://docs.projectcalico.org/getting-started/kubernetes/self-managed-onprem/onpremises
1. Download
The documentation provides two manifests, one for clusters with fewer than 50 nodes and one for larger clusters; the main difference between them is Typha.
When the node count is large, Calico's Felix agents exchange datastore updates through Typha instead of each watching kube-apiserver directly, which reduces the load on kube-apiserver. Choose the download that fits your situation.
The downloaded file is an all-in-one YAML; we only need to make a few small changes on top of it.
wget https://docs.projectcalico.org/manifests/calico.yaml
2. Change the IP autodetection
When kubelet is started with --node-ip, the status.hostIP field of pods running in host-network mode is automatically filled with the IP passed to kubelet, so Calico can take its node address from there.
Before:
- name: IP
  value: "autodetect"
After:
- name: IP
  valueFrom:
    fieldRef:
      fieldPath: status.hostIP
3. Change the CIDR
Before:
- name: CALICO_IPV4POOL_CIDR
  value: "192.168.0.0/16"
After (set it to your own pod CIDR; it must match the --cluster-cidr used above, 10.11.0.0/16 in this setup):
- name: CALICO_IPV4POOL_CIDR
  value: "10.11.0.0/16"
kubectl apply -f calico.yaml
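Watch the rollout until every calico-node pod is Running and the nodes turn Ready (the k8s-app=calico-node label is assumed from the stock manifest):
kubectl -n kube-system get pods -l k8s-app=calico-node -o wide
kubectl get nodes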
VIII. DNS Add-ons: CoreDNS and NodeLocal DNSCache
CoreDNS docs: https://coredns.io/plugins/kubernetes/
NodeLocal DNSCache:https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/
1. Deploy CoreDNS
# Download the all-in-one CoreDNS manifest (addons/coredns.yaml)
wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/coredns.yaml.sed -O coredns.yaml
# Set the CoreDNS cluster IP
COREDNS_CLUSTER_IP=10.10.0.10
# Substitute the cluster IP
sed -i "s/\${COREDNS_CLUSTER_IP}/${COREDNS_CLUSTER_IP}/g" coredns.yaml
# Create CoreDNS
kubectl apply -f coredns.yaml
2. Deploy NodeLocal DNSCache
# Download the all-in-one nodelocaldns manifest (addons/nodelocaldns.yaml)
wget https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml
# Set the CoreDNS cluster IP
COREDNS_CLUSTER_IP=10.10.0.10
# Substitute the cluster IP
sed -i "s/\${COREDNS_CLUSTER_IP}/${COREDNS_CLUSTER_IP}/g" nodelocaldns.yaml
# Create nodelocaldns
kubectl apply -f nodelocaldns.yaml
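Both add-ons land in kube-system. A quick check, with pod labels assumed from the upstream manifests (CoreDNS uses k8s-app=kube-dns, the cache uses k8s-app=node-local-dns):
kubectl -n kube-system get pods -l k8s-app=kube-dns
kubectl -n kube-system get pods -l k8s-app=node-local-dns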
IX. Cluster Smoke Tests
- Create an nginx DaemonSet
Write the manifest:
$ cat > nginx-ds.yml <<EOF
apiVersion: v1
kind: Service
metadata:
  name: nginx-ds
  labels:
    app: nginx-ds
spec:
  type: NodePort
  selector:
    app: nginx-ds
  ports:
  - name: http
    port: 80
    targetPort: 80
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nginx-ds
spec:
  selector:
    matchLabels:
      app: nginx-ds
  template:
    metadata:
      labels:
        app: nginx-ds
    spec:
      containers:
      - name: my-nginx
        image: nginx:1.19
        ports:
        - containerPort: 80
EOF
Create the DaemonSet:
$ kubectl apply -f nginx-ds.yml
- Check IP connectivity
Check pod IP connectivity across the nodes:
$ kubectl get pods -o wide
Ping the pod IPs from every worker node:
$ ping <pod-ip>
Check service reachability:
$ kubectl get svc
Access the service from every worker node:
$ curl <service-ip>:<port>
Check the NodePort from every node:
$ curl <node-ip>:<port>
- Check DNS
Create an nginx pod:
$ cat > pod-nginx.yaml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: nginx
    image: docker.io/library/nginx:1.19
    ports:
    - containerPort: 80
EOF
Create the pod:
$ kubectl apply -f pod-nginx.yaml
Exec into the pod to check DNS:
$ kubectl exec nginx -it -- /bin/bash
Check the DNS configuration:
root@nginx:/# cat /etc/resolv.conf
Check that service names resolve correctly:
root@nginx:/# curl nginx-ds
- Logs
Verify that kubectl can fetch a pod's container logs:
$ kubectl get pods
$ kubectl logs <pod-name>
- Exec
Verify kubectl's exec functionality:
$ kubectl get pods -l app=nginx-ds
$ kubectl exec -it <nginx-pod-name> -- nginx -v