二进制方式搭建kubernetes集群
1、创建机器 与 操作系统初始化
1)准备4台linux机器(1个master,3个node),版本7.x,RAM2G,CPU*2,硬盘>20G,
2)机器之间网络互通,并能访问外网
3)禁止swap分区:swapoff -a(临时生效),并注释 /etc/fstab 中的 swap 行(永久生效)
4)关闭防火墙,开机不自启
5)关闭selinux,开机不自启
6) 4台机器分别设置不同的hostname主机名(如:master,node1,node2,node3)
7)master机器添加hosts解析(/etc/hosts,非DNS):
[root@centos75 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.44.81 node1 # 新增
192.168.44.82 node2 # 新增
192.168.44.83 node3 # 新增
8)将桥接的IPv4流量传递到iptables的链(所有机器执行)
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system # 使上面的生效
9)设置时间同步(所有机器执行)
[root@master1 ~]# yum -y install ntpdate
[root@master1 ~]# ntpdate time.windows.com
13 Apr 21:46:06 ntpdate[9828]: adjust time server 51.105.208.173 offset 0.001614
2、部署ETCD集群
1)准备cfssl证书生成工具
[root@master1 k8s] wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
[root@master1 k8s] wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
[root@master1 k8s] wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
[root@master1 k8s] chmod +x cfssl*
[root@master1 k8s] ll
-rwxr-xr-x 1 root root 6595195 3月 30 2016 cfssl-certinfo_linux-amd64
-rwxr-xr-x 1 root root 2277873 3月 30 2016 cfssljson_linux-amd64
-rwxr-xr-x 1 root root 10376657 3月 30 2016 cfssl_linux-amd64
[root@master1 k8s] mv cfssl_linux-amd64 /usr/local/bin/cfssl
[root@master1 k8s] mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
[root@master1 k8s] mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
2)生成ETCD证书
# 1、自签证书颁发机构
[root@master1 etcd]# ll
总用量 20
-rw-r--r-- 1 root root 369 4月 13 22:10 ca-config.json
-rw-r--r-- 1 root root 200 4月 13 22:11 ca-csr.json
[root@master1 etcd]# cat ca-csr.json
{
"CN":"etcdCA",
"key":{
"algo":"rsa",
"size":2048
},
"names":[
{
"C":"CN",
"L":"Beijing",
"ST":"Beijing"
}
]
}
[root@master1 etcd]# cat ca-config.json
{
"signing":{
"default":{
"expiry":"87600h"
},
"profiles":{
"www":{
"expiry":"87600h",
"usages":[
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
2、生成证书
[root@master1 etcd]# cfssl gencert -initca ca-csr.json |cfssljson -bare ca -
2021/04/13 22:12:55 [INFO] generating a new CA key and certificate from CSR
2021/04/13 22:12:55 [INFO] generate received request
2021/04/13 22:12:55 [INFO] received CSR
2021/04/13 22:12:55 [INFO] generating key: rsa-2048
2021/04/13 22:12:55 [INFO] encoded CSR
2021/04/13 22:12:55 [INFO] signed certificate with serial number 58359001854869331265800998975869456974201732930
[root@master1 etcd]# ll
总用量 20
-rw-r--r-- 1 root root 369 4月 13 22:10 ca-config.json
-rw-r--r-- 1 root root 952 4月 13 22:12 ca.csr
-rw-r--r-- 1 root root 200 4月 13 22:11 ca-csr.json
-rw------- 1 root root 1675 4月 13 22:12 ca-key.pem
-rw-r--r-- 1 root root 1265 4月 13 22:12 ca.pem
3、使用自签CA签发ETCD HTTPS证书
[root@master1 etcd]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
2021/04/13 22:23:48 [INFO] generate received request
2021/04/13 22:23:48 [INFO] received CSR
2021/04/13 22:23:48 [INFO] generating key: rsa-2048
2021/04/13 22:23:48 [INFO] encoded CSR
2021/04/13 22:23:48 [INFO] signed certificate with serial number 549799644328172364104961259989995803035534916274
2021/04/13 22:23:48 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@master1 etcd]# ll
总用量 36
-rw-r--r-- 1 root root 369 4月 13 22:10 ca-config.json
-rw-r--r-- 1 root root 952 4月 13 22:12 ca.csr
-rw-r--r-- 1 root root 200 4月 13 22:11 ca-csr.json
-rw------- 1 root root 1675 4月 13 22:12 ca-key.pem
-rw-r--r-- 1 root root 1265 4月 13 22:12 ca.pem
-rw-r--r-- 1 root root 1070 4月 13 22:23 server.csr
-rw-r--r-- 1 root root 468 4月 13 22:23 server-csr.json
-rw------- 1 root root 1675 4月 13 22:23 server-key.pem
-rw-r--r-- 1 root root 1350 4月 13 22:23 server.pem
[root@master1 etcd]# cat server-csr.json
{
"CN":"etcd",
"hosts":[ # 集群所有节点ip
"192.168.44.80",
"192.168.44.81",
"192.168.44.82"
],
"key":{
"algo":"rsa",
"size":2048
},
"names":[
{
"C":"CN",
"L":"BeiJing",
"ST":"BeiJing"
}
]
}
3)部署ETCD集群
# 1.获取etcd二进制文件并解压,将etcd与etcdctl移动至/opt/etcd/bin下
[root@master1] wget https://github.com/etcd-io/etcd/releases/download/v3.4.9/etcd-v3.4.9-linux-amd64.tar.gz
[root@master1] tar zxvf etcd-v3.4.9-linux-amd64.tar.gz
[root@master1] mkdir -p /opt/etcd/{bin,cfg,ssl}
[root@master1] mv etcd-v3.4.9-linux-amd64/{etcd,etcdctl} /opt/etcd/bin/
# 2.创建etcd服务文件,内容如下
[root@master1] vi /usr/lib/systemd/system/etcd.service
[root@master1] cat !$
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/opt/etcd/cfg/etcd.conf
ExecStart=/opt/etcd/bin/etcd \
--cert-file=/opt/etcd/ssl/server.pem \
--key-file=/opt/etcd/ssl/server-key.pem \
--peer-cert-file=/opt/etcd/ssl/server.pem \
--peer-key-file=/opt/etcd/ssl/server-key.pem \
--trusted-ca-file=/opt/etcd/ssl/ca.pem \
--peer-trusted-ca-file=/opt/etcd/ssl/ca.pem \
--logger=zap
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
# 3.创建etcd配置文件,内容如下:
[root@master1] cd /opt/etcd/cfg
[root@master1] vi etcd.conf
#[Member]
ETCD_NAME="etcd-1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.44.80:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.44.80:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.44.80:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.44.80:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.44.80:2380,etcd-2=https://192.168.44.81:2380,etcd-3=https://192.168.44.82:2380" #集群节点信息
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
# 4.拷贝之前生成的证书至/opt/etcd/ssl/下
[root@master1] cp /root/TLS/etcd/ca*pem /opt/etcd/ssl/
[root@master1] cp /root/TLS/etcd/server*pem /opt/etcd/ssl/
# 5.启动并开机自启
[root@master1] systemctl daemon-reload
[root@master1] systemctl start etcd
[root@master1] systemctl enable etcd
# 6.将/usr/lib/systemd/system/etcd.service文件与/opt/etcd目录拷贝至集群中其他节点的相同路径下
[root@master1] scp -r /opt/etcd root@192.168.44.81:/opt
[root@master1] scp -r /opt/etcd root@192.168.44.82:/opt
[root@master1] scp /usr/lib/systemd/system/etcd.service root@192.168.44.81:/usr/lib/systemd/system/etcd.service
[root@master1] scp /usr/lib/systemd/system/etcd.service root@192.168.44.82:/usr/lib/systemd/system/etcd.service
# 更改etcd.conf文件的名字与ip(上一步复制的节点都要设置)
[root@node1 ~]# cat /opt/etcd/cfg/etcd.conf
#[Member]
ETCD_NAME="etcd-2" # 这一行要改,和下面的ETCD_INITIAL_CLUSTER对应
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.44.81:2380" # 本机ip
ETCD_LISTEN_CLIENT_URLS="https://192.168.44.81:2379" # 本机ip
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.44.81:2380" # 本机ip
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.44.81:2379" # 本机ip
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.44.80:2380,etcd-2=https://192.168.44.81:2380,etcd-3=https://192.168.44.82:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
# 其他节点同样启动与设置开机自启
[root@node1] systemctl daemon-reload
[root@node1] systemctl start etcd
[root@node1] systemctl enable etcd
[root@node2] systemctl daemon-reload
[root@node2] systemctl start etcd
[root@node2] systemctl enable etcd
# etcd集群部署完成
3、安装api-server(master节点操作,常见报错)
1)创建以下文件
[root@master1 apiserver]# ll | grep json
-rw-r--r-- 1 root root 379 4月 14 22:27 ca-config.json
-rw-r--r-- 1 root root 254 4月 14 21:42 ca-csr.json
-rw-r--r-- 1 root root 605 4月 14 22:14 server-csr.json
[root@master1 apiserver]# cat ca-config.json
{
"signing":{
"default":{
"expiry":"87600h"
},
"profiles":{
"kubernetes":{
"expiry":"87600h",
"usages":[
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
[root@master1 apiserver]# cat ca-csr.json
{
"CN":"kubernetes",
"key":{
"algo":"rsa",
"size":2048
},
"names":[
{
"C":"CN",
"L":"Beijing",
"ST":"Beijing",
"O":"k8s",
"OU":"System"
}
]
}
[root@master1 apiserver]# cat server-csr.json
{
"CN": "kubernetes",
"hosts": [ # ip白名单,下面192的ip换成自己集群的ip
"10.0.0.1",
"127.0.0.1",
"192.168.44.80",
"192.168.44.81",
"192.168.44.82",
"192.168.44.83",
"192.168.44.84",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "Beijing",
"ST": "Beijing",
"O": "k8s",
"OU": "System"
}
]
}
2)生成证书
[root@master1 apiserver]# cfssl gencert -initca ca-csr.json |cfssljson -bare ca -
[root@master1 apiserver]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
[root@master1 apiserver]# ls | grep pem
ca-key.pem
ca.pem
server-key.pem
server.pem
3)下载server包并解压,下载地址
[root@master1 apiserver]# ll | grep tar.gz
-rw-r--r-- 1 root root 363654483 4月 14 22:42 kubernetes-server-linux-amd64.tar.gz
[root@master1 apiserver]# tar zxvf kubernetes-server-linux-amd64.tar.gz
[root@master1 apiserver]# cd kubernetes/server/bin/
[root@master1 bin]# cp kube-apiserver kube-scheduler kube-controller-manager /opt/kubernetes/bin/
[root@master1 bin]# cp kubectl /usr/bin/
4)创建如下文件
[root@master1 bin]# vi /opt/kubernetes/cfg/kube-apiserver.conf
[root@master1 cfg]# cat /opt/kubernetes/cfg/kube-apiserver.conf
KUBE_APISERVER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--etcd-servers=https://192.168.44.80:2379,https://192.168.44.81:2379,https://192.168.44.82:2379 \
--bind-address=192.168.44.80 \
--secure-port=6443 \
--advertise-address=192.168.44.80 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth=true \
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--service-node-port-range=30000-32767 \
--kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \
--kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \
--tls-cert-file=/opt/kubernetes/ssl/server.pem \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
[root@master1 ssl]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
bbfbffc627fda09d8b72aaf3bdbd1f35
[root@master1 ssl]# vi /opt/kubernetes/cfg/token.csv
[root@master1 ssl]# cat !$
bbfbffc627fda09d8b72aaf3bdbd1f35,kubelet-bootstrap,10001,"system:node-bootstrapper"
[root@master1 ssl]# vi /usr/lib/systemd/system/kube-apiserver.service
[root@master1 ssl]# cat !$
cat /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-apiserver.conf
ExecStart=/opt/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
5)后续
# 启动并设置开机自启
[root@master1 ssl]# systemctl daemon-reload
[root@master1 ssl]# systemctl start kube-apiserver.service
[root@master1 ssl]# systemctl enable kube-apiserver.service
# 授权kubelet-bootstrap用户允许请求证书
[root@master1 ssl]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created
4、安装controller-manager
# 创建配置文件
[root@master1 ssl]# vim /opt/kubernetes/cfg/kube-controller-manager.conf
[root@master1 ssl]# cat !$
cat /opt/kubernetes/cfg/kube-controller-manager.conf
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--leader-elect=true \
--master=127.0.0.1:8080 \
--bind-address=127.0.0.1 \
--allocate-node-cidrs=true \
--cluster-cidr=10.244.0.0/16 \
--service-cluster-ip-range=10.0.0.0/24 \
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \
--root-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \
--experimental-cluster-signing-duration=87600h0m0s"
# 创建服务文件
[root@master1 ssl]# vim /usr/lib/systemd/system/kube-controller-manager.service
[root@master1 ssl]# cat !$
cat /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/opt/kubernetes/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
# 启动与开机自启
[root@master1 ssl] systemctl daemon-reload
[root@master1 ssl] systemctl start kube-controller-manager.service
[root@master1 ssl] systemctl enable kube-controller-manager.service # 未解决
5、安装kube-scheduler
# 创建scheduler 配置文件
[root@master1 ~]# vi /opt/kubernetes/cfg/kube-scheduler.conf
[root@master1 ~]# cat /opt/kubernetes/cfg/kube-scheduler.conf
KUBE_SCHEDULER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--leader-elect \
--master=127.0.0.1:8080 \
--bind-address=127.0.0.1"
# 创建服务文件
[root@master1 ~]# vi /usr/lib/systemd/system/kube-scheduler.service
[root@master1 ~]# cat !$
cat /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-scheduler.conf
ExecStart=/opt/kubernetes/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
# 启动与开机自启
[root@master1 ~]# systemctl daemon-reload
[root@master1 ~]# systemctl start kube-scheduler.service
[root@master1 ~]# systemctl enable !$
至此,master节点部署完成,查看集群状态:
# 查看集群状态
[root@master1 ~]# kubectl get cs
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok # controller-manager 正常
scheduler Healthy ok # scheduler 正常
etcd-1 Healthy {"health":"true"} # etcd 正常
etcd-0 Healthy {"health":"true"} # etcd 正常
etcd-2 Healthy {"health":"true"} # etcd 正常