1 Machine planning
Host | Role | Services deployed |
wyl01 | master | kube-apiserver kube-controller-manager kube-scheduler etcd |
wyl02 | node | kubelet kube-proxy docker flannel etcd |
wyl03 | node | kubelet kube-proxy docker flannel etcd |
2 Install and start Docker
Add the Aliyun Docker yum repository, then install Docker with yum. This must be done on all three machines.
[root@wyl01 opt]# yum install docker-ce -y
[root@wyl01 opt]# systemctl start docker.service
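The repository setup itself is not shown above; one common way to add the Aliyun mirror (the repo URL is an assumption, adjust as needed) is:
yum install -y yum-utils
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum makecache fast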
3 Create TLS certificates
Component | Certificates used |
etcd | ca.pem, server.pem, server-key.pem |
flannel | ca.pem, server.pem, server-key.pem |
kube-apiserver | ca.pem, server.pem, server-key.pem |
kubelet | ca.pem, ca-key.pem |
kube-proxy | ca.pem, kube-proxy.pem, kube-proxy-key.pem |
kubectl | ca.pem, admin.pem, admin-key.pem |
mkdir /opt/kubernetes/{bin,cfg,ssl} -p
cd /opt/kubernetes/ssl
# Download the cfssl certificate tools
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
# Make the binaries executable
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
# Move them onto the PATH
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo
[root@wyl01 ssl]# ls
certificate.sh cfssl-certinfo_linux-amd64 cfssljson_linux-amd64 cfssl_linux-amd64
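A quick sanity check that the tools are installed and on the PATH:
cfssl version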
4 Generate the certificates
Run the following on the master node.
[root@wyl01 ssl]# cat certificate.sh
cat > ca-config.json <<EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
cat > ca-csr.json <<EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "Beijing",
"ST": "Beijing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
#-----------------------
cat > server-csr.json <<EOF
{
"CN": "kubernetes",
"hosts": [
"127.0.0.1",
"192.168.52.128",
"192.168.52.129",
"192.168.52.130",
"10.10.10.1",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
#-----------------------
cat > admin-csr.json <<EOF
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "system:masters",
"OU": "System"
}
]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
#-----------------------
cat > kube-proxy-csr.json <<EOF
{
"CN": "system:kube-proxy",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
# Run the script
[root@wyl01 ssl]# sh certificate.sh
# Delete everything except the generated .pem certificate files
[root@wyl01 ssl]# ls |grep -v pem|xargs -i rm {}
[root@wyl01 ssl]# ll
total 32
-rw------- 1 root root 1679 Jul 19 09:51 admin-key.pem
-rw-r--r-- 1 root root 1399 Jul 19 09:51 admin.pem
-rw------- 1 root root 1679 Jul 19 09:51 ca-key.pem
-rw-r--r-- 1 root root 1359 Jul 19 09:51 ca.pem
-rw------- 1 root root 1679 Jul 19 09:51 kube-proxy-key.pem
-rw-r--r-- 1 root root 1403 Jul 19 09:51 kube-proxy.pem
-rw------- 1 root root 1679 Jul 19 09:51 server-key.pem
-rw-r--r-- 1 root root 1627 Jul 19 09:51 server.pem
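To inspect a generated certificate, for example to confirm the hosts (SANs) baked into server.pem, cfssl-certinfo can be used:
cfssl-certinfo -cert server.pem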
5 Deploy etcd (on all three machines)
[root@wyl01 software]# tar -xf etcd-v3.2.12-linux-amd64.tar.gz
[root@wyl01 software]# ls
etcd.sh etcd-v3.2.12-linux-amd64 etcd-v3.2.12-linux-amd64.tar.gz
[root@wyl01 software]# cd etcd-v3.2.12-linux-amd64/
[root@wyl01 etcd-v3.2.12-linux-amd64]# ls
Documentation etcd etcdctl README-etcdctl.md README.md READMEv2-etcdctl.md
[root@wyl01 etcd-v3.2.12-linux-amd64]# cp -r etcd* /opt/kubernetes/bin/
[root@wyl01 etcd-v3.2.12-linux-amd64]# ll /opt/kubernetes/bin/
total 32284
-rwxr-xr-x 1 root root 17817664 Jul 15 14:47 etcd
-rwxr-xr-x 1 root root 15234432 Jul 15 14:47 etcdctl
[root@wyl01 software]# vim /opt/kubernetes/cfg/etcd
# On the other two nodes, change the IPs in ETCD_LISTEN_PEER_URLS, ETCD_LISTEN_CLIENT_URLS, ETCD_INITIAL_ADVERTISE_PEER_URLS and ETCD_ADVERTISE_CLIENT_URLS to the local machine's IP, and set ETCD_NAME to etcd02 and etcd03 respectively; ETCD_INITIAL_CLUSTER stays the same.
#[Member]
ETCD_NAME="etcd01"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.52.128:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.52.128:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.52.128:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.52.128:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.52.128:2380,etcd02=https://192.168.52.129:2380,etcd03=https://192.168.52.130:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
[root@wyl01 software]# vim /etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=-/opt/kubernetes/cfg/etcd
ExecStart=/opt/kubernetes/bin/etcd \
--name=${ETCD_NAME} \
--data-dir=${ETCD_DATA_DIR} \
--listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
--advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=new \
--cert-file=/opt/kubernetes/ssl/server.pem \
--key-file=/opt/kubernetes/ssl/server-key.pem \
--peer-cert-file=/opt/kubernetes/ssl/server.pem \
--peer-key-file=/opt/kubernetes/ssl/server-key.pem \
--trusted-ca-file=/opt/kubernetes/ssl/ca.pem \
--peer-trusted-ca-file=/opt/kubernetes/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
Start the service and check that it is running:
[root@wyl01 software]# systemctl daemon-reload
[root@wyl01 software]# systemctl start etcd.service
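Once etcd has been started on all three machines, the cluster health can be checked with etcdctl, using the same certificates as above:
/opt/kubernetes/bin/etcdctl \
--ca-file=/opt/kubernetes/ssl/ca.pem \
--cert-file=/opt/kubernetes/ssl/server.pem \
--key-file=/opt/kubernetes/ssl/server-key.pem \
--endpoints="https://192.168.52.128:2379,https://192.168.52.129:2379,https://192.168.52.130:2379" \
cluster-health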
6 Deploy the Flannel container network
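The flannel deployment commands themselves are not reproduced in this section. As a rough sketch only: the usual first step is to write the overlay network configuration into etcd so flanneld on each node can pick it up (the 172.17.0.0/16 range is an assumption based on the pod IPs seen later):
/opt/kubernetes/bin/etcdctl \
--ca-file=/opt/kubernetes/ssl/ca.pem \
--cert-file=/opt/kubernetes/ssl/server.pem \
--key-file=/opt/kubernetes/ssl/server-key.pem \
--endpoints="https://192.168.52.128:2379,https://192.168.52.129:2379,https://192.168.52.130:2379" \
set /coreos.com/network/config '{"Network":"172.17.0.0/16","Backend":{"Type":"vxlan"}}'
flanneld is then installed on each node, pointed at the same etcd endpoints, and Docker is restarted so containers use the flannel bridge.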
Deploy the master node services
Deploy the kube-apiserver service:
vim kube-apiserver
#!/bin/bash
MASTER_ADDRESS=${1:-"192.168.52.128"}
ETCD_SERVERS=${2:-"https://192.168.52.128:2379,https://192.168.52.129:2379,https://192.168.52.130:2379"}
cat <<EOF >/opt/kubernetes/config/kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=true \\
--v=4 \\
--etcd-servers=${ETCD_SERVERS} \\
--insecure-bind-address=127.0.0.1 \\
--bind-address=${MASTER_ADDRESS} \\
--insecure-port=8080 \\
--secure-port=6443 \\
--advertise-address=${MASTER_ADDRESS} \\
--allow-privileged=true \\
--service-cluster-ip-range=10.10.10.0/24 \\
--admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--kubelet-https=true \\
--enable-bootstrap-token-auth \\
--token-auth-file=/opt/kubernetes/config/token.csv \\
--service-node-port-range=30000-50000 \\
--tls-cert-file=/opt/kubernetes/ssl/server.pem \\
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/opt/kubernetes/ssl/ca.pem \\
--etcd-certfile=/opt/kubernetes/ssl/server.pem \\
--etcd-keyfile=/opt/kubernetes/ssl/server-key.pem"
EOF
cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/config/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl restart kube-apiserver
If kube-apiserver fails to start here because the token file is missing, copy token.csv into the /opt/kubernetes/config directory referenced above:
cp /opt/kubernetes/ssl/token.csv /opt/kubernetes/config/token.csv
systemctl start kube-apiserver # start the service
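Note that token.csv is never generated in the steps above. It is typically created on the master before starting kube-apiserver, along these lines (the token value is random; the kubelet-bootstrap user must match the clusterrolebinding created later in the kubelet section):
BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /opt/kubernetes/ssl/token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF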
Deploy kube-controller-manager
[root@wyl01 master]# vim controller-manager.sh # no changes needed
#!/bin/bash
MASTER_ADDRESS=${1:-"127.0.0.1"}
cat <<EOF >/opt/kubernetes/config/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect=true \\
--address=127.0.0.1 \\
--service-cluster-ip-range=10.10.10.0/24 \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem"
EOF
cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/config/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl restart kube-controller-manager
[root@wyl01 master]# sh controller-manager.sh 127.0.0.1
[root@wyl01 master]# vim scheduler.sh
#!/bin/bash
MASTER_ADDRESS=${1:-"127.0.0.1"}
cat <<EOF >/opt/kubernetes/config/kube-scheduler
KUBE_SCHEDULER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect"
EOF
cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/config/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl restart kube-scheduler
[root@wyl01 master]# sh scheduler.sh 127.0.0.1
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
[root@wyl01 master]#
Check the master components
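A simple check from the master is to query the component statuses; etcd, the scheduler and the controller-manager should all report Healthy:
kubectl get componentstatuses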
Deploy the node components:
On each node we deploy two services: kubelet and kube-proxy. First run the kubelet.sh script on wyl02 and wyl03 to deploy kubelet; the script contents are shown below.
[root@wyl03 software]# sh kubelet.sh 192.168.52.130 10.10.10.2
Script contents:
#!/bin/bash
NODE_ADDRESS=${1:-"192.168.52.130"}
DNS_SERVER_IP=${2:-"10.10.10.2"}
cat <<EOF >/opt/kubernetes/config/kubelet
KUBELET_OPTS="--logtostderr=true \\
--v=4 \\
--address=${NODE_ADDRESS} \\
--hostname-override=${NODE_ADDRESS} \\
--kubeconfig=/opt/kubernetes/config/kubelet.kubeconfig \\
--experimental-bootstrap-kubeconfig=/opt/kubernetes/config/bootstrap.kubeconfig \\
--cert-dir=/opt/kubernetes/ssl \\
--allow-privileged=true \\
--cluster-dns=${DNS_SERVER_IP} \\
--cluster-domain=cluster.local \\
--fail-swap-on=false \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
EOF
cat <<EOF >/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service
[Service]
EnvironmentFile=-/opt/kubernetes/config/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet
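The script references /opt/kubernetes/config/bootstrap.kubeconfig, whose creation is not shown here. A minimal sketch of generating it on the master (the token must match token.csv, and the resulting file is copied to /opt/kubernetes/config on each node) might be:
cd /opt/kubernetes/ssl
KUBE_APISERVER="https://192.168.52.128:6443"
kubectl config set-cluster kubernetes \
--certificate-authority=./ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=bootstrap.kubeconfig
kubectl config set-credentials kubelet-bootstrap \
--token=<the token from token.csv> \
--kubeconfig=bootstrap.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig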
The following error may appear when kubelet runs:
error: failed to run Kubelet: cannot create certificate signing request: certificatesigningrequests.certificates.k8s.io is forbidden: User "kubelet-bootstrap" cannot create certificatesigningrequests.certificates.k8s.io at the cluster scope
Solution:
Create the cluster role binding for the bootstrap user on the master node:
[root@wyl01 ssl]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
clusterrolebinding "kubelet-bootstrap" created
After creating it, run the script again and check that the kubelet process is up.
Deploy the kube-proxy service by running the proxy.sh script (a sketch of it follows below), then check the process with ps.
[root@wyl02 software]# sh proxy.sh 192.168.52.129
Once everything is running normally, start these two services on wyl03 as well.
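The contents of proxy.sh are not reproduced above; a sketch consistent with the layout used so far (config files under /opt/kubernetes/config, and a kube-proxy.kubeconfig generated beforehand from the kube-proxy certificates) could look like:
#!/bin/bash
NODE_ADDRESS=${1:-"192.168.52.129"}
cat <<EOF >/opt/kubernetes/config/kube-proxy
KUBE_PROXY_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--kubeconfig=/opt/kubernetes/config/kube-proxy.kubeconfig"
EOF
cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target
[Service]
EnvironmentFile=-/opt/kubernetes/config/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy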
Approve the nodes' certificate requests on the master
[root@wyl01 ssl]# kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-enYniqvIBNOIX4DGLV0cL14oj-tLIvDrboLrlKdcWK8 1m kubelet-bootstrap Pending
node-csr-siAO6abKNXjFH4bebYTW_k5T02so7BbiqdUyODPrRJY 3m kubelet-bootstrap Pending
# Approve the pending requests
[root@wyl01 ssl]# kubectl certificate approve node-csr-enYniqvIBNOIX4DGLV0cL14oj-tLIvDrboLrlKdcWK8
certificatesigningrequest "node-csr-enYniqvIBNOIX4DGLV0cL14oj-tLIvDrboLrlKdcWK8" approved
[root@wyl01 ssl]# kubectl certificate approve node-csr-siAO6abKNXjFH4bebYTW_k5T02so7BbiqdUyODPrRJY
certificatesigningrequest "node-csr-siAO6abKNXjFH4bebYTW_k5T02so7BbiqdUyODPrRJY" approved
[root@wyl01 ssl]# kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-enYniqvIBNOIX4DGLV0cL14oj-tLIvDrboLrlKdcWK8 3m kubelet-bootstrap Approved,Issued
node-csr-siAO6abKNXjFH4bebYTW_k5T02so7BbiqdUyODPrRJY 4m kubelet-bootstrap Approved,Issued
[root@wyl01 ssl]# kubectl get node
NAME STATUS ROLES AGE VERSION
192.168.52.129 Ready <none> 54s v1.9.0
192.168.52.130 Ready <none> 1m v1.9.0
Run an nginx example
[root@wyl01 ssl]# kubectl run nginx --image=nginx --replicas=3
deployment "nginx" created
[root@wyl01 ssl]# kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx-8586cf59-c7bmj 1/1 Running 0 1m
nginx-8586cf59-ntwgf 1/1 Running 0 1m
nginx-8586cf59-pj45w 1/1 Running 0 1m
[root@wyl01 ssl]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE
nginx-8586cf59-c7bmj 1/1 Running 0 1m 172.17.57.2 192.168.52.130
nginx-8586cf59-ntwgf 1/1 Running 0 1m 172.17.51.2 192.168.52.129
nginx-8586cf59-pj45w 1/1 Running 0 1m 172.17.57.3 192.168.52.130
[root@wyl01 ssl]# kubectl expose deployment nginx --port=88 --target-port=80 --type=NodePort
service "nginx" exposed
[root@wyl01 ssl]# kubectl get svc nginx
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
nginx NodePort 10.10.10.132 <none> 88:39327/TCP 1h
On one of the nodes, test with curl 10.10.10.132:88 (the cluster IP and service port), or from a PC browse to any node's IP on NodePort 39327.
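For example, from one of the nodes:
curl -I http://10.10.10.132:88
curl -I http://192.168.52.129:39327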
Deploy the dashboard web UI
[root@wyl01 ui]# ll
total 12
-rw-r--r-- 1 root root 1148 Jul 13 17:16 dashboard-deployment.yaml
-rw-r--r-- 1 root root 612 Jul 13 17:16 dashboard-rbac.yaml
-rw-r--r-- 1 root root 338 Jul 13 17:16 dashboard-service.yaml
[root@wyl01 ui]# kubectl create -f dashboard-rbac.yaml
serviceaccount "kubernetes-dashboard" created
clusterrolebinding "kubernetes-dashboard-minimal" created
[root@wyl01 ui]# kubectl create -f dashboard-deployment.yaml
deployment "kubernetes-dashboard" created
[root@wyl01 ui]# kubectl create -f dashboard-service.yaml
service "kubernetes-dashboard" created