上一篇文章介绍了kubeasz利用ansible搭建了k8s的基础组件。
本篇主要介绍cluster-addon的部署,即Coredns、Metrics Server以及Dashboard等组件的部署
[root@master1 kubeasz-2.2.4]# ansible-playbook 07.cluster-addon.yml
- 1.根据hosts文件中配置的
CLUSTER_DNS_SVC_IP
CLUSTER_DNS_DOMAIN
等参数生成kubedns.yaml和coredns.yaml文件
- 2.注册变量pod_info,pod_info用来判断现有集群是否已经运行各种插件
- 3.根据pod_info和
配置开关
逐个进行/跳过插件安装
执行过程
[root@master1 kubeasz-2.2.4]# ansible-playbook 07.cluster-addon.yml
PLAY [kube_node] ********************************************************************************************************************
TASK [Gathering Facts] **************************************************************************************************************
ok: [192.168.149.32]
ok: [192.168.149.31]
TASK [cluster-addon : 在 node 节点创建相关目录] **********************************************************************************************
ok: [192.168.149.31] => (item=/opt/kube/kube-system)
ok: [192.168.149.32] => (item=/opt/kube/kube-system)
TASK [cluster-addon : 准备 DNS的部署文件] **************************************************************************************************
changed: [192.168.149.32] => (item=kubedns)
changed: [192.168.149.31] => (item=kubedns)
changed: [192.168.149.31] => (item=coredns)
changed: [192.168.149.32] => (item=coredns)
TASK [cluster-addon : 获取所有已经创建的POD信息] ***********************************************************************************************
changed: [192.168.149.31]
TASK [cluster-addon : 获取已下载离线镜像信息] **************************************************************************************************
changed: [192.168.149.31]
TASK [cluster-addon : 尝试推送离线coredns镜像(若执行失败,可忽略)] ***********************************************************************************
changed: [192.168.149.32]
changed: [192.168.149.31]
TASK [cluster-addon : 获取coredns离线镜像推送情况] ********************************************************************************************
changed: [192.168.149.31]
changed: [192.168.149.32]
TASK [cluster-addon : 导入coredns的离线镜像(若执行失败,可忽略)] ************************************************************************************
changed: [192.168.149.31]
changed: [192.168.149.32]
TASK [cluster-addon : 创建coredns部署] **************************************************************************************************
changed: [192.168.149.31]
TASK [cluster-addon : 尝试推送离线 metrics-server镜像(若执行失败,可忽略)] ***************************************************************************
changed: [192.168.149.31]
changed: [192.168.149.32]
TASK [cluster-addon : 获取metrics-server离线镜像推送情况] *************************************************************************************
changed: [192.168.149.31]
changed: [192.168.149.32]
TASK [cluster-addon : 导入 metrics-server的离线镜像(若执行失败,可忽略)] ****************************************************************************
changed: [192.168.149.32]
changed: [192.168.149.31]
TASK [cluster-addon : 创建 metrics-server部署] ******************************************************************************************
changed: [192.168.149.31]
TASK [cluster-addon : 尝试推送离线 dashboard 镜像(若执行失败,可忽略)] *******************************************************************************
changed: [192.168.149.32] => (item=dashboard_v2.1.0.tar)
changed: [192.168.149.31] => (item=dashboard_v2.1.0.tar)
changed: [192.168.149.32] => (item=metrics-scraper_v1.0.6.tar)
changed: [192.168.149.31] => (item=metrics-scraper_v1.0.6.tar)
TASK [cluster-addon : 获取dashboard离线镜像推送情况] ******************************************************************************************
changed: [192.168.149.31]
changed: [192.168.149.32]
TASK [cluster-addon : 导入 dashboard 的离线镜像(docker)] ***********************************************************************************
changed: [192.168.149.31] => (item=dashboard_v2.1.0.tar)
changed: [192.168.149.32] => (item=dashboard_v2.1.0.tar)
changed: [192.168.149.31] => (item=metrics-scraper_v1.0.6.tar)
changed: [192.168.149.32] => (item=metrics-scraper_v1.0.6.tar)
TASK [cluster-addon : 创建 dashboard部署] ***********************************************************************************************
changed: [192.168.149.31]
TASK [cluster-addon : 尝试推送离线 traefik v2 镜像(若执行失败,可忽略)] ******************************************************************************
changed: [192.168.149.32]
changed: [192.168.149.31]
TASK [cluster-addon : 获取traefik离线镜像推送情况] ********************************************************************************************
changed: [192.168.149.31]
changed: [192.168.149.32]
TASK [cluster-addon : 导入 traefik的离线镜像(若执行失败,可忽略)] ***********************************************************************************
changed: [192.168.149.31]
changed: [192.168.149.32]
TASK [cluster-addon : 创建 traefik部署] *************************************************************************************************
fatal: [192.168.149.31]: FAILED! => {"changed": true, "cmd": "/etc/ansible/bin/kubectl apply -f /etc/ansible/manifests/ingress/traefik/traefik-ingress-v2.yaml", "delta": "0:00:00.481497", "end": "2021-09-24 11:11:34.209173", "msg": "non-zero return code", "rc": 1, "start": "2021-09-24 11:11:33.727676", "stderr": "Warning: apiextensions.k8s.io/v1beta1 CustomResourceDefinition is deprecated in v1.16+, unavailable in v1.22+; use apiextensions.k8s.io/v1 CustomResourceDefinition\nerror: unable to recognize \"/etc/ansible/manifests/ingress/traefik/traefik-ingress-v2.yaml\": no matches for kind \"IngressRoute\" in version \"traefik.containo.us/v1alpha1\"", "stderr_lines": ["Warning: apiextensions.k8s.io/v1beta1 CustomResourceDefinition is deprecated in v1.16+, unavailable in v1.22+; use apiextensions.k8s.io/v1 CustomResourceDefinition", "error: unable to recognize \"/etc/ansible/manifests/ingress/traefik/traefik-ingress-v2.yaml\": no matches for kind \"IngressRoute\" in version \"traefik.containo.us/v1alpha1\""], "stdout": "serviceaccount/traefik-ingress-controller created\nclusterrole.rbac.authorization.k8s.io/traefik-ingress-controller created\nclusterrolebinding.rbac.authorization.k8s.io/traefik-ingress-controller created\ncustomresourcedefinition.apiextensions.k8s.io/ingressroutes.traefik.containo.us created\ncustomresourcedefinition.apiextensions.k8s.io/ingressroutetcps.traefik.containo.us created\ncustomresourcedefinition.apiextensions.k8s.io/middlewares.traefik.containo.us created\ncustomresourcedefinition.apiextensions.k8s.io/tlsoptions.traefik.containo.us created\ncustomresourcedefinition.apiextensions.k8s.io/traefikservices.traefik.containo.us created\ncustomresourcedefinition.apiextensions.k8s.io/tlsstores.traefik.containo.us created\ncustomresourcedefinition.apiextensions.k8s.io/ingressrouteudps.traefik.containo.us created\nconfigmap/traefik-config created\ndeployment.apps/traefik-ingress-controller created\nservice/traefik-svc created", "stdout_lines": 
["serviceaccount/traefik-ingress-controller created", "clusterrole.rbac.authorization.k8s.io/traefik-ingress-controller created", "clusterrolebinding.rbac.authorization.k8s.io/traefik-ingress-controller created", "customresourcedefinition.apiextensions.k8s.io/ingressroutes.traefik.containo.us created", "customresourcedefinition.apiextensions.k8s.io/ingressroutetcps.traefik.containo.us created", "customresourcedefinition.apiextensions.k8s.io/middlewares.traefik.containo.us created", "customresourcedefinition.apiextensions.k8s.io/tlsoptions.traefik.containo.us created", "customresourcedefinition.apiextensions.k8s.io/traefikservices.traefik.containo.us created", "customresourcedefinition.apiextensions.k8s.io/tlsstores.traefik.containo.us created", "customresourcedefinition.apiextensions.k8s.io/ingressrouteudps.traefik.containo.us created", "configmap/traefik-config created", "deployment.apps/traefik-ingress-controller created", "service/traefik-svc created"]}
...ignoring
PLAY RECAP **************************************************************************************************************************
192.168.149.31 : ok=21 changed=19 unreachable=0 failed=0 skipped=14 rescued=0 ignored=1
192.168.149.32 : ok=15 changed=13 unreachable=0 failed=0 skipped=12 rescued=0 ignored=0
验证阶段
1. kubectl get pod -n kube-system ##查看生成的pod
[root@master1 kubeasz-2.2.4]# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-5787695b7f-9qg69 1/1 Running 0 5m21s
dashboard-metrics-scraper-79c5968bdc-pv76z 1/1 Running 0 5m6s
kube-flannel-ds-amd64-898kd 1/1 Running 0 41h
kube-flannel-ds-amd64-cpld8 1/1 Running 0 41h
kube-flannel-ds-amd64-fglk7 1/1 Running 0 41h
kube-flannel-ds-amd64-ghsds 1/1 Running 0 41h
kubernetes-dashboard-c4c6566d6-m8p87 1/1 Running 0 5m6s
metrics-server-8568cf894b-rxkqx 1/1 Running 0 5m17s
traefik-ingress-controller-f8cf6f7cb-t4lqd 1/1 Running 0 4m58s
2. Metrics Server
[root@master1 kubeasz-2.2.4]# kubectl top node;kubectl top pod -n kube-system
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
192.168.149.22 71m 1% 2995Mi 42%
192.168.149.23 78m 1% 1805Mi 25%
192.168.149.31 33m 0% 1537Mi 21%
192.168.149.32 36m 0% 1511Mi 21%
NAME CPU(cores) MEMORY(bytes)
coredns-5787695b7f-9qg69 2m 14Mi
dashboard-metrics-scraper-79c5968bdc-pv76z 1m 9Mi
kube-flannel-ds-amd64-898kd 2m 20Mi
kube-flannel-ds-amd64-cpld8 2m 20Mi
kube-flannel-ds-amd64-fglk7 2m 10Mi
kube-flannel-ds-amd64-ghsds 2m 22Mi
kubernetes-dashboard-c4c6566d6-m8p87 1m 13Mi
metrics-server-8568cf894b-rxkqx 1m 13Mi
traefik-ingress-controller-f8cf6f7cb-t4lqd 1m 19Mi
3.Dashboard
- 旧版(<= 1.6)建议通过apiserver访问,直接通过apiserver 认证授权机制去控制 dashboard权限,详见旧版文档
- 新版(>= 1.7)
- 可以使用自带的登录界面,使用不同Service Account Tokens 去控制访问 dashboard的权限
- 增加了通过
api-server
方式访问dashboard
- 增加了
NodePort
方式暴露服务,这样集群外部可以使用https://NodeIP:NodePort
(注意是https不是http,区别于1.6.3版本) 直接访问 dashboard。
[root@master1 kubeasz-2.2.4]# kubectl cluster-info
Kubernetes control plane is running at https://192.168.149.22:6443
CoreDNS is running at https://192.168.149.22:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
kubernetes-dashboard is running at https://192.168.149.22:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy
Metrics-server is running at https://192.168.149.22:6443/api/v1/namespaces/kube-system/services/https:metrics-server:/proxy
通过https://NodeIP:NodePort方式访问
登录Dashboard方式:
- token登录
- 配置带有secret的kubeconfig
- 修改/etc/ansible/manifests/dashboard/kubernetes-dashboard.yaml ,deployment.spec.template.spec.containers.arg添加参数,将认证模式改成basic
args:
- --auto-generate-certificates
- --authentication-mode=basic
- 再easzctl basic-auth -s 生成账号密码登录
token方式
[root@master1 kube-master]# kubectl get secret -A | grep admin
kube-system admin-user-token-dtfq6 kubernetes.io/service-account-token 3 4h54m
[root@master1 kube-master]# kubectl describe secrets admin-user-token-dtfq6 -n kube-system
Name: admin-user-token-dtfq6
Namespace: kube-system
Labels: <none>
Annotations: kubernetes.io/service-account.name: admin-user
kubernetes.io/service-account.uid: 3d0a9e05-9194-44ac-a981-6b364b3d5fee
Type: kubernetes.io/service-account-token
Data
====
namespace: 11 bytes
token: eyJhbGciOiJSUzI1NiIsImtpZCI6ImtOempkcWpNTy1PUm1rUE15bU9RMUk1YV9GcHRFMF9sWXpUNjMyMDJxS1EifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWR0ZnE2Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiIzZDBhOWUwNS05MTk0LTQ0YWMtYTk4MS02YjM2NGIzZDVmZWUiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.ch1VkSPfo2gTpSCX32-BcbQR8w_SQUcETQWTZNf4ts_uB4V_wIGy5ihGX__v7dh25LmOyqG9oY8g2DUPNBWjDKWtGUk95XzQQOCSDnJuXrTjLd9SCp0trSPjkYToe4xjbT84qvbusTddzPaTlM9O9QFBqNuCWwpfpYc5qVhFJo87_m2F3o3WEBaaQtAYA7NEkPbxS89MMKgvniEkYFpp9CI7hHJYI_VLIY8Dzkdf7lxbVccThdTJ9FuyelK5X4n5DmXpmyN_4Gf7sbwC1v05aVXjzD8URdLCUNYKcJC0dvZdadpADfg33VSEnjL4US1PRSxjp6WrxyGtJZsTkxiteQ
ca.crt: 1350 bytes
kubeconfig方式
思路:找到kubelet.kubeconfig文件,拷贝一份并将token(secret)参数添加至末尾生成kubeconfig文件
systemctl status kubelet
cat /etc/kubernetes/kubelet.kubeconfig
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig
cp /etc/kubernetes/kubelet.kubeconfig /root/kubeconfig
向/root/kubeconfig添加token参数
Web UI界面利用账号密码登录
[root@master1 ~]# cat /etc/ansible/manifests/dashboard/kubernetes-dashboard.yaml
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1beta2
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: mirrorgooglecontainers/kubernetes-dashboard-amd64:v1.8.3
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --authentication-mode=basic
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
type: NodePort
ports:
- port: 80
targetPort: 8443
nodePort: 30000
selector:
k8s-app: kubernetes-dashboard