一、首先安装dashboard
将kubernetes-dashboard需要的镜像上传到工作节点(hd2.com)和hd3.com
将其上传为镜像:docker load -i dashboard_2_0_0.tar.gz(hd2.com,hd3.com)
vim kubernetes-dashboard.yaml
# Dedicated namespace for all Dashboard resources.
apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard
---
# ServiceAccount the Dashboard pods run as; bound to the Role/ClusterRole below.
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
# ClusterIP Service fronting the Dashboard HTTPS port (change type to
# NodePort later to expose it outside the cluster, as the notes below do).
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
---
# Empty Secret; the Dashboard writes its auto-generated TLS certs here
# (see the --auto-generate-certificates arg on the Deployment).
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque
---
# Secret holding the CSRF token; seeded empty, populated by the Dashboard.
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""
---
# Secret used by the Dashboard to store its encryption key material.
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque
---
# ConfigMap where the Dashboard persists user-visible UI settings.
kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard
---
# Namespaced Role granting the Dashboard access to exactly the Secrets,
# ConfigMap and metrics-scraper Service it owns (least privilege).
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
  # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]
---
# ClusterRole so the metrics scraper can read pod/node metrics cluster-wide.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]
---
# Bind the namespaced Role above to the Dashboard ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard
---
# Bind the ClusterRole above to the same ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard
---
# The Dashboard web UI itself: single replica, HTTPS on 8443, hardened
# (non-root, read-only root filesystem, no privilege escalation).
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.0.0-beta8
          # IfNotPresent so the images pre-loaded with `docker load` on the
          # worker nodes are used instead of pulling from a registry.
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
            # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "beta.kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
---
# Internal Service for the metrics scraper sidecar deployment.
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper
---
# Metrics scraper: collects short-term metrics from metrics-server for the
# Dashboard UI graphs; plain HTTP on 8000, same hardening as the Dashboard.
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.1
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
            - mountPath: /tmp
              name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "beta.kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}
应用资源清单:kubectl apply -f kubernetes-dashboard.yaml
查看状态:kubectl get pods -n kubernetes-dashboard
修改service type类型变成NodePort:
kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
将 type: ClusterIP 改为 type: NodePort,保存退出,再次进行查看,则可以看到:
原来的 ClusterIP 类型变成了 NodePort,如上图所示。
使用主机IP和上图暴露出的端口即可访问:图形化界面
通过token令牌访问dashboard:kubectl create clusterrolebinding dashboard-cluster-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard
查看kubernetes-dashboard名称空间下的secret:kubectl get secret -n kubernetes-dashboard
找到对应的带有token值:kubectl describe secret 和带有token的kubernetes-dashboard名。会产生一个令牌。
将此令牌复制到访问界面的token下,点击login即可登录
通过kubernetes-dashboard创建容器。
首先将nginx.tar.gz镜像包上传到hd2.com和hd3.com上,并将其上传为镜像:docker load -i nginx.tar.gz
再进入到图形化界面:点击右上角+ create from form
创建新的pod
依次输入信息:App name -> Container image -> Number of pods ->Server(选取External) -> Port-80->Deploy
完成添加
在左侧选择service可以看到端口,在宿主机访问主机IP和该端口,可以访问到nginx的测试页面,即为成功。
安装metrics-server组件
metrics-server是一个集群范围内的资源数据集和工具,同样的metrics-server也只是显示数据,并不提供数据存储服务,主要关注的是资源度量api的实现,比如cpu,文件描述,内存,请求延时等指标,metric-server收集数据给k8s集群内使用,如kubectl,hpa,scheduler等,
部署metrics-server组件。
将metrics-server-amd64-0-3-6.tar.gz,addon.tar.gz上传至镜像仓库
三台主机都进行操作:docker load -i metrics-server-amd64-0-3-6.tar.gz
docker load -i addon.tar.gz
部署metrics-server服务
在/etc/kubernetes/manifests里面改一下apiserver的配置
vim /etc/kubernetes/manifests/kube-apiserver.yaml
增加如下内容:
在command模块的第四行下添加:- --enable-aggregator-routing=true
重新更新apiserver配置(kube-apiserver是静态Pod,修改/etc/kubernetes/manifests下的清单后kubelet会自动重建;也可手动应用):
kubectl apply -f /etc/kubernetes/manifests/kube-apiserver.yaml
kubectl get pods -n kube-system
会有一个拉取失败的将失败的进行删除:
kubectl delete pods kube-apiserver -n kube-system
上传metrics.yaml文件:
vim metrics.yaml由于文件较长,在文件夹中k8s使用压缩包中存放,可以自取
kubectl apply -f metrics.yaml
kubectl get pods -n kube-system| grep metrics
出现运行后即可进行下一步。
测试kubectl top命令
kubectl top pods -n kube-system
kubectl top nodes
将scheduler、controller-manager端口变成物理机可以监听的端口
kubectl get cs
默认在1.19之后,10252和10251端口都是绑定在127.0.0.1上的,如果想要通过prometheus监控,会采集不到数据,所以可以把端口绑定到物理机,按照下列方法进行:
vim /etc/kubernetes/manifests/kube-scheduler.yaml
将--bind-address=127.0.0.1变成--bind-address=192.168.1.11
将--port=0这一行删除掉
将httpGet字段下的host由127.0.0.1变成192.168.1.11(192.168.1.11是k8s控制节点的IP)
vim /etc/kubernetes/manifests/kube-controller-manager.yaml