Kubernetes Practice Guide (Part 5)

I. Introduction to k8s add-ons
II. Installing the DNS add-on
III. Installing the dashboard
IV. Installing metrics-server

I. Introduction to k8s add-ons

1. The extracted kubernetes-server source package

After the kubernetes-server source tarball is unpacked, the directory kubernetes/cluster/addons/ inside it provides installation manifests for a variety of add-ons,
including dns, dashboard, prometheus, cluster-monitoring, metrics-server, and more.

2. Example: dashboard

Extraction directory: kubernetes/cluster/addons/dashboard

[root@master1 dashboard]# ll
total 32
-rw-rw-r-- 1 root root  264 Jun  5 02:15 dashboard-configmap.yaml
-rw-rw-r-- 1 root root 1822 Jun  5 02:15 dashboard-controller.yaml
-rw-rw-r-- 1 root root 1353 Jun  5 02:15 dashboard-rbac.yaml
-rw-rw-r-- 1 root root  551 Jun  5 02:15 dashboard-secret.yaml
-rw-rw-r-- 1 root root  322 Jun  5 02:15 dashboard-service.yaml
-rw-rw-r-- 1 root root  242 Jun  5 02:15 MAINTAINERS.md
-rw-rw-r-- 1 root root  176 Jun  5 02:15 OWNERS
-rw-rw-r-- 1 root root  400 Jun  5 02:15 README.md

The default YAML files are largely sufficient as-is; the images they reference can be downloaded externally, imported into a local registry, and the image fields rewritten for the intranet environment.
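
For example, importing the coredns image used later in this guide into the local registry might look like this (a sketch; 192.168.192.234:888 is the local registry address used throughout this guide, and the pull requires a host with outbound access):

docker pull k8s.gcr.io/coredns:1.3.1
docker tag k8s.gcr.io/coredns:1.3.1 192.168.192.234:888/coredns:1.3.1
docker push 192.168.192.234:888/coredns:1.3.1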

II. Installing the DNS add-on

1. Configuration file: coredns.yaml

[root@master1 coredns]# cat coredns.yaml
# __MACHINE_GENERATED_WARNING__

apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
      kubernetes.io/cluster-service: "true"
      addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
      addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. So that the Addon Manager does not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        beta.kubernetes.io/os: linux
      containers:
      - name: coredns
        image: k8s.gcr.io/coredns:1.3.1
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.244.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
[root@master1 coredns]# sed -i -e s/__PILLAR__DNS__DOMAIN__/cluster.local/ -e s/__PILLAR__DNS__SERVER__/10.244.0.2/ coredns.yaml
[root@master1 coredns]# sed -i 's@k8s.gcr.io/coredns:1.3.1@192.168.192.234:888/coredns:1.3.1@' coredns.yaml
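
A quick check that no template placeholders survived the substitutions (grep exits non-zero when nothing matches, so the echo fires only when the file is clean):

[root@master1 coredns]# grep '__PILLAR__' coredns.yaml || echo "substitutions complete"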

2. Installation and notes

[root@master1 coredns]# kubectl apply -f coredns.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created

Resource summary:
ClusterRole: [system:coredns] grants the permissions CoreDNS needs
ServiceAccount: creates a serviceaccount named [coredns] for the pod to run as; k8s issues a token for this serviceaccount
ConfigMap: [coredns] holds the DNS configuration (the Corefile)
Deployment: deploys the coredns container, mounts the configmap [coredns], and runs as serviceaccount coredns
Service: [kube-dns] selects the Deployment's pods
ClusterRoleBinding: [system:coredns] = ClusterRole [system:coredns] + ServiceAccount [coredns]; with this binding in place, the pod holds the ClusterRole's permissions
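
The effect of the binding can be spot-checked with impersonation; once the resources above are applied, this should answer yes:

[root@master1 coredns]# kubectl auth can-i list endpoints -n kube-system --as=system:serviceaccount:kube-system:coredns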

3. Verification

[root@master1 coredns]# kubectl get all -n kube-system
NAME                           READY   STATUS    RESTARTS   AGE
pod/coredns-5497cfc9bb-jbvn2   1/1     Running   0          18s

NAME               TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
service/kube-dns   ClusterIP   10.244.0.2   <none>        53/UDP,53/TCP,9153/TCP   18s

NAME                      READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/coredns   1/1     1            1           18s

NAME                                 DESIRED   CURRENT   READY   AGE
replicaset.apps/coredns-5497cfc9bb   1         1         1       18s

Deploying a fresh application shows the DNS settings injected into its pods:

[root@master1 yaml]# kubectl get pods
NAME                    READY   STATUS    RESTARTS   AGE
base-7d77d4cc9d-9sbdn   1/1     Running   0          118s
base-7d77d4cc9d-9vx6n   1/1     Running   0          118s
base-7d77d4cc9d-ctwjt   1/1     Running   0          118s
base-7d77d4cc9d-kmfbf   1/1     Running   0          118s
base-7d77d4cc9d-wfbhh   1/1     Running   0          118s
[root@master1 yaml]# kubectl exec -it base-7d77d4cc9d-9sbdn -- /bin/bash
[root@base-7d77d4cc9d-9sbdn /home/admin]
#cat /etc/resolv.conf 
nameserver 10.244.0.2
search default.svc.cluster.local svc.cluster.local cluster.local
options ndots:5

Test domain names of the form $SERVICE_NAME.$NAMESPACE.svc.cluster.local:

[root@base-7d77d4cc9d-9sbdn /home/admin] ping kube-dns.kube-system.svc.cluster.local  
[root@base-7d77d4cc9d-9sbdn /home/admin] ping kubernetes.default.svc.cluster.local
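
ClusterIPs are virtual and do not answer ICMP in every setup, so name resolution itself is more reliably confirmed with nslookup inside the pod (assuming the image ships it):

[root@base-7d77d4cc9d-9sbdn /home/admin] nslookup kube-dns.kube-system.svc.cluster.local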

III. Installing the dashboard

1. Directory after source extraction

Extraction directory: kubernetes/cluster/addons/dashboard

[root@master1 dashboard]# ll
-rw-rw-r-- 1 root root  264 Jun  5 02:15 dashboard-configmap.yaml
-rw-rw-r-- 1 root root 1831 Jul 29 23:58 dashboard-controller.yaml
-rw-rw-r-- 1 root root 1353 Jun  5 02:15 dashboard-rbac.yaml
-rw-rw-r-- 1 root root  551 Jun  5 02:15 dashboard-secret.yaml
-rw-rw-r-- 1 root root  322 Jun  5 02:15 dashboard-service.yaml
-rw-rw-r-- 1 root root  242 Jun  5 02:15 MAINTAINERS.md
-rw-rw-r-- 1 root root  176 Jun  5 02:15 OWNERS
-rw-rw-r-- 1 root root  400 Jun  5 02:15 README.md
[root@master1 dashboard]# sed -i 's@k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1@192.168.192.234:888/kubernetes-dashboard-amd64:v1.10.1@' ./dashboard-controller.yaml 
1.1 dashboard-service.yaml
[root@master1 dashboard]# cat dashboard-service.yaml 
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 443
    targetPort: 8443
  type: NodePort

Note: the Service type has been changed to NodePort.
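
If a stable external port is preferred over a randomly assigned one, the nodePort can be pinned explicitly (a sketch; 30443 is an arbitrary value within the default 30000-32767 NodePort range):

  ports:
  - port: 443
    targetPort: 8443
    nodePort: 30443
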
1.2 dashboard-configmap.yaml
[root@master1 dashboard]# cat dashboard-configmap.yaml 
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    # Allows editing resource and makes sure it is created first.
    addonmanager.kubernetes.io/mode: EnsureExists
  name: kubernetes-dashboard-settings
  namespace: kube-system
1.3 dashboard-controller.yaml
[root@master1 dashboard]# cat dashboard-controller.yaml 
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      containers:
      - name: kubernetes-dashboard
        image: 192.168.192.234:888/kubernetes-dashboard-amd64:v1.10.1
        resources:
          limits:
            cpu: 100m
            memory: 300Mi
          requests:
            cpu: 50m
            memory: 100Mi
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          # PLATFORM-SPECIFIC ARGS HERE
          - --auto-generate-certificates
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
        - name: tmp-volume
          mountPath: /tmp
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
1.4 dashboard-rbac.yaml
[root@master1 dashboard]# cat dashboard-rbac.yaml
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
1.5 dashboard-secret.yaml
[root@master1 dashboard]# cat dashboard-secret.yaml 
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    # Allows editing resource and makes sure it is created first.
    addonmanager.kubernetes.io/mode: EnsureExists
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    # Allows editing resource and makes sure it is created first.
    addonmanager.kubernetes.io/mode: EnsureExists
  name: kubernetes-dashboard-key-holder
  namespace: kube-system
type: Opaque
1.6 Resource summary
ConfigMap [kubernetes-dashboard-settings]: empty; the dashboard writes its settings into it at runtime
Secret [kubernetes-dashboard-key-holder] and Secret [kubernetes-dashboard-certs]: empty Opaque secrets, populated by the dashboard later
RoleBinding [kubernetes-dashboard-minimal] = Role [kubernetes-dashboard-minimal] + ServiceAccount [kubernetes-dashboard]
Deployment [kubernetes-dashboard]: --auto-generate-certificates generates the serving certificates; mounts secret [kubernetes-dashboard-certs] and runs as sa kubernetes-dashboard
Service [kubernetes-dashboard]: selects the backend pods

2. Installation and notes

[root@master1 dashboard]# kubectl apply -f   ./
configmap/kubernetes-dashboard-settings created
serviceaccount/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-key-holder created
service/kubernetes-dashboard created

3. Verification

3.1 Check the assigned nodePort
[root@master1 dashboard]# kubectl get svc kubernetes-dashboard  -n kube-system 
NAME                   TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
kubernetes-dashboard   NodePort   10.244.245.24   <none>        443:32230/TCP   4m42s

Since this is an intranet environment, a layer-4 proxy was placed in front, forwarding to port 32230 on the cluster's physical machines.
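
A minimal sketch of such a layer-4 proxy using the nginx stream module (the backend node IPs below are hypothetical placeholders; substitute the cluster nodes that expose the nodePort):

stream {
    upstream k8s_dashboard {
        server 192.168.192.231:32230;   # hypothetical cluster node
        server 192.168.192.232:32230;   # hypothetical cluster node
    }
    server {
        listen 32230;
        proxy_pass k8s_dashboard;
    }
}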

[root@master1 ~]# kubectl cluster-info
Kubernetes master is running at https://127.0.0.1:8443
CoreDNS is running at https://127.0.0.1:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
kubernetes-dashboard is running at https://127.0.0.1:8443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy

To view the dashboard's command-line flags:

[root@master1 ~]# kubectl exec -it kubernetes-dashboard-fcdb97778-w2gxn -n kube-system -- /dashboard --help
3.2 Logging in with a token

Access the dashboard at https://10.10.110.103:32230 (the proxy address is 10.10.110.103).

Certificate-based login is not supported by default, so create a token to log in:

[root@master1 ~]# kubectl create sa dashboard-admin -n kube-system
serviceaccount/dashboard-admin created
[root@master1 ~]# kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
clusterrolebinding.rbac.authorization.k8s.io/dashboard-admin created
[root@master1 ~]# ADMIN_SECRET=$(kubectl get secrets -n kube-system | grep dashboard-admin | awk '{print $1}')
[root@master1 ~]# DASHBOARD_LOGIN_TOKEN=$(kubectl describe secret -n kube-system ${ADMIN_SECRET} | grep -E '^token' | awk '{print $2}')
[root@master1 ~]# echo ${DASHBOARD_LOGIN_TOKEN}
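
An equivalent one-liner extracts and decodes the token directly with jsonpath (service-account tokens are stored base64-encoded under .data.token):

[root@master1 ~]# kubectl get secret ${ADMIN_SECRET} -n kube-system -o jsonpath='{.data.token}' | base64 -d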

Log in with this token.

3.3 Create a kubeconfig that uses the token
[root@master1 ~]# kubectl config set-cluster kubernetes   --certificate-authority=/etc/kubernetes/cert/ca.pem   --embed-certs=true   --server=https://127.0.0.1:8443   --kubeconfig=dashboard.kubeconfig
[root@master1 ~]# kubectl config set-credentials dashboard_user   --token=${DASHBOARD_LOGIN_TOKEN}   --kubeconfig=dashboard.kubeconfig
[root@master1 ~]# kubectl config set-context default   --cluster=kubernetes   --user=dashboard_user   --kubeconfig=dashboard.kubeconfig
[root@master1 ~]# kubectl config use-context default --kubeconfig=dashboard.kubeconfig
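
The resulting dashboard.kubeconfig can be selected on the dashboard login page, or sanity-checked from the shell first (the token was bound to cluster-admin above, so listing pods should succeed):

[root@master1 ~]# kubectl --kubeconfig=dashboard.kubeconfig get pods -n kube-system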

IV. Installing metrics-server

Collection: cAdvisor, Heapster, collectd, Statsd, Tcollector, Scout
Storage: InfluxDB, OpenTSDB, Elasticsearch
Visualization: Graphite, Grafana, facette, Cacti, Ganglia, DataDog
Alerting: Nagios, Prometheus, Icinga, Zabbix

Heapster: gathers metrics and event data from the k8s cluster and writes them to InfluxDB. Heapster is an aggregator: it rolls up the cAdvisor data from every node and ships it to InfluxDB, giving a richer and more complete dataset than raw cAdvisor while storing comparatively little in InfluxDB. It presupposes that cAdvisor is collecting host and container resource usage on each node.
InfluxDB: a time-series database that stores the data under a configured directory
cAdvisor: collects the per-node data that gets written to InfluxDB
Grafana: provides the web console; runs custom metric queries against InfluxDB and visualizes the results
Prometheus: supports monitoring of containers, nodes, and the k8s cluster itself

Note: since v1.12, Heapster has a successor, metrics-server.

1. Modify the configuration files

Download the configuration files:

https://raw.githubusercontent.com/kubernetes-incubator/metrics-server/master/deploy/1.8%2B/aggregated-metrics-reader.yaml
https://raw.githubusercontent.com/kubernetes-incubator/metrics-server/master/deploy/1.8%2B/auth-delegator.yaml
https://raw.githubusercontent.com/kubernetes-incubator/metrics-server/master/deploy/1.8%2B/auth-reader.yaml
https://raw.githubusercontent.com/kubernetes-incubator/metrics-server/master/deploy/1.8%2B/metrics-apiservice.yaml
https://raw.githubusercontent.com/kubernetes-incubator/metrics-server/master/deploy/1.8%2B/metrics-server-deployment.yaml
https://raw.githubusercontent.com/kubernetes-incubator/metrics-server/master/deploy/1.8%2B/metrics-server-service.yaml
https://raw.githubusercontent.com/kubernetes-incubator/metrics-server/master/deploy/1.8%2B/resource-reader.yaml

Or clone the repository: # git clone https://github.com/kubernetes-incubator/metrics-server.git
[root@master1 metric]# cd metrics-server/deploy/1.8+/

1.1 aggregated-metrics-reader.yaml

[root@master1 1.8+]# cat aggregated-metrics-reader.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:aggregated-metrics-reader
  labels:
    rbac.authorization.k8s.io/aggregate-to-view: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: ["metrics.k8s.io"]
  resources: ["pods"]
  verbs: ["get", "list", "watch"]

1.2 auth-delegator.yaml

[root@master1 1.8+]# cat auth-delegator.yaml 
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system

1.3 auth-reader.yaml

[root@master1 1.8+]# cat auth-reader.yaml 
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system

1.4 metrics-apiservice.yaml

[root@master1 1.8+]# cat metrics-apiservice.yaml 
---
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
spec:
  service:
    name: metrics-server
    namespace: kube-system
  group: metrics.k8s.io
  version: v1beta1
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100

1.5 metrics-server-deployment.yaml

[root@master1 1.8+]# cat metrics-server-deployment.yaml 
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      volumes:
      # mount in tmp so we can safely use from-scratch images and/or read-only containers
      - name: tmp-dir
        emptyDir: {}
      containers:
      - name: metrics-server
        image: 192.168.192.234:888/metrics-server-amd64:v0.3.1
        args:
        - --metric-resolution=30s
        #- --kubelet-port=10250
        #- --deprecated-kubelet-completely-insecure=true
        - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP
        imagePullPolicy: Always
        volumeMounts:
        - name: tmp-dir
          mountPath: /tmp
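
Note: if the kubelet serving certificates are not signed by a CA that metrics-server trusts, node scraping fails with TLS errors; metrics-server v0.3.x accepts an extra flag for that case (add it under args only if actually needed):

        - --kubelet-insecure-tls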

1.6 metrics-server-service.yaml

[root@master1 1.8+]# cat metrics-server-service.yaml 
apiVersion: v1
kind: Service
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/name: "Metrics-server"
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    k8s-app: metrics-server
  ports:
  - port: 443
    protocol: TCP
    targetPort: 443
    nodePort: 30968
  type: NodePort

1.7 resource-reader.yaml

[root@master1 1.8+]# cat resource-reader.yaml

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system

2. Installation

[root@master1 metrics-server]# kubectl apply -f .
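
It can also help to confirm that the aggregated API registered before checking pods; the AVAILABLE column should turn True once the deployment is up:

[root@master1 metrics-server]# kubectl get apiservice v1beta1.metrics.k8s.io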

3. Verification and testing

3.1 Check the running status

[root@master1 metrics-server]# kubectl get pods -n kube-system  -l k8s-app=metrics-server
NAME                                     READY   STATUS    RESTARTS   AGE
metrics-server-v0.3.1-648c64499d-94x7l   1/1     Running   0          2m11s
[root@master1 metrics-server]# kubectl get svc -n kube-system   -l kubernetes.io/name=Metrics-server
NAME             TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)   AGE
metrics-server   ClusterIP   10.244.9.135   <none>        443/TCP   3m25s
3.2 Inspect the metrics
[root@master1 yaml]# curl -sSL --cacert /etc/kubernetes/cert/ca.pem --cert /opt/k8s/work/cert/admin.pem --key /opt/k8s/work/cert/admin-key.pem https://127.0.0.1:8443/apis/metrics.k8s.io/v1beta1/nodes/   | jq .
[root@master1 yaml]# curl -sSL --cacert /etc/kubernetes/cert/ca.pem --cert /opt/k8s/work/cert/admin.pem --key /opt/k8s/work/cert/admin-key.pem https://127.0.0.1:8443/apis/metrics.k8s.io/v1beta1/pods/   | jq .
[root@master1 ~]# kubectl get --raw "/apis/metrics.k8s.io/v1beta1" | jq .
[root@master1 ~]# kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes" | jq .
[root@master1 ~]# kubectl top nodes
NAME      CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
master1   110m         2%     832Mi           10%       
master2   104m         2%     4406Mi          57%       
master3   111m         2%     4297Mi          55% 
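
Pod-level metrics work the same way once the API is serving:

[root@master1 ~]# kubectl top pods -n kube-system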

References:
https://github.com/kubernetes-incubator/metrics-server/issues/40
https://kubernetes.io/docs/tasks/debug-application-cluster/core-metrics-pipeline/
https://github.com/kubernetes-incubator/metrics-server/issues/25
