《kubernetes-1.8.0》08-addon-dashboard

《Kubernetes 1.8.0 Test Environment Installation and Deployment》

Date: 2017-11-23

1. Modify the dashboard configuration

Download the YAML:

$ mkdir ~/dashboard
$ cd ~/dashboard
$ wget https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml

## Image note: the required images have already been pulled from Google's registry and packaged as tarballs
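
A rough sketch of how those tarballs can be produced and loaded, assuming Docker is the container runtime and using the image tags from the manifest below:

# On a machine that can reach gcr.io: pull the images and save them as tarballs
docker pull gcr.io/google_containers/kubernetes-dashboard-init-amd64:v1.0.1
docker pull gcr.io/google_containers/kubernetes-dashboard-amd64:v1.7.1
docker save -o dashboard-init.tar gcr.io/google_containers/kubernetes-dashboard-init-amd64:v1.0.1
docker save -o dashboard.tar gcr.io/google_containers/kubernetes-dashboard-amd64:v1.7.1

# Copy the tarballs to every node, then load them into the local image cache
docker load -i dashboard-init.tar
docker load -i dashboard.tar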

Here is the kubernetes-dashboard.yaml used in this example:

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.7.
#
# Example usage: kubectl create -f <this_file>

# ------------------- Dashboard Secret ------------------- #

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque

---
# ------------------- Dashboard Service Account ------------------- #

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Role & Role Binding ------------------- #

kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to create and watch for changes of 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create", "watch"]
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Deployment ------------------- #

kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      initContainers:
      - name: kubernetes-dashboard-init
        image: gcr.io/google_containers/kubernetes-dashboard-init-amd64:v1.0.1
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
      containers:
      - name: kubernetes-dashboard
        image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.7.1
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          - --tls-key-file=/certs/dashboard.key
          - --tls-cert-file=/certs/dashboard.crt
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          # - --apiserver-host=http://my-address:port
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
          readOnly: true
          # Create on-disk volume to store exec logs
        - mountPath: /tmp
          name: tmp-volume
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule

---
# ------------------- Dashboard Service ------------------- #

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

Deploy the YAML file:


$ kubectl create -f kubernetes-dashboard.yaml

# Check the status:
[root@node-131 dashboard]# kubectl get pods -n kube-system -o wide
NAME                                      READY     STATUS    RESTARTS   AGE       IP               NODE
...
kubernetes-dashboard-7f5d4d5785-hqq5v     1/1       Running   0          1m        192.168.188.5    node.133

[root@node-131 dashboard]# kubectl get svc -n kube-system -o wide
NAME                   TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE       SELECTOR
...
kubernetes-dashboard   ClusterIP   10.254.219.101   <none>        443/TCP         2m        k8s-app=kubernetes-dashboard

2. Create an insecure-port proxy

Possibly because of TLS, accessing the API on the secure port returns an unauthorized error:

{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {

  },
  "status": "Failure",
  "message": "Unauthorized",
  "reason": "Unauthorized",
  "code": 401
}
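
For reference, this response can be reproduced with an anonymous request against the secure port; the port number 6443 below is an assumption and depends on how the apiserver was started:

curl -k https://${apiserver-address}:6443/api/v1/namespaces/kube-system/services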

Set that aside for now, and use kubectl proxy to listen on the insecure port on a physical interface:

[root@node-131 dashboard]#  nohup kubectl proxy --address=0.0.0.0 --accept-hosts='^*$' > /tmp/proxy.log 2>&1 &

[root@node-131 dashboard]# jobs 
[1]+  Running                 nohup kubectl proxy --address=0.0.0.0 --accept-hosts='^*$' > /tmp/proxy.log 2>&1 &
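
A quick sanity check that the proxy is answering (8001 is kubectl proxy's default port):

curl http://127.0.0.1:8001/api/v1/namespaces/kube-system/services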

3. Test access

http://${apiserver-address}:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login

By default the login page supports the kubeconfig and token methods; choose Skip to bypass them.

The dashboard then complains that it has no API access permissions.

4. Modify the anonymous user's access permissions

The official wiki provides a YAML that grants admin privileges directly.

cat > dashboard-admin.yaml << EOF
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
EOF

kubectl create -f dashboard-admin.yaml
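
Optionally confirm that the binding exists (output omitted here):

kubectl describe clusterrolebinding kubernetes-dashboard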

Refresh the page:

The dashboard now shows something, at least.
Token authentication, basic-auth authentication, kubeconfig authentication, and so on are covered further down.

5. Deploy heapster

Create the following YAML files:

1) grafana.yaml

mkdir ~/heapster
cd ~/heapster
cat > grafana.yaml << EOF
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: monitoring-grafana
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: grafana
    spec:
      containers:
      - name: grafana
        image: gcr.io/google-containers/heapster-grafana-amd64:v4.4.3
        ports:
        - containerPort: 3000
          protocol: TCP
        volumeMounts:
        - mountPath: /etc/ssl/certs
          name: ca-certificates
          readOnly: true
        - mountPath: /var
          name: grafana-storage
        env:
        - name: INFLUXDB_HOST
          value: monitoring-influxdb
        - name: GF_SERVER_HTTP_PORT
          value: "3000"
          # The following env variables are required to make Grafana accessible via
          # the kubernetes api-server proxy. On production clusters, we recommend
          # removing these env variables, setup auth for grafana, and expose the grafana
          # service using a LoadBalancer or a public IP.
        - name: GF_AUTH_BASIC_ENABLED
          value: "false"
        - name: GF_AUTH_ANONYMOUS_ENABLED
          value: "true"
        - name: GF_AUTH_ANONYMOUS_ORG_ROLE
          value: Admin
        - name: GF_SERVER_ROOT_URL
          # If you're only using the API Server proxy, set this value instead:
          # value: /api/v1/namespaces/kube-system/services/monitoring-grafana/proxy
          value: /
      volumes:
      - name: ca-certificates
        hostPath:
          path: /etc/ssl/certs
      - name: grafana-storage
        emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  labels:
    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
    # If you are NOT using this as an addon, you should comment out this line.
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: monitoring-grafana
  name: monitoring-grafana
  namespace: kube-system
spec:
  # In a production setup, we recommend accessing Grafana through an external Loadbalancer
  # or through a public IP.
  # type: LoadBalancer
  # You could also use NodePort to expose the service at a randomly-generated port
  # type: NodePort
  ports:
  - port: 80
    targetPort: 3000
  selector:
    k8s-app: grafana
EOF

2) heapster.yaml

cat > heapster.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: heapster
  namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: heapster
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: heapster
    spec:
      serviceAccountName: heapster
      containers:
      - name: heapster
        image: gcr.io/google-containers/heapster-amd64:v1.4.0
        imagePullPolicy: IfNotPresent
        command:
        - /heapster
        - --source=kubernetes:https://kubernetes.default
        - --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086
---
apiVersion: v1
kind: Service
metadata:
  labels:
    task: monitoring
    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
    # If you are NOT using this as an addon, you should comment out this line.
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: Heapster
  name: heapster
  namespace: kube-system
spec:
  ports:
  - port: 80
    targetPort: 8082
  selector:
    k8s-app: heapster
EOF

3) influxdb.yaml

cat > influxdb.yaml << EOF
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: monitoring-influxdb
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: influxdb
    spec:
      containers:
      - name: influxdb
        image: gcr.io/google-containers/heapster-influxdb-amd64:v1.3.3
        volumeMounts:
        - mountPath: /data
          name: influxdb-storage
      volumes:
      - name: influxdb-storage
        emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  labels:
    task: monitoring
    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
    # If you are NOT using this as an addon, you should comment out this line.
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: monitoring-influxdb
  name: monitoring-influxdb
  namespace: kube-system
spec:
  ports:
  - port: 8086
    targetPort: 8086
  selector:
    k8s-app: influxdb
EOF

4) heapster-rbac.yaml

cat > heapster-rbac.yaml << EOF
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: heapster
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:heapster
subjects:
- kind: ServiceAccount
  name: heapster
  namespace: kube-system
EOF

The YAML above comes from: https://github.com/kubernetes/heapster/tree/master/deploy/kube-config/
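
heapster-rbac.yaml binds the heapster ServiceAccount to the system:heapster ClusterRole, which should already exist among Kubernetes 1.8's default RBAC roles; this can be confirmed before applying (output omitted):

kubectl get clusterrole system:heapster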

Deploy the YAML files:

kubectl create -f heapster-rbac.yaml
kubectl create -f grafana.yaml
kubectl create -f heapster.yaml 
kubectl create -f influxdb.yaml

Check that the pods are running:


[root@node-131 heapster]# kubectl get pods -n kube-system
NAME                                      READY     STATUS    RESTARTS   AGE
...
heapster-546b844c7d-8fmpb                 1/1       Running   0          57s
...
monitoring-grafana-c659796d8-lhxct        1/1       Running   0          1m
monitoring-influxdb-7db47d84b-m7jqg       1/1       Running   0          52s
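
If the graphs never appear, the heapster pod's logs are the first place to look; the pod name below is the one from this run and will differ in other clusters:

kubectl -n kube-system logs heapster-546b844c7d-8fmpb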

Check whether the dashboard now shows usage graphs:

Check whether grafana shows graphs:

Visit: http://${apiserver-address}:8001/api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/

At this point the dashboard is more or less up, though a pile of issues remain:

Resolving the remaining issues

While doing this earlier, clicking Sign In on the dashboard login page did nothing at all. It turned out that dashboard 1.7.1 does not support authenticating over plain HTTP (in this example kubectl proxy exposes the apiserver's insecure port), presumably because the dashboard relies on authentication headers carried over HTTPS.

Fortunately, the dashboard wiki on GitHub documents another way to expose the dashboard port: Accessing-Dashboard---1.7.X-and-above

It explains what to do:

Following that guide, edit the kubernetes-dashboard Service in place and change type: ClusterIP to type: NodePort.
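
For example, either interactively or with a patch (both are standard kubectl operations):

kubectl -n kube-system edit service kubernetes-dashboard    # change "type: ClusterIP" to "type: NodePort"
# or non-interactively:
kubectl -n kube-system patch service kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'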

Check the exposed port:

[root@node-131 traefik]# kubectl get svc -n kube-system
NAME                      TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                       AGE
...
kubernetes-dashboard      NodePort    10.254.129.201   <none>        443:35751/TCP                 2d
...

Delete the privilege-granting RBAC binding created earlier (go back and find that dashboard-admin.yaml and delete it):

cd  ~/dashboard/
kubectl delete -f dashboard-admin.yaml

Access the dashboard over HTTPS at the NodePort shown above (e.g. https://${node-address}:35751/) and choose Skip:

A pile of permission errors appears:


Try logging in with a token instead:


## Create the ServiceAccount (admin) for the dashboard to use:
kubectl create sa admin

## Inspect the created ServiceAccount
[root@node-131 ~]# kubectl describe sa  admin 
Name:         admin
Namespace:    default
Labels:       <none>
Annotations:  <none>

Image pull secrets:  <none>

Mountable secrets:   admin-token-2dzj9

Tokens:              admin-token-2dzj9

Events:  <none>
### The account name is admin; its corresponding secret is admin-token-2dzj9

## Create a clusterrolebinding that binds the user admin (system:serviceaccount:default:admin) to the cluster-admin ClusterRole

[root@node-131 ~]# kubectl create clusterrolebinding admin --clusterrole=cluster-admin --user=system:serviceaccount:default:admin

### Inspect the clusterrolebinding
[root@node-131 ~]# kubectl describe clusterrolebinding admin
Name:         admin
Labels:       <none>
Annotations:  <none>
Role:
  Kind:  ClusterRole
  Name:  cluster-admin
Subjects:
  Kind  Name                                 Namespace
  ----  ----                                 ---------
  User  system:serviceaccount:default:admin 

## Look up the token for the ServiceAccount admin:
[root@node-131 ~]# kubectl describe secrets admin-token-2dzj9
Name:         admin-token-2dzj9
Namespace:    default
Labels:       <none>
Annotations:  kubernetes.io/service-account.name=admin
              kubernetes.io/service-account.uid=011ffef5-d67b-11e7-8c72-005056bc80ed

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     2053 bytes
namespace:  7 bytes
token:      eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.lcy5pby9zZXJ2aWNlYWNjb...
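
Instead of copying it out of the describe output, the token can also be extracted directly; this assumes the secret name shown above:

kubectl get secret admin-token-2dzj9 -o jsonpath='{.data.token}' | base64 -d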


Copy the token into the login box and click Sign In:

Full permissions are now granted:


Basic-auth authentication:

Modify the kubernetes-dashboard.yaml file and add the corresponding args:
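
The original screenshot is not reproduced here; for dashboard 1.7.x the relevant container flag is --authentication-mode, so the addition presumably looks like this:

        args:
          - --tls-key-file=/certs/dashboard.key
          - --tls-cert-file=/certs/dashboard.crt
          - --authentication-mode=basic    # assumed flag: enables basic auth on the dashboard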

Modify the apiserver configuration file on every master (/etc/kubernetes/apiserver)

Add:
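
The screenshot with the exact line is missing; given the basic_auth.csv created below, the flag appended to the apiserver startup arguments is presumably:

--basic-auth-file=/srv/kubernetes/basic_auth.csv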

## Since RBAC is in effect, we need a principal that already has permissions; cluster-admin is the role with the broadest rights
[root@node-131 ~]# kubectl get clusterrole
NAME                                                                   AGE
...
cluster-admin                                                          8d
...

## Look at the corresponding clusterrolebinding
[root@node-131 ~]# kubectl get clusterrolebinding
NAME                                           AGE
...
cluster-admin                                  8d
...

## Look at its details
[root@node-131 ~]# kubectl describe clusterrolebinding cluster-admin
Name:         cluster-admin
Labels:       kubernetes.io/bootstrapping=rbac-defaults
Annotations:  rbac.authorization.kubernetes.io/autoupdate=true
Role:
  Kind:  ClusterRole
  Name:  cluster-admin
Subjects:
  Kind   Name            Namespace
  ----   ----            ---------
  Group  system:masters  
## This shows that the group system:masters is bound to the cluster-admin ClusterRole

Create the corresponding basic-auth file:

Format: password,user,uid,"group1,group2,group3"

[root@node-131 ~]# cat /srv/kubernetes/basic_auth.csv
cluster-admin,cluster-admin,2,system:masters

!!! Important !!! In basic_auth.csv the group field must be system:masters, because after authentication succeeds the group is passed on to the apiserver, and the existing cluster-admin clusterrolebinding then maps that group to the cluster-admin ClusterRole, which grants access to all resources.

Restart the apiserver:

systemctl restart kube-apiserver
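
Basic auth can be verified from the command line before trying the dashboard; the credentials come from basic_auth.csv above, while the secure port 6443 is an assumption:

curl -k -u cluster-admin:cluster-admin https://${apiserver-address}:6443/api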

Try logging in with the cluster-admin account:

After logging in there are no more warnings, and full privileges are available:

References:

http://tonybai.com/2017/09/26/some-notes-about-deploying-kubernetes-dashboard-1-7-0/

https://github.com/kubernetes/dashboard/wiki/Accessing-Dashboard---1.7.X-and-above

https://github.com/kubernetes/heapster/blob/master/deploy/kube-config/
