环境配置
IP           hostname    操作系统
10.11.66.44  k8s-master  centos7.6
10.11.66.27  k8s-node1   centos7.7
10.11.66.28  k8s-node2   centos7.7
[ root@localhost ~]
[ root@localhost ~]
[ root@localhost ~]
[ root@k8s-master ~]
CentOS Linux release 7.6.1810 (Core)
[ root@k8s-master ~]
SELinux status: disabled
[ root@k8s-master ~]
● firewalld.service - firewalld - dynamic firewall daemon
Loaded: loaded ( /usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
Active: inactive ( dead)
Docs: man:firewalld( 1 )
[ root@k8s-master ~]
> 10.11.66.44 k8s-master
> 10.11.66.27 k8s-node1
> 10.11.66.28 k8s-node2
> EOF
[ root@k8s-master ~]
PING k8s-master ( 10.11 .66.44) 56 ( 84 ) bytes of data.
64 bytes from k8s-master ( 10.11 .66.44) : icmp_seq = 1 ttl = 64 time = 0.012 ms
64 bytes from k8s-master ( 10.11 .66.44) : icmp_seq = 2 ttl = 64 time = 0.016 ms
^C
--- k8s-master ping statistics ---
2 packets transmitted, 2 received, 0 % packet loss, time 1001ms
rtt min/avg/max/mdev = 0.012 /0.014/0.016/0.002 ms
[ root@k8s-master ~]
PING k8s-node1 ( 10.11 .66.27) 56 ( 84 ) bytes of data.
64 bytes from k8s-node1 ( 10.11 .66.27) : icmp_seq = 1 ttl = 64 time = 0.924 ms
64 bytes from k8s-node1 ( 10.11 .66.27) : icmp_seq = 2 ttl = 64 time = 1.36 ms
^C
--- k8s-node1 ping statistics ---
2 packets transmitted, 2 received, 0 % packet loss, time 1010ms
rtt min/avg/max/mdev = 0.924 /1.146/1.369/0.225 ms
[ root@k8s-master ~]
PING k8s-node2 ( 10.11 .66.28) 56 ( 84 ) bytes of data.
64 bytes from k8s-node2 ( 10.11 .66.28) : icmp_seq = 1 ttl = 64 time = 1.18 ms
64 bytes from k8s-node2 ( 10.11 .66.28) : icmp_seq = 2 ttl = 64 time = 1.30 ms
^C
--- k8s-node2 ping statistics ---
2 packets transmitted, 2 received, 0 % packet loss, time 1003ms
rtt min/avg/max/mdev = 1.180 /1.240/1.300/0.060 ms
[ root@k8s-master ~]
1 : lo: < LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2 : eth0: < BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
link/ether 00:0c:29:26:38:13 brd ff:ff:ff:ff:ff:ff
[ root@k8s-master ~]
07B64D56-0D8B-6047-8E55-9ADE9F263813
[ root@k8s-master ~]
[ root@k8s-master ~]
[ root@k8s-master ~]
[ root@k8s-master ~]
[ root@k8s-master ~]
[ root@k8s-master ~]
[ root@k8s-node2 ~]
# Load the kernel modules required for kube-proxy IPVS mode.
modprobe -- ip_vs
# IPVS round-robin scheduler.
modprobe -- ip_vs_rr
# IPVS weighted round-robin scheduler.
modprobe -- ip_vs_wrr
# IPVS source-hashing scheduler.
modprobe -- ip_vs_sh
# Connection tracking for IPVS/NAT. NOTE(review): nf_conntrack_ipv4 exists on
# kernels < 4.19; on 4.19+ the module is named nf_conntrack — confirm kernel.
modprobe -- nf_conntrack_ipv4
# Needed so the net.bridge.bridge-nf-call-* sysctls below take effect.
modprobe -- br_netfilter
EOF
[ root@k8s-master ~]
[ root@k8s-master ~]
# Kernel parameters for Kubernetes nodes (sysctl.conf format; '#' starts a comment).
# Make bridged traffic visible to iptables (required by kube-proxy / CNI).
net.bridge.bridge-nf-call-iptables= 1
net.bridge.bridge-nf-call-ip6tables= 1
# Allow the node to forward pod traffic.
net.ipv4.ip_forward= 1
# Keep tcp_tw_recycle off (breaks NAT'ed clients; removed in kernel 4.12+).
net.ipv4.tcp_tw_recycle= 0
# Avoid swapping — kubelet refuses to start with swap in use.
vm.swappiness= 0
# 1 = kernel allows allocating all physical memory regardless of current state.
vm.overcommit_memory= 1
# Do not panic the kernel on OOM; let the OOM killer act.
vm.panic_on_oom= 0
# Raise inotify / open-file limits for many containers.
fs.inotify.max_user_watches= 89100
fs.file-max= 52706963
fs.nr_open= 52706963
# IPv6 is unused in this cluster.
net.ipv6.conf.all.disable_ipv6= 1
# Enlarge the conntrack table for kube-proxy.
net.netfilter.nf_conntrack_max= 2310720
EOF
[ root@k8s-master ~]
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
net.ipv4.tcp_tw_recycle = 0
vm.swappiness = 0
vm.overcommit_memory = 1
vm.panic_on_oom = 0
fs.inotify.max_user_watches = 89100
fs.file-max = 52706963
fs.nr_open = 52706963
net.ipv6.conf.all.disable_ipv6 = 1
net.netfilter.nf_conntrack_max = 2310720
- overcommit_memory=0:表示内核将检查是否有足够的可用内存供应用进程使用;如果有足够的可用内存,内存申请允许;否则,内存申请失败,并把错误返回给应用进程。
- overcommit_memory=1:表示内核允许分配所有的物理内存,而不管当前的内存状态如何。
- overcommit_memory=2:表示内核允许分配超过所有物理内存和交换空间总和的内存。
部署docker
[ root@k8s-master ~]
docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-selinux \
docker-engine-selinux \
docker-engine
Loaded plugins: fastestmirror
No Match for argument: docker
No Match for argument: docker-client
No Match for argument: docker-client-latest
No Match for argument: docker-common
No Match for argument: docker-latest
No Match for argument: docker-latest-logrotate
No Match for argument: docker-logrotate
No Match for argument: docker-selinux
No Match for argument: docker-engine-selinux
No Match for argument: docker-engine
No Packages marked for removal
[ root@k8s-master ~]
[ root@k8s-master ~]
Loaded plugins: fastestmirror
adding repo from: http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
grabbing file http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo to /etc/yum.repos.d/docker-ce.repo
repo saved to /etc/yum.repos.d/docker-ce.repo
[ root@k8s-master ~]
[ root@k8s-master ~]
[ root@k8s-master ~]
[ root@k8s-master ~]
[ root@k8s-master ~]
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[ root@k8s-master ~]
[ root@k8s-master ~]
{
"registry-mirrors" : [ "https://bk6kzfqm.mirror.aliyuncs.com" ] ,
"exec-opts" : [ "native.cgroupdriver=systemd" ] ,
"log-driver" : "json-file" ,
"log-opts" : {
"max-size" : "100m"
} ,
"storage-driver" : "overlay2" ,
"storage-opts" : [
"overlay2.override_kernel_check=true"
]
}
EOF
[ root@k8s-master ~]
[ root@k8s-master ~]
部署kubeadm和kubelet
[ root@k8s-master ~]
# Aliyun mirror of the Kubernetes el7 yum repository.
# NOTE: the section header must be exactly "[kubernetes]" — a space inside the
# brackets corrupts the repo id and yum will not match the repository.
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
# GPG checks disabled because the mirror's signatures can lag upstream.
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
[ root@k8s-master ~]
[ root@k8s-master ~]
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[ root@k8s-master ~]
[ root@k8s-master ~]
[ root@k8s-master ~]
[ root@k8s-master ~]
W0803 15 :10:18.910528 25638 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [ kubelet.config.k8s.io kubeproxy.config.k8s.io]
k8s.gcr.io/kube-apiserver:v1.18.6
k8s.gcr.io/kube-controller-manager:v1.18.6
k8s.gcr.io/kube-scheduler:v1.18.6
k8s.gcr.io/kube-proxy:v1.18.6
k8s.gcr.io/pause:3.2
k8s.gcr.io/etcd:3.4.3-0
k8s.gcr.io/coredns:1.6.7
[ root@k8s-master ~]
# Versions of the control-plane images kubeadm v1.18.6 expects.
# NOTE: shell assignments must have no spaces around '=' —
# "KUBE_VERSION = v1.18.6" would run a command named KUBE_VERSION.
KUBE_VERSION=v1.18.6
PAUSE_VERSION=3.2
CORE_DNS_VERSION=1.6.7
ETCD_VERSION=3.4.3-0

# Pull the four control-plane components from the kubeimage mirror,
# retag them with the k8s.gcr.io names kubeadm looks for, then remove
# the mirror tag (the underlying image layers are kept).
for component in kube-proxy kube-controller-manager kube-apiserver kube-scheduler; do
  docker pull "kubeimage/${component}-amd64:${KUBE_VERSION}"
  docker tag  "kubeimage/${component}-amd64:${KUBE_VERSION}" "k8s.gcr.io/${component}:${KUBE_VERSION}"
  docker rmi  "kubeimage/${component}-amd64:${KUBE_VERSION}"
done

# pause / coredns / etcd come from the Aliyun google_containers mirror.
ALIYUN_REPO=registry.cn-hangzhou.aliyuncs.com/google_containers
for image in "pause:${PAUSE_VERSION}" "coredns:${CORE_DNS_VERSION}" "etcd:${ETCD_VERSION}"; do
  docker pull "${ALIYUN_REPO}/${image}"
  docker tag  "${ALIYUN_REPO}/${image}" "k8s.gcr.io/${image}"
  docker rmi  "${ALIYUN_REPO}/${image}"
done
[ root@k8s-master ~]
或者
[ root@k8s-master ~]
[ root@k8s-node1 ~]
初始化集群
[ root@k8s-master ~]
--kubernetes-version=v1.18.6:加上该参数后启动相关镜像(刚才下载的那一堆)
--pod-network-cidr=10.244.0.0/16:(Pod 中间网络通讯我们用 flannel,flannel 要求是 10.244.0.0/16,这个 IP 段就是 Pod 的 IP 段)
--service-cidr=10.1.0.0/16:Service(服务)网段(和微服务架构有关)
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME /.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME /.kube/config
sudo chown $( id -u) : $( id -g) $HOME /.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 10.11.66.44:6443 --token ecqlbq.1k41wwa3gn57oonq \
--discovery-token-ca-cert-hash sha256:daeec6df945f3f4a646d074d9f9144f414373106ff8849450c1d10b5a663e87e
[ root@k8s-master ~]
[ root@k8s-master ~]
[ root@k8s-master ~]
[ root@k8s-master ~]
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-system coredns-66bff467f8-cxtrj 0 /1 Pending 0 8m14s < none> < none> < none> < none>
kube-system coredns-66bff467f8-znlm2 0 /1 Pending 0 8m14s < none> < none> < none> < none>
kube-system etcd-k8s-master 1 /1 Running 0 8m23s 10.11 .66.44 k8s-master < none> < none>
kube-system kube-apiserver-k8s-master 1 /1 Running 0 8m23s 10.11 .66.44 k8s-master < none> < none>
kube-system kube-controller-manager-k8s-master 1 /1 Running 0 8m23s 10.11 .66.44 k8s-master < none> < none>
kube-system kube-proxy-vh964 1 /1 Running 0 8m14s 10.11 .66.44 k8s-master < none> < none>
kube-system kube-scheduler-k8s-master 1 /1 Running 0 8m23s 10.11 .66.44 k8s-master < none> < none>
[ root@k8s-master ~]
NAME READY STATUS RESTARTS AGE
coredns-66bff467f8-cxtrj 0 /1 Pending 0 3m52s
coredns-66bff467f8-znlm2 0 /1 Pending 0 3m52s
etcd-k8s-master 1 /1 Running 0 4m1s
kube-apiserver-k8s-master 1 /1 Running 0 4m1s
kube-controller-manager-k8s-master 1 /1 Running 0 4m1s
kube-proxy-vh964 1 /1 Running 0 3m52s
kube-scheduler-k8s-master 1 /1 Running 0 4m1s
集群网络配置(选择一种就可以)
flannel 网络
[ root@k8s-master ~]
Pod Network(使用七牛云镜像)
[ root@k8s-master ~]
[ root@k8s-master ~]
[ root@k8s-master ~]
[ root@k8s-master ~]
calico 网络
[ root@k8s-master ~]
[ root@k8s-master ~]
- name: CALICO_IPV4POOL_CIDR
value: "10.244.0.0/16"
[ root@k8s-master ~]
[ root@k8s-master ~]
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-578894d4cd-rchx6 1 /1 Running 0 2m31s
calico-node-slgg9 1 /1 Running 0 2m32s
coredns-66bff467f8-cxtrj 1 /1 Running 0 55m
coredns-66bff467f8-znlm2 1 /1 Running 0 55m
etcd-k8s-master 1 /1 Running 0 55m
kube-apiserver-k8s-master 1 /1 Running 0 55m
kube-controller-manager-k8s-master 1 /1 Running 0 55m
kube-proxy-vh964 1 /1 Running 0 55m
kube-scheduler-k8s-master 1 /1 Running 0 55m
kubernetes集群中添加node节点
[ root@k8s-node1 ~]
--discovery-token-ca-cert-hash sha256:daeec6df945f3f4a646d074d9f9144f414373106ff8849450c1d10b5a663e87e
[ root@k8s-node2 ~]
--discovery-token-ca-cert-hash sha256:daeec6df945f3f4a646d074d9f9144f414373106ff8849450c1d10b5a663e87e
[ root@k8s-master ~]
[ root@k8s-master ~]
NAME STATUS ROLES AGE VERSION
k8s-master Ready master 64m v1.18.6
k8s-node1 Ready < none> 3m37s v1.18.6
k8s-node2 Ready < none> 3m36s v1.18.6
kube-proxy开启ipvs
[ root@k8s-master ~]
[ root@k8s-master ~]
[ root@k8s-master ~]
Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
configmap/kube-proxy configured
[ root@k8s-master ~]
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-578894d4cd-rchx6 1 /1 Running 0 14m
calico-node-kfc5p 1 /1 Running 0 7m17s
calico-node-slgg9 1 /1 Running 0 14m
calico-node-xcc92 1 /1 Running 0 7m16s
coredns-66bff467f8-cxtrj 1 /1 Running 0 67m
coredns-66bff467f8-znlm2 1 /1 Running 0 67m
etcd-k8s-master 1 /1 Running 0 67m
kube-apiserver-k8s-master 1 /1 Running 0 67m
kube-controller-manager-k8s-master 1 /1 Running 0 67m
kube-proxy-6fnpb 1 /1 Running 0 16s
kube-proxy-tflld 1 /1 Running 0 20s
kube-proxy-x47c8 1 /1 Running 0 26s
kube-scheduler-k8s-master 1 /1 Running 0 67m
部署 kubernetes-dashboard
# Write the Kubernetes Dashboard v2.0.0-beta1 manifest. The delimiter is quoted
# ('EOF') so nothing inside the here-doc is ever expanded by the shell.
# NOTE(review): the original transcript lost all YAML indentation; this is the
# upstream v2.0.0-beta1 recommended.yaml with the author's two modifications:
#   1. the Service is exposed as NodePort 30000;
#   2. the auto-created kubernetes-dashboard-certs Secret is commented out,
#      because a custom certs Secret is created manually later.
cat > recommended.yaml <<'EOF'
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort          # expose the UI outside the cluster
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30000     # fixed port: https://<node-ip>:30000/
  selector:
    k8s-app: kubernetes-dashboard

---

# The auto-generated certs Secret is intentionally commented out: a custom
# kubernetes-dashboard-certs Secret is created manually from self-signed certs.
#apiVersion: v1
#kind: Secret
#metadata:
#  labels:
#    k8s-app: kubernetes-dashboard
#  name: kubernetes-dashboard-certs
#  namespace: kubernetes-dashboard
#type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
  # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.0.0-beta1
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
            # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: kubernetes-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-metrics-scraper
  name: kubernetes-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: kubernetes-metrics-scraper
    spec:
      containers:
        - name: kubernetes-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.0
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
EOF
创建证书
[ root@k8s-master ~]
[ root@k8s-master kubernetes]
[ root@k8s-master kubernetes]
[ root@k8s-master dashboard-certs]
namespace/kubernetes-dashboard created
[ root@k8s-master dashboard-certs]
NAME STATUS AGE
default Active 75m
kube-node-lease Active 75m
kube-public Active 75m
kube-system Active 75m
kubernetes-dashboard Active 9s
[ root@k8s-master dashboard-certs]
Generating RSA private key, 2048 bit long modulus
.. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. .. +++
.. .. .. .. .. +++
e is 65537 ( 0x10001)
[ root@k8s-master dashboard-certs]
[ root@k8s-master dashboard-certs]
Signature ok
subject = /CN= dashboard-cert
Getting Private key
[ root@k8s-master dashboard-certs]
secret/kubernetes-dashboard-certs created
[ root@k8s-master dashboard-certs]
NAMESPACE NAME TYPE DATA AGE
default default-token-j6m5t kubernetes.io/service-account-token 3 77m
kube-node-lease default-token-n5lxf kubernetes.io/service-account-token 3 77m
.. .. .. .. .
.. .. .. .. .
kubernetes-dashboard default-token-bjp2p kubernetes.io/service-account-token 3 2m33s
kubernetes-dashboard kubernetes-dashboard-certs Opaque 2 90s
创建 dashboard 管理员
[ root@k8s-master dashboard-certs]
# ServiceAccount used to log in to the Dashboard (indentation restored —
# the transcript had flattened the YAML, which kubectl would reject).
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: dashboard-admin
  namespace: kubernetes-dashboard
EOF
[ root@k8s-master dashboard-certs]
serviceaccount/dashboard-admin created
为用户分配权限
[ root@k8s-master dashboard-certs]
# Bind the dashboard-admin ServiceAccount to the built-in cluster-admin
# ClusterRole, granting it full cluster access (indentation restored).
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dashboard-admin-bind-cluster-role
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: dashboard-admin
    namespace: kubernetes-dashboard
EOF
[ root@k8s-master dashboard-certs]
clusterrolebinding.rbac.authorization.k8s.io/dashboard-admin-bind-cluster-role created
安装 Dashboard
[ root@k8s-master dashboard-certs]
Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
namespace/kubernetes-dashboard configured
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/kubernetes-metrics-scraper created
[ root@k8s-master dashboard-certs]
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-578894d4cd-rchx6 1 /1 Running 0 29m
kube-system calico-node-kfc5p 1 /1 Running 0 22m
kube-system calico-node-slgg9 1 /1 Running 0 29m
kube-system calico-node-xcc92 1 /1 Running 0 22m
kube-system coredns-66bff467f8-cxtrj 1 /1 Running 0 82m
kube-system coredns-66bff467f8-znlm2 1 /1 Running 0 82m
kube-system etcd-k8s-master 1 /1 Running 0 82m
kube-system kube-apiserver-k8s-master 1 /1 Running 0 82m
kube-system kube-controller-manager-k8s-master 1 /1 Running 0 82m
kube-system kube-proxy-6fnpb 1 /1 Running 0 15m
kube-system kube-proxy-tflld 1 /1 Running 0 15m
kube-system kube-proxy-x47c8 1 /1 Running 0 15m
kube-system kube-scheduler-k8s-master 1 /1 Running 0 82m
kubernetes-dashboard kubernetes-dashboard-84b6b4578b-8t9bp 1 /1 Running 0 75s
kubernetes-dashboard kubernetes-metrics-scraper-86f6785867-bqvpg 1 /1 Running 0 75s
[ root@k8s-master dashboard-certs]
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT( S) AGE SELECTOR
dashboard-metrics-scraper ClusterIP 10.1 .16.181 < none> 8000 /TCP 2m6s k8s-app= kubernetes-metrics-scraper
kubernetes-dashboard NodePort 10.1 .99.111 < none> 443 :30000/TCP 2m6s k8s-app= kubernetes-dashboard
查看并复制用户Token
[ root@k8s-master dashboard-certs]
Name: dashboard-admin-token-528w2
Namespace: kubernetes-dashboard
Labels: < none>
Annotations: kubernetes.io/service-account.name: dashboard-admin
kubernetes.io/service-account.uid: 7c3955d3-2c0c-4b99-b69b-8a3f330661de
Type: kubernetes.io/service-account-token
Data
== ==
ca.crt: 1025 bytes
namespace: 20 bytes
token: eyJhbGciOiJSUzI1NiIsImtpZCI6Ik1oVnpzUlUzRU4zbXJRV2F5VUZMc3JmYWFBTWMyWU1IenY1d1NET1U0bDgifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4tNTI4dzIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiN2MzOTU1ZDMtMmMwYy00Yjk5LWI2OWItOGEzZjMzMDY2MWRlIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmVybmV0ZXMtZGFzaGJvYXJkOmRhc2hib2FyZC1hZG1pbiJ9.nVS3WCiIU90o5WIYG9iHYE90Gfox_Q5eHNzz3UsGLDDBIfgDt7veX-4pl7GLV8FFsAap0fTLo_pU7sbehd5mOYcgh_QRlZ3ELR4mVZYNW6fmPBFZn7Tbjv7LLieGDPzELrefQJwS4sZus2WsH1OdQbMIry6AYKpl5AAKw4rhh_679QnEBjCsJiEebg0hzlKyXoXGqmaGwfetsCB5DOmoNss2WbIKfGJ7pasTTKa29F3T19NIh9VbDmavyvYZp9VPgfcKiuBKlxrakzwH9fosS8V3faMgH64CMIWwrEqv1cybd85gQkA1u0SGZ5mOQJ3tYWGHGJBFlO8J-RKSo8gJOw
访问测试
1 、浏览器访问:https://10.11.66.44:30000/
2 、选择token,输入上面输出的token
用文件认证登录
导出认证
[ root@k8s-master dashboard-certs]
Name: dashboard-admin-token-528w2
Namespace: kubernetes-dashboard
Labels: < none>
Annotations: kubernetes.io/service-account.name: dashboard-admin
kubernetes.io/service-account.uid: 7c3955d3-2c0c-4b99-b69b-8a3f330661de
Type: kubernetes.io/service-account-token
Data
== ==
namespace: 20 bytes
token: eyJhbGciOiJSUzI1NiIsImtpZCI6Ik1oVnpzUlUzRU4zbXJRV2F5VUZMc3JmYWFBTWMyWU1IenY1d1NET1U0bDgifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4tNTI4dzIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiN2MzOTU1ZDMtMmMwYy00Yjk5LWI2OWItOGEzZjMzMDY2MWRlIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmVybmV0ZXMtZGFzaGJvYXJkOmRhc2hib2FyZC1hZG1pbiJ9.nVS3WCiIU90o5WIYG9iHYE90Gfox_Q5eHNzz3UsGLDDBIfgDt7veX-4pl7GLV8FFsAap0fTLo_pU7sbehd5mOYcgh_QRlZ3ELR4mVZYNW6fmPBFZn7Tbjv7LLieGDPzELrefQJwS4sZus2WsH1OdQbMIry6AYKpl5AAKw4rhh_679QnEBjCsJiEebg0hzlKyXoXGqmaGwfetsCB5DOmoNss2WbIKfGJ7pasTTKa29F3T19NIh9VbDmavyvYZp9VPgfcKiuBKlxrakzwH9fosS8V3faMgH64CMIWwrEqv1cybd85gQkA1u0SGZ5mOQJ3tYWGHGJBFlO8J-RKSo8gJOw
ca.crt: 1025 bytes
[ root@k8s-master ~]
- name: kubernetes-admin
user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM4akNDQWRxZ0F3SUJBZ0lJZmk1aXZZNkxXb0F3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TURBNE1ETXdOelF5TlRoYUZ3MHlNVEE0TURNd056UXpNREJhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXpLazFvSnVPenQ3R3kzWnIKYjY5UkFqOXpzZ0hsNDdBOVVGOGIvQm1oYjVZalAwNTZuSG5FUVg4Qi85eDRaQmI0U2VLOTZkVVhIaTlFcEZuUQpDUlNKTFUwNnFRcW1GeUdXc1JJcEJPVDlUQmtrSW1XM25aRFZvKzI2dWFnVEp0V1BsOWtaWHZ5Z1hGUkJxeDNYCkxvTHIwZ2FrWE56dWd6TzBhMnFwQ1hQK0xmTE1Pa2gzUlJRZmQ4NUtaWWFXcWhNSStjNkZEVGtnTi84Z3BNKzYKWkE0a0UzT0x3OWFORkpvakl2amNIY1h5N0RNdGxCaFVRZVU4bEk2NHVRVk9zcDllTDR2WjBFRmo1djZFejNnbwp4ZFYrbzd6NWd3N3pzUENrdlJjc3RRcVhSRnV6emlpTVVQQTRDbzFhZkt3R1VZcmtBbmNzZnQxbVhGb2V3WDFPCjkwQ2xod0lEQVFBQm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFDd0lKc0JreEV4UXBpeW8zTkNmQmkrL3hOQ0U3YnpNLzhmRAp4Q0VwQlZ0MWR1NkU1ZFdJQy82a3B0OVZzNHhHc1gvVVA4aUNaejVHZmtxT1JmTklDM0dZUFZJWlhNTUN2RHp0CnFubkk0Z1p2YXhyMnNoSDNpVkw2Rzd0Y2hCZmNJV0J4K1lnTEt3ZW9iTDUvaUorbXJmT2xsNXV4eit6cGUveHIKTjArWWVsTXJBaS9PeWpJR1N0WjVOblRzcnVILzZVRXRFZUwwRE9WQ0FrR3JQYnlkQVdNQUxaeWlQMTU4bCticQpNRkFkMHc2ZG82R3R2NlRCMGVaaXdzT1RHVzN6Ti85YlZWS2NFcGIzaE1MVVk0YVhvNC9laXl6TnF6MzlDdEpBCklPb3djOEFuakdGRDYraUdKbWU0VVdXcUxzMDI5US82eXF6WWFsUmFqWkwyL2FkNHRuaz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo =
client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBektrMW9KdU96dDdHeTNacmI2OVJBajl6c2dIbDQ3QTlVRjhiL0JtaGI1WWpQMDU2Cm5IbkVRWDhCLzl4NFpCYjRTZUs5NmRVWEhpOUVwRm5RQ1JTSkxVMDZxUXFtRnlHV3NSSXBCT1Q5VEJra0ltVzMKblpEVm8rMjZ1YWdUSnRXUGw5a1pYdnlnWEZSQnF4M1hMb0xyMGdha1hOenVnek8wYTJxcENYUCtMZkxNT2toMwpSUlFmZDg1S1pZYVdxaE1JK2M2RkRUa2dOLzhncE0rNlpBNGtFM09MdzlhTkZKb2pJdmpjSGNYeTdETXRsQmhVClFlVThsSTY0dVFWT3NwOWVMNHZaMEVGajV2NkV6M2dveGRWK283ejVndzd6c1BDa3ZSY3N0UXFYUkZ1enppaU0KVVBBNENvMWFmS3dHVVlya0FuY3NmdDFtWEZvZXdYMU85MENsaHdJREFRQUJBb0lCQVFDMHlLZXhkcGZnanhObAp1UFpRVXJvcFZTbDZ6WWhuNTA5U0JxR3V3R2xGSzRkNUxYYkxjQmgzanB5U2lncml4eE9PR0xlUHJZYmRSLzNICmUvcHpldXR0MC9HRVR2N0dJZ3A5NGIvUUxnSzl6TnVKY3ZhT1Bka3FGQjVFVDM2VGFFU09hdHlwZGxpbEZseG4KcmxWZEpaTHdGS1B0ejg3MG9LQzMzaUR4VTcvc2p4MWUwc3FFQ1NMdW5aY2FiaWJtYUpjT2RXYk0yM3JBdEdYQQp0YlFIYVZneHJldEZFREx0Ym9IMFB3Qit3eFNHdFh4WUFwSXR0RkowNWM3QWc1OVhWSFc2akdiYWd2VVlPcDFQCmdGVndSbjdwT1daNlNHTDBqdXgvbTl2UzZoakZ1aVVhVXhkM2ZOSVNKbUljRjZ2MTlmVTQwV3kyYXBCK1B0bHIKOU5zM2RpSGhBb0dCQU01ZW9QcFNGNmp0U1V0NTlERktZdUJUUG9wQWxiZFlxM0QvWnVBQlpkaFdJWXNoS1JvRwpUSGhjaTFlKzBPbmZlZ2pvMzhGM0syaHVJRVdrNEFhQ25QaWVyRWc3Yk1mVjNkMjYyNHBFeGRBN3J5Y1JvaWJuClJlTVA5K1BvVy9IaXJVQW4wUFdyRFUydEpLekxwNlhCcnozeE02VmFiWGxFcnNnZ0pybHN1cEwzQW9HQkFQM2gKWW5QLzVWWHBWeUtvMkhuZEEwWkwwK0pscFhNeFY4NDA4ZE1QMXE1WkVQbkZ2aVNXVjlLdFJVa3lCR2ZDUW1WeApEWkp3KzBRcmZUbXV5elZ6aUFZTFJJbHJKZ285QmN0NmRGUmpFaUo4NkVIeGdlV1J5UkhmaUZqalhqSXlCVGYyCmFxOGM2UlBTZmEyTEh1SVBlZEZVY2lrN0Z5WDg4dzJabkpBcjJFM3hBb0dBWnBOVWtuZkJlTjdRNHFvd2ZWdUwKQUJPQWIzbWdzU3hxc3RUUURxSERQSis3Tm90NkFZeUY4QUdYNVRwY1h4TU1kbWRCNk1qU0U2dEJjVHg5ZWQ3cwpKUXZCZUhuSkhSOHBrMit3ZGU2dklFeTZSOElWQmg5SWRvOVdXTHNERUp6cUhveHI2ZUJtMFdneFpZNG91MVFsClJiV2hSUnhJYzlGMnl0Um9TeHhITklzQ2dZQmRxSFQ2bUMrUmx3aG5KK1RjYUJWYUxJVVpJeWg3SzN2Wi9ad3MKb2M0ditYbVN1MGxmRS91SUpCWElYK1JTSnM3NXYxQWpjdnl1OUdBNUZHdXc1MU1KNzhRejhjeFJ3SnRQcW5nWgozWWFHSkpCR0s0TWhIcndQbE9nbTZwSUljSDJPWEtDVXcxU1UxSFU2dlhVQ0xuVmhMUWNFZ09FVVNaR2N0Y3VWClFDZUc4UUtCZ0UrMkFrZTR3QlRnZDhuZFh
lTHRPcHBRZ21IUVViZUN1elZyRzFEVEJxam0rcVpnSzhKR2RUdXIKUDhybjY3TGNFSFpyRlJVODEwQXJUNU92QXRGOTlnU0dnKzd1Q2x5bzJtVGtxZWRIUTZ6RVZld0JUQlFQUEx1VAp6UGRYbjl5cTZSaVZPajU1QUROdmFuNXdQNUE3clRSTGZjNXZqQWRmV3hmYUZqYVIxNE85Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg ==
token: eyJhbGciOiJSUzI1NiIsImtpZCI6Ik1oVnpzUlUzRU4zbXJRV2F5VUZMc3JmYWFBTWMyWU1IenY1d1NET1U0bDgifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4tNTI4dzIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiN2MzOTU1ZDMtMmMwYy00Yjk5LWI2OWItOGEzZjMzMDY2MWRlIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmVybmV0ZXMtZGFzaGJvYXJkOmRhc2hib2FyZC1hZG1pbiJ9.nVS3WCiIU90o5WIYG9iHYE90Gfox_Q5eHNzz3UsGLDDBIfgDt7veX-4pl7GLV8FFsAap0fTLo_pU7sbehd5mOYcgh_QRlZ3ELR4mVZYNW6fmPBFZn7Tbjv7LLieGDPzELrefQJwS4sZus2WsH1OdQbMIry6AYKpl5AAKw4rhh_679QnEBjCsJiEebg0hzlKyXoXGqmaGwfetsCB5DOmoNss2WbIKfGJ7pasTTKa29F3T19NIh9VbDmavyvYZp9VPgfcKiuBKlxrakzwH9fosS8V3faMgH64CMIWwrEqv1cybd85gQkA1u0SGZ5mOQJ3tYWGHGJBFlO8J-RKSo8gJOw
[ root@k8s-master ~]
[ root@k8s-master ~]
[ root@k8s-master local]
total 8
drwxr-xr-x. 2 root root 6 Apr 11 2018 bin
drwxr-xr-x. 2 root root 6 Apr 11 2018 etc
drwxr-xr-x. 2 root root 6 Apr 11 2018 games
drwxr-xr-x. 2 root root 6 Apr 11 2018 include
-rw------- 1 root root 6425 Aug 3 17 :48 k8s-dashboard.kubeconfig
drwxr-xr-x. 2 root root 6 Apr 11 2018 lib
drwxr-xr-x. 2 root root 6 Apr 11 2018 lib64
drwxr-xr-x. 2 root root 6 Apr 11 2018 libexec
drwxr-xr-x. 2 root root 6 Apr 11 2018 sbin
drwxr-xr-x. 5 root root 49 Mar 30 2019 share
drwxr-xr-x. 2 root root 6 Apr 11 2018 src
[ root@k8s-master local]
安装部署 metrics-server 插件
链接:https://pan.baidu.com/s/1QRndSG88L5w-_DHfMxrd_g
提取码:62dj
复制这段内容后打开百度网盘手机App,操作更方便哦
[ root@k8s-master ~]
[ root@k8s-master ~]
[ root@k8s-master 1.8 +]
total 28
-rw-r--r-- 1 root root 397 Nov 12 2019 aggregated-metrics-reader.yaml
-rw-r--r-- 1 root root 303 Nov 12 2019 auth-delegator.yaml
-rw-r--r-- 1 root root 324 Nov 12 2019 auth-reader.yaml
-rw-r--r-- 1 root root 298 Nov 12 2019 metrics-apiservice.yaml
-rw-r--r-- 1 root root 1091 Nov 12 2019 metrics-server-deployment.yaml
-rw-r--r-- 1 root root 297 Nov 12 2019 metrics-server-service.yaml
-rw-r--r-- 1 root root 517 Nov 12 2019 resource-reader.yaml
修改安装脚本
[ root@k8s- master 1.8+]
# metrics-server deployment (indentation and tokens restored — the transcript
# garbled names like "metrics- server" and args like "- - - cert- dir=/tmp").
# Image swapped to the mirrorgooglecontainers mirror of v0.3.6, with
# --kubelet-insecure-tls and preferred address types added so the server can
# scrape kubelets that use self-signed certificates.
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      volumes:
      # Writable /tmp so the read-only root filesystem below still works.
      - name: tmp-dir
        emptyDir: {}
      containers:
      - name: metrics-server
        image: mirrorgooglecontainers/metrics-server-amd64:v0.3.6
        args:
          - --cert-dir=/tmp
          - --secure-port=4443
          - --kubelet-insecure-tls
          - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        ports:
        - name: main-port
          containerPort: 4443
          protocol: TCP
        securityContext:
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
        imagePullPolicy: Always
        volumeMounts:
        - name: tmp-dir
          mountPath: /tmp
[ root@k8s-master 1.8 +]
clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
serviceaccount/metrics-server created
deployment.apps/metrics-server created
service/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
[ root@k8s-master 1.8 +]
NAME CPU( cores) CPU% MEMORY( bytes) MEMORY%
k8s-master 887m 22 % 1701Mi 59 %
k8s-node1 158m 7 % 954Mi 35 %
k8s-node2 137m 6 % 894Mi 32 %
[ root@k8s-master 1.8 +]
Error from server ( ServiceUnavailable) : the server is currently unable to handle the request ( get nodes.metrics.k8s.io)
[ root@k8s-master 1.8 +]
[ root@k8s-master 1.8 +]
error: metrics not available yet