环境准备(3台主机执行以下操作):
master(pro1):192.168.206.100
node1(pro2):192.168.206.101
node2(gra):192.168.206.102
关闭防火墙和selinux
systemctl stop firewalld
systemctl disable firewalld
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0
关闭swap分区
swapoff -a # 临时关闭
vim /etc/fstab # 注释掉swap那一行 永久关闭
添加主机名与IP对应关系
cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.206.100 pro1
192.168.206.101 pro2
192.168.206.102 gra
将桥接的IPv4流量传递到iptables的链
cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.ipv4.tcp_tw_recycle = 0
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
1、部署docker(所有主机)
docker -v
Docker version 18.06.1-ce, build e68fc7a
Docker配置修改,设置cgroup驱动,这里用systemd
vim /etc/docker/daemon.json
{
  "graph": "/data/docker",
  "registry-mirrors": ["https://01xxgaft.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2"
}
systemctl restart docker
2、添加k8s阿里云YUM软件源(所有主机)
# Create the aliyun Kubernetes yum repo (the original only displayed a file
# that does not exist yet on a fresh host). Quoted 'EOF' keeps the content literal.
cat > /etc/yum.repos.d/kubernetes.repo << 'EOF'
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
3、安装kubeadm,kubelet和kubectl(所有主机)
yum install -y kubelet-1.16.2 kubeadm-1.16.2 kubectl-1.16.2
systemctl start kubelet
systemctl enable kubelet
4、配置master节点
初始化master节点
kubeadm init --apiserver-advertise-address=192.168.206.100 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.16.0 --service-cidr=10.140.0.0/16 --pod-network-cidr=10.240.0.0/16
网段问题,两个网段不要重,后面是/16,不要与当前机器网段一样
这里执行完会生成一串命令用于node节点的加入,记录下来
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
5、部署node节点
kubeadm join 192.168.206.100:6443 --token ma9t8r.m2pfodr49itz7oai \
--discovery-token-ca-cert-hash sha256:307123455323a3267653ee4cc040c81c24a6d95a44aa47ab5c7e8b257ff98e09
注:由于网络插件还没有部署,还没有准备就绪 NotReady,继续操作。
kubectl get nodes
NAME STATUS ROLES AGE VERSION
gra NotReady <none> 5m31s v1.16.2
pro1 NotReady master 6m29s v1.16.2
pro2 NotReady <none> 5m34s v1.16.2
# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-58cc8c89f4-bm69r 0/1 Pending 0 6m50s
coredns-58cc8c89f4-p2c56 0/1 Pending 0 6m50s
etcd-pro1 1/1 Running 0 5m52s
kube-apiserver-pro1 1/1 Running 0 6m16s
kube-controller-manager-pro1 1/1 Running 0 6m21s
kube-proxy-2xtvx 1/1 Running 0 6m8s
kube-proxy-lzbjr 1/1 Running 0 6m50s
kube-proxy-xgcj2 1/1 Running 0 6m11s
kube-scheduler-pro1 1/1 Running 0 6m15s
6、部署容器网络 (master执行)
Calico是一个纯三层的数据中心网络方案,是目前Kubernetes主流的网络方案。
wget https://docs.projectcalico.org/v3.14/manifests/calico.yaml --no-check-certificate
kubectl apply -f calico.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-6b94766748-ljllr 0/1 Pending 0 5s
calico-node-8gpfl 0/1 Init:0/3 0 6s
calico-node-r9vxq 0/1 Init:0/3 0 6s
calico-node-vnzf7 0/1 Init:0/3 0 7s
coredns-58cc8c89f4-bm69r 0/1 Pending 0 9m8s
coredns-58cc8c89f4-p2c56 0/1 Pending 0 9m8s
etcd-pro1 1/1 Running 0 8m10s
kube-apiserver-pro1 1/1 Running 0 8m34s
kube-controller-manager-pro1 1/1 Running 0 8m39s
kube-proxy-2xtvx 1/1 Running 0 8m26s
kube-proxy-lzbjr 1/1 Running 0 9m8s
kube-proxy-xgcj2 1/1 Running 0 8m29s
kube-scheduler-pro1 1/1 Running 0 8m33s
执行结束要等上一会才全部running
kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-6b94766748-ljllr 0/1 ContainerCreating 0 68s
calico-node-8gpfl 0/1 Init:2/3 0 69s
calico-node-r9vxq 0/1 Init:2/3 0 69s
calico-node-vnzf7 0/1 Init:2/3 0 70s
coredns-58cc8c89f4-bm69r 0/1 ContainerCreating 0 10m
coredns-58cc8c89f4-p2c56 0/1 ContainerCreating 0 10m
etcd-pro1 1/1 Running 0 9m13s
kube-apiserver-pro1 1/1 Running 0 9m37s
kube-controller-manager-pro1 1/1 Running 0 9m42s
kube-proxy-2xtvx 1/1 Running 0 9m29s
kube-proxy-lzbjr 1/1 Running 0 10m
kube-proxy-xgcj2 1/1 Running 0 9m32s
kube-scheduler-pro1 1/1 Running 0 9m36s
kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-6b94766748-ljllr 1/1 Running 0 8m10s
calico-node-8gpfl 1/1 Running 0 8m11s
calico-node-r9vxq 1/1 Running 0 8m11s
calico-node-vnzf7 1/1 Running 0 8m12s
coredns-58cc8c89f4-bm69r 1/1 Running 0 17m
coredns-58cc8c89f4-p2c56 1/1 Running 0 17m
etcd-pro1 1/1 Running 0 16m
kube-apiserver-pro1 1/1 Running 0 16m
kube-controller-manager-pro1 1/1 Running 0 16m
kube-proxy-2xtvx 1/1 Running 0 16m
kube-proxy-lzbjr 1/1 Running 0 17m
kube-proxy-xgcj2 1/1 Running 0 16m
kube-scheduler-pro1 1/1 Running 0 16m
kubectl get nodes
NAME STATUS ROLES AGE VERSION
gra Ready <none> 12m v1.16.2
pro1 Ready master 13m v1.16.2
pro2 Ready <none> 12m v1.16.2
7、部署kubernetes-dashboard UI页面
下载镜像(所有主机)
docker pull registry.cn-shanghai.aliyuncs.com/hanchenglei/metrics-server-amd64:v0.3.6
创建yaml文件(master主机)
# -p creates missing parents (/data/k8s); the original `cd WEB-UI/` only worked
# if the current directory already happened to be /data/k8s — use the absolute path.
mkdir -p /data/k8s/WEB-UI
cd /data/k8s/WEB-UI/
vim components.yaml
# metrics-server v0.3.6 manifest (indentation restored — the pasted copy had all
# leading whitespace stripped and would not parse as YAML).
# NOTE(review): step 7 pulls registry.cn-shanghai.aliyuncs.com/hanchenglei/metrics-server-amd64:v0.3.6,
# but this Deployment references k8s.gcr.io/metrics-server-amd64:v0.3.6. If nodes
# cannot reach k8s.gcr.io, retag first — TODO confirm:
#   docker tag registry.cn-shanghai.aliyuncs.com/hanchenglei/metrics-server-amd64:v0.3.6 k8s.gcr.io/metrics-server-amd64:v0.3.6
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:aggregated-metrics-reader
  labels:
    rbac.authorization.k8s.io/aggregate-to-view: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: ["metrics.k8s.io"]
  resources: ["pods", "nodes"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
spec:
  service:
    name: metrics-server
    namespace: kube-system
  group: metrics.k8s.io
  version: v1beta1
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      volumes:
      # mount in tmp so we can safely use from-scratch images and/or read-only containers
      - name: tmp-dir
        emptyDir: {}
      containers:
      - name: metrics-server
        image: k8s.gcr.io/metrics-server-amd64:v0.3.6
        imagePullPolicy: IfNotPresent
        args:
        - --cert-dir=/tmp
        - --secure-port=4443
        command:
        - /metrics-server
        - --kubelet-preferred-address-types=InternalIP
        - --kubelet-insecure-tls
        ports:
        - name: main-port
          containerPort: 4443
          protocol: TCP
        securityContext:
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
        volumeMounts:
        - name: tmp-dir
          mountPath: /tmp
      nodeSelector:
        kubernetes.io/os: linux
        kubernetes.io/arch: "amd64"
---
apiVersion: v1
kind: Service
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/name: "Metrics-server"
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    k8s-app: metrics-server
  ports:
  - port: 443
    protocol: TCP
    targetPort: main-port
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  - configmaps
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
kubectl create -f components.yaml
kubectl get pods -n kube-system
UI页面创建
vim recommended.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Dashboard v2.0.0 manifest with indentation restored (the pasted copy had all
# leading whitespace stripped and would not parse as YAML). Content unchanged,
# including the tutorial's modification: Service type NodePort with nodePort 30443.
apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30443
  selector:
    k8s-app: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
  # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.0.0
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.3
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
            - mountPath: /tmp
              name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}
kubectl get ns
kubectl apply -f recommended.yaml
kubectl get ns
kubectl get pod -n kubernetes-dashboard
kubectl proxy
curl http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/
查看登录用户的token
kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep kubernetes-dashboard | awk '{print $1}')
Name: kubernetes-dashboard-certs
Namespace: kubernetes-dashboard
Labels: k8s-app=kubernetes-dashboard
Annotations:
Type: Opaque
Data
====
Name: kubernetes-dashboard-csrf
Namespace: kubernetes-dashboard
Labels: k8s-app=kubernetes-dashboard
Annotations:
Type: Opaque
Data
====
csrf: 256 bytes
Name: kubernetes-dashboard-key-holder
Namespace: kubernetes-dashboard
Labels: <none>
Annotations: <none>
Type: Opaque
Data
====
priv: 1679 bytes
pub: 459 bytes
Name: kubernetes-dashboard-token-bqs2s
Namespace: kubernetes-dashboard
Labels: <none>
Annotations: kubernetes.io/service-account.name: kubernetes-dashboard
kubernetes.io/service-account.uid: 14f9df1a-a395-47cd-a5d0-1b1f9aba63d4
Type: kubernetes.io/service-account-token
Data
====
token: eyJhbGciOiJSUzI1NiIsImtpZCI6IlN6aFBqRUVOSncyN1g3TFlsam5Ja1gxUkZwbGNsMDhwS3lFalF0MWUtNXcifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1icXMycyIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjE0ZjlkZjFhLWEzOTUtNDdjZC1hNWQwLTFiMWY5YWJhNjNkNCIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.TrT2FSOhW1boaDALa7te7UFmVTKoxyTlGjU_By4RqUV4x3f4ROAxRyFVLsV8CUhJn4ARsHm1p2x7zPUn21kh_SNuBxgJ4ScT-K5uB522gCEGS8H_1wlu-sasNJHYp92kpvjbj5jA3bRGNZNq7OZir6-8cUttM4GH2HORjmcGZ6T1PIzpdHmPZjmboraXHPfVo9LqVUsGTyUZA62rbYcTUuOvu2XCLKLeFbO5KEawByJ2Zsp5bVqLsUQSKtCyg1fGfSBvLP0C4_UqvRYIli5OphwMgyMUvR0etSL4EFqpvmshOCzDWo3QiU8BrP9zRQj7DTM_YcXis_CfkZ_jOorAjA
ca.crt: 1025 bytes
namespace: 20 bytes
登录https://192.168.206.100:30443,输入token,进入页面但是无数据,pod报RBAC权限问题,继续操作
8、kubernetes-dashboard UI页面无数据解决
将kubernetes-dashboard绑定cluster-admin权限,编写yaml文件
vim kubernetes-dashboard-admin_clusterroles.yaml
# Bind the kubernetes-dashboard ServiceAccount to the built-in cluster-admin
# ClusterRole so the dashboard UI can read all cluster data.
# Indentation restored (the pasted copy had all leading whitespace stripped).
# NOTE(review): reusing the name "kubernetes-dashboard" collides with the binding
# created by recommended.yaml; roleRef is immutable, which is exactly the
# "cannot change roleRef" error shown below — delete the old binding first,
# or give this one a distinct name (e.g. kubernetes-dashboard-admin).
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard
kubectl apply -f kubernetes-dashboard-admin_clusterroles.yaml
可能会出现以下报错:
The ClusterRoleBinding "kubernetes-dashboard" is invalid: roleRef: Invalid value: rbac.RoleRef{APIGroup:"rbac.authorization.k8s.io", Kind:"ClusterRole", Name:"cluster-admin"}: cannot change roleRef
解决:
kubectl delete -f kubernetes-dashboard-admin_clusterroles.yaml
clusterrolebinding.rbac.authorization.k8s.io "kubernetes-dashboard" deleted
kubectl apply -f kubernetes-dashboard-admin_clusterroles.yaml
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
登陆WEB-UI页面,数据恢复正常
k8s常用命令
(1)查看pod,service,endpoints,secret等等的状态
kubectl get 组件名 # 例如kubectl get pod 查看详细信息可以加上-o wide 其他namespace的指定 -n namespace名
(2)创建,变更一个yaml文件内资源,也可以是目录,目录内包含一组yaml文件(实际使用中都是以yaml文件为主,直接使用命令创建pod的很少,推荐多使用yaml文件)
kubectl apply -f xxx.yaml # 例如kubectl apply -f nginx.yaml 这里是如果没有则创建,如果有则变更,比create好用
(3)删除一个yaml文件内资源,也可以是目录,目录内包含一组yaml文件
kubectl delete -f xxx.yaml # 例如kubectl delete -f nginx.yaml
(4)查看资源状态,比如有一组deployment内的pod没起来,一般用于pod调度过程出现的问题排查
kubectl describe pod pod名 # 先用kubectl get pod查看 有异常的复制pod名使用这个命令
(5)查看pod日志,用于pod状态未就绪的故障排查
kubectl logs pod名 # 先用kubectl get pod查看 有异常的复制pod名使用这个命令
(6)查看node节点或者是pod资源(cpu,内存资源)使用情况
kubectl top 组件名 # 例如kubectl top node kubectl top pod
(7)进入pod内部
kubectl exec -ti pod名 /bin/bash # 先用kubectl get pod查看 有需要的复制pod名使用这个命令