CKA Exam Environment Setup (k8s v1.20.10)

Lab Environment

Host network and component layout

Role     IP             Hostname       Installed components
master   192.168.0.10   k8s-master-1   apiserver, controller-manager, scheduler, etcd, docker, kubectl, kubelet, kube-proxy, calico, coredns, metrics-server, dashboard
node     192.168.0.11   k8s-node-1     kubelet, kube-proxy, docker, calico
node     192.168.0.12   k8s-node-2     kubelet, kube-proxy, docker, calico
# OS version
	Ubuntu 20.04.3 LTS (5.4.0-81-generic)

# Specs
	4 GB RAM / 2 vCPU / 70 GB disk, virtualization enabled, NAT network mode

# Networks
	service: 10.0.0.0/16
	pod: 10.70.0.0/16

Host Initialization

Configure hostnames
# master-1
	hostnamectl set-hostname k8s-master-1 && bash

# node-1
	hostnamectl set-hostname k8s-node-1 && bash

# node-2
	hostnamectl set-hostname k8s-node-2 && bash
Configure the hosts file
# Contents of /etc/hosts on all three machines:
root@k8s-master-1:~# cat /etc/hosts
127.0.0.1 localhost
127.0.1.1 boy

192.168.0.10 k8s-master-1
192.168.0.11 k8s-node-1
192.168.0.12 k8s-node-2

# The following lines are desirable for IPv6 capable hosts
::1     ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
Passwordless SSH
# master
	ssh-keygen -t rsa
	ssh-copy-id -i .ssh/id_rsa.pub root@k8s-node-1
	ssh-copy-id -i .ssh/id_rsa.pub root@k8s-node-2
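
# With the keys in place, a quick sketch for pushing the same hosts file from the master to both nodes:
	for h in k8s-node-1 k8s-node-2; do scp /etc/hosts root@${h}:/etc/hosts; done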
Disable the firewall
# master and nodes
root@k8s-master-1:~# ufw disable 
Firewall stopped and disabled on system startup
Disable swap
# master and nodes
	swapoff -a
	echo "vm.swappiness=0" >> /etc/sysctl.conf && sysctl -p
	sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
	
# On Ubuntu 20.04, swap can still come back after a reboot despite the steps above (root cause unknown), so add a workaround:
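# An alternative worth trying (untested in this environment): mask the systemd swap target so no swap unit is activated at boot
	systemctl mask swap.target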

# Add an [Install] section to rc-local.service
cat >> /lib/systemd/system/rc-local.service <<- EOF 
[Install]
WantedBy=multi-user.target
Alias=rc-local.service
EOF

# Add the boot script
root@k8s-node-1:~# cat /etc/rc.local 
#!/bin/bash 
swapoff -a


# Make the script executable
	chmod a+x /etc/rc.local

# Enable the service at boot
	systemctl enable rc-local.service
	

# After a reboot, swap is no longer loaded
root@k8s-master-1:~# free -h
              total        used        free      shared  buff/cache   available
Mem:          3.8Gi       238Mi       3.3Gi       1.0Mi       315Mi       3.4Gi
Swap:            0B          0B          0B
Allow iptables to see bridged traffic

For iptables on the Linux nodes to handle bridged traffic correctly, net.bridge.bridge-nf-call-iptables must be set to 1 in the sysctl configuration.

# master and nodes
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF
# modules-load.d only takes effect at boot; load the module now as well
sudo modprobe br_netfilter

cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sudo sysctl --system
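
# Verify that the module is loaded and the settings are live:
	lsmod | grep br_netfilter
	sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward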
Time Synchronization
# master and nodes
# Install ntpdate
	apt-get install ntpdate

# Set the timezone
	timedatectl set-timezone Asia/Shanghai

# Add a root cron job that syncs hourly
	crontab -e
	0 */1 * * * /usr/sbin/ntpdate time2.aliyun.com
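
# Sync once now and confirm the clock and timezone:
	ntpdate time2.aliyun.com
	timedatectl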
Install docker-ce
# master and nodes
# Install prerequisite system tools
	apt-get -y install apt-transport-https ca-certificates curl software-properties-common

# Add the GPG key
	curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | apt-key add -

# Add the apt repository
	add-apt-repository "deb [arch=amd64] http://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"

# Refresh the package index
	apt-get update

# List the available docker-ce versions
	apt-cache madison docker-ce

# Install docker-ce
	apt-get install -y docker-ce=5:19.03.15~3-0~ubuntu-focal


# Configure docker
mkdir /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": ["https://ornb7jit.mirror.aliyuncs.com"],
  "default-ipc-mode": "shareable"
}
EOF
systemctl daemon-reload && systemctl enable --now docker
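
# Confirm docker picked up the systemd cgroup driver (it must match the kubelet's):
	docker info | grep -i "cgroup driver"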

Deploy kubeadm, kubelet and kubectl

# master and nodes
# Add the repository
	curl -fsSL https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -
	add-apt-repository "deb [arch=amd64] https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main"
	apt-get update

# List the available versions
	apt-cache madison kubelet kubectl kubeadm

# Install kubeadm, kubelet and kubectl
	apt-get install kubelet=1.20.10-00 kubeadm=1.20.10-00 kubectl=1.20.10-00

# Enable and start kubelet (it will crash-loop until kubeadm init/join provides a config; that is expected)
	systemctl enable --now kubelet
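
# Optional: hold the versions so routine apt upgrades cannot move the cluster unexpectedly
	apt-mark hold kubelet kubeadm kubectl docker-ce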
	
# List the default images. They are pulled from k8s.gcr.io by default, which is usually unreachable from mainland China; each image can be pulled with docker from a mirror first, e.g. registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.20.10
root@k8s-master-1:~# kubeadm config images list
W0105 14:05:58.820412    3446 version.go:102] could not fetch a Kubernetes version from the internet: unable to get URL "https://dl.k8s.io/release/stable-1.txt": Get "https://dl.k8s.io/release/stable-1.txt": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
W0105 14:05:58.820514    3446 version.go:103] falling back to the local client version: v1.20.10
k8s.gcr.io/kube-apiserver:v1.20.10
k8s.gcr.io/kube-controller-manager:v1.20.10
k8s.gcr.io/kube-scheduler:v1.20.10
k8s.gcr.io/kube-proxy:v1.20.10
k8s.gcr.io/pause:3.2
k8s.gcr.io/etcd:3.4.13-0
k8s.gcr.io/coredns:1.7.0
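
# Alternatively, pre-pull everything from the mirror in one step:
	kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.20.10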

# Initialize the cluster (run on the master node)
root@k8s-master-1:~# kubeadm init --kubernetes-version=1.20.10 --apiserver-advertise-address=192.168.0.10 --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr=10.70.0.0/16 --service-cidr=10.0.0.0/16 --ignore-preflight-errors=SystemVerification
[init] Using Kubernetes version: v1.20.10
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master-1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.0.10]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master-1 localhost] and IPs [192.168.0.10 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master-1 localhost] and IPs [192.168.0.10 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 13.502950 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.20" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master-1 as control-plane by adding the labels "node-role.kubernetes.io/master=''" and "node-role.kubernetes.io/control-plane='' (deprecated)"
[mark-control-plane] Marking the node k8s-master-1 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: iammrs.1166bffb1d8rjr78
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.0.10:6443 --token iammrs.1166bffb1d8rjr78 \
    --discovery-token-ca-cert-hash sha256:3403e9ff7daac7c5ed7dab94147d66545f5be580d0160923af08dc8dbe4b91c5 
    
# Images now present on the master node
root@k8s-master-1:~# docker image ls
REPOSITORY                                                        TAG        IMAGE ID       CREATED         SIZE
registry.aliyuncs.com/google_containers/kube-proxy                v1.20.10   945c9bce487a   4 months ago    99.7MB
registry.aliyuncs.com/google_containers/kube-apiserver            v1.20.10   644cadd07add   4 months ago    122MB
registry.aliyuncs.com/google_containers/kube-controller-manager   v1.20.10   2f450864515d   4 months ago    116MB
registry.aliyuncs.com/google_containers/kube-scheduler            v1.20.10   4c9be8dc650b   4 months ago    47.3MB
registry.aliyuncs.com/google_containers/etcd                      3.4.13-0   0369cf4303ff   16 months ago   253MB
registry.aliyuncs.com/google_containers/coredns                   1.7.0      bfe3a36ebd25   18 months ago   45.2MB
registry.aliyuncs.com/google_containers/pause                     3.2        80d28bedfe5d   23 months ago   683kB


# Configure kubectl (master node)
root@k8s-master-1:~# mkdir -p $HOME/.kube
root@k8s-master-1:~# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
root@k8s-master-1:~# sudo chown $(id -u):$(id -g) $HOME/.kube/config

# List nodes; the node is still NotReady because no network plugin is installed yet
root@k8s-master-1:~# kubectl get nodes
NAME           STATUS     ROLES                  AGE     VERSION
k8s-master-1   NotReady   control-plane,master   7m23s   v1.20.10


# Print the join command
root@k8s-master-1:~# kubeadm token create --print-join-command
kubeadm join 192.168.0.10:6443 --token 66c402.m94sglysmmx2bhkp     --discovery-token-ca-cert-hash sha256:3403e9ff7daac7c5ed7dab94147d66545f5be580d0160923af08dc8dbe4b91c5 

# Join k8s-node-1 and k8s-node-2 to the cluster; both nodes can use the same token
root@k8s-node-1:~# kubeadm join 192.168.0.10:6443 --token 66c402.m94sglysmmx2bhkp     --discovery-token-ca-cert-hash sha256:3403e9ff7daac7c5ed7dab94147d66545f5be580d0160923af08dc8dbe4b91c5
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

# List nodes; ROLES <none> indicates a worker node without a role label
root@k8s-master-1:~# kubectl get nodes
NAME           STATUS     ROLES                  AGE    VERSION
k8s-master-1   NotReady   control-plane,master   16m    v1.20.10
k8s-node-1     NotReady   <none>                 2m9s   v1.20.10
k8s-node-2     NotReady   <none>                 87s    v1.20.10

# Label k8s-node-1 and k8s-node-2 as worker nodes
	kubectl label node k8s-node-1 node-role.kubernetes.io/worker=worker
	kubectl label node k8s-node-2 node-role.kubernetes.io/worker=worker

# List nodes again
root@k8s-master-1:~# kubectl get nodes
NAME           STATUS     ROLES                  AGE     VERSION
k8s-master-1   NotReady   control-plane,master   18m     v1.20.10
k8s-node-1     NotReady   worker                 4m29s   v1.20.10
k8s-node-2     NotReady   worker                 3m47s   v1.20.10

# Images on a worker node (k8s-node-2 shown)
root@k8s-node-2:~# docker image ls
REPOSITORY                                           TAG        IMAGE ID       CREATED         SIZE
registry.aliyuncs.com/google_containers/kube-proxy   v1.20.10   945c9bce487a   4 months ago    99.7MB
registry.aliyuncs.com/google_containers/pause        3.2        80d28bedfe5d   23 months ago   683kB

Deploy calico

# Download the manifest
	curl -O https://docs.projectcalico.org/manifests/calico-etcd.yaml

# Set CALICO_IPV4POOL_CIDR to the pod CIDR
# Changes:
	- name: CALICO_IPV4POOL_CIDR
	  value: "10.70.0.0/16"
	- name: IP_AUTODETECTION_METHOD
	  value: "interface=ens.*"  # set to your actual NIC name; regular expressions are supported
	  
# Point calico at the cluster's (external) etcd; changes in the Secret:
	etcd-ca: null -> etcd-ca=`cat /etc/kubernetes/pki/etcd/ca.crt | base64 | tr -d '\n'`
	etcd-cert: null -> etcd-cert=`cat /etc/kubernetes/pki/etcd/server.crt | base64 | tr -d '\n'`
	etcd-key: null -> etcd-key=`cat /etc/kubernetes/pki/etcd/server.key | base64 | tr -d '\n'`

# Changes in the ConfigMap
	etcd_endpoints: "https://192.168.0.10:2379"
	etcd_ca: "/calico-secrets/etcd-ca"
	etcd_cert: "/calico-secrets/etcd-cert"
	etcd_key: "/calico-secrets/etcd-key"
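
# A sketch of the Secret edits above, assuming the stock manifest's "# etcd-ca: null"-style placeholders
# (check the file first; the placeholder text varies between calico releases):
	ETCD_CA=$(base64 -w0 < /etc/kubernetes/pki/etcd/ca.crt)
	ETCD_CERT=$(base64 -w0 < /etc/kubernetes/pki/etcd/server.crt)
	ETCD_KEY=$(base64 -w0 < /etc/kubernetes/pki/etcd/server.key)
	sed -i "s|# etcd-ca: null|etcd-ca: ${ETCD_CA}|" calico-etcd.yaml
	sed -i "s|# etcd-cert: null|etcd-cert: ${ETCD_CERT}|" calico-etcd.yaml
	sed -i "s|# etcd-key: null|etcd-key: ${ETCD_KEY}|" calico-etcd.yaml

# Apply the manifest; once the calico pods are Running the nodes turn Ready:
	kubectl apply -f calico-etcd.yaml
	kubectl get pods -n kube-system -w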

Deploy metrics-server

# Create the metrics-server manifest. Here it is pinned to run only on k8s-master-1; adjust as you see fit. If you remove the pinning, copy /etc/kubernetes/pki/front-proxy-ca.crt to the other nodes first.
cat > metrics-server.yaml <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  - configmaps
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      nodeName: k8s-master-1  # to stop pinning to k8s-master-1, delete from this line through the "operator" line
      tolerations:
      - key: "node-role.kubernetes.io/master"
        operator: "Exists"
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --metric-resolution=30s
        - --kubelet-insecure-tls
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --requestheader-client-ca-file=/etc/kubernetes/ssl/front-proxy-ca.crt
        - --requestheader-username-headers=X-Remote-User
        - --requestheader-group-headers=X-Remote-Group
        - --requestheader-extra-headers-prefix=X-Remote-Extra-
        image: registry.aliyuncs.com/google_containers/metrics-server:v0.4.1
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /livez
            port: https
            scheme: HTTPS
          periodSeconds: 10
        name: metrics-server
        ports:
        - containerPort: 4443
          name: https
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /readyz
            port: https
            scheme: HTTPS
          periodSeconds: 10
        securityContext:
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
        - name: ca-ssl
          mountPath: /etc/kubernetes/ssl
      volumes:
      - emptyDir: {}
        name: tmp-dir
      - name: ca-ssl
        hostPath:
          path: /etc/kubernetes/pki
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100
EOF
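
# Apply the manifest and wait for the pod to come up:
	kubectl apply -f metrics-server.yaml
	kubectl get pods -n kube-system -l k8s-app=metrics-server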


# Query resource usage
root@k8s-master-1:~/k8s# kubectl top nodes
NAME           CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
k8s-master-1   191m         9%     1688Mi          44%       
k8s-node-1     104m         5%     1476Mi          38%       
k8s-node-2     121m         6%     1248Mi          32%

Network Tests

Notes:

  1. Prefer busybox 1.28; newer images have a known DNS-resolution (nslookup) bug

  2. Pods must be able to resolve Services

  3. Pods must be able to resolve Services across namespaces

  4. Every node must be able to reach the kubernetes Service at 10.0.0.1:443 and the kube-dns Service at 10.0.0.10:53 (a quick check is sketched after this list)

  5. Pod-to-Pod communication must work:

    • within the same namespace
    • across namespaces
    • across nodes
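# A quick per-node check for item 4 (a sketch; on a default kubeadm cluster /version is readable anonymously, so -k suffices):
	curl -k https://10.0.0.1:443/version
	nslookup kubernetes.default.svc.cluster.local 10.0.0.10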
# Deploy the test pods
root@k8s-master-1:~/k8s# cat network-test.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: ns-1
---
apiVersion: v1
kind: Namespace
metadata:
  name: ns-2
 
---
apiVersion: v1
kind: Pod
metadata:
  name: busybox-1
  namespace: ns-1
spec:
  nodeName: k8s-master-1
  tolerations:
  - key: node-role.kubernetes.io/master
    operator: Exists
  containers:
  - name: busybox
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command:
    - sleep
    - "86400"
  restartPolicy: OnFailure
---
apiVersion: v1
kind: Service
metadata:
  name: ns-service
  namespace: ns-2
spec:
  ports:
  - name: test
    port: 80
  type: ClusterIP
---
apiVersion: v1
kind: Pod
metadata:
  name: busybox-2
  namespace: ns-1
spec:
  nodeName: k8s-node-1
  containers:
  - name: busybox
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command:
    - sleep
    - "86400"
  restartPolicy: OnFailure
---
apiVersion: v1
kind: Pod
metadata:
  name: busybox-3
  namespace: ns-2
spec:
  nodeName: k8s-node-2
  containers:
  - name: busybox
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command:
    - sleep
    - "86400"
  restartPolicy: OnFailure
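
# Create the namespaces, pods and service:
	kubectl apply -f network-test.yaml
	kubectl get pods -A -o wide | grep busybox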
# Resolve a Service in the same namespace
[root@k8s-master-1 ssl]# kubectl exec busybox-1 -- nslookup kubernetes
Server:    10.0.0.10
Address 1: 10.0.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.0.0.1 kubernetes.default.svc.cluster.local

# Resolve a Service across namespaces
[root@k8s-master-1 ssl]# kubectl exec busybox-1 -- nslookup kube-dns.kube-system
Server:    10.0.0.10
Address 1: 10.0.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kube-dns.kube-system
Address 1: 10.0.0.10 kube-dns.kube-system.svc.cluster.local

# Pods must reach each other within the same namespace and across nodes
root@k8s-master-1:~/k8s# kubectl get pods -A -o wide
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE     IP             NODE           NOMINATED NODE   READINESS GATES
.........................................
ns-1          busybox-1                                  1/1     Running   0          3m53s   10.70.196.8    k8s-master-1   <none>           <none>
ns-1          busybox-2                                  1/1     Running   0          3m53s   10.70.109.72   k8s-node-1     <none>           <none>
ns-2          busybox-3                                  1/1     Running   0          3m53s   10.70.140.68   k8s-node-2     <none>           <none>

root@k8s-master-1:~/k8s# kubectl exec busybox-1 -n ns-1 -- ping 10.70.140.68
PING 10.70.140.68 (10.70.140.68): 56 data bytes
64 bytes from 10.70.140.68: seq=0 ttl=62 time=0.775 ms
64 bytes from 10.70.140.68: seq=1 ttl=62 time=0.542 ms
64 bytes from 10.70.140.68: seq=2 ttl=62 time=0.448 ms
64 bytes from 10.70.140.68: seq=3 ttl=62 time=1.237 ms
^C

Dashboard Deployment

Upstream manifest: https://raw.githubusercontent.com/kubernetes/dashboard/v2.3.1/aio/deploy/recommended.yaml

# The upstream manifest does not pin Dashboard to any node; here nodeName and tolerations are added so it runs on the master, and the Service is exposed as a NodePort
cat << EOF > dashboard.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort          # expose on the host
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30005     # pin the NodePort
  selector:
    k8s-app: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
    # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      nodeName: k8s-master-1
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.3.1
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      nodeName: k8s-master-1
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.6
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
          - mountPath: /tmp
            name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      volumes:
        - name: tmp-volume
          emptyDir: {}
# Authorization (section added on top of the upstream manifest)
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-service-account
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding 
metadata: 
  name: admin-service-account
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-service-account
  namespace: kube-system
EOF
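
# Apply the manifest:
	kubectl apply -f dashboard.yaml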
# Check the pods
root@k8s-master-1:~/k8s# kubectl get pods -n kubernetes-dashboard
NAME                                        READY   STATUS    RESTARTS   AGE
dashboard-metrics-scraper-8c895bffb-dstdv   1/1     Running   0          108s
kubernetes-dashboard-55d579d8c8-5mjqq       1/1     Running   0          108s

# Check the services
root@k8s-master-1:~/k8s# kubectl get svc -n kubernetes-dashboard
NAME                        TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)         AGE
dashboard-metrics-scraper   ClusterIP   10.0.214.176   <none>        8000/TCP        2m2s
kubernetes-dashboard        NodePort    10.0.86.189    <none>        443:30005/TCP   2m2s

# Get the admin-service-account token
root@k8s-master-1:~/k8s# kubectl describe secret $(kubectl get secret -n kube-system | grep admin-service-account | awk '{print $1}') -n kube-system 
Name:         admin-service-account-token-ql8xn
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin-service-account
              kubernetes.io/service-account.uid: 254b2a57-7d02-4508-a4c3-9f6f5c365921

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1066 bytes
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IkdFcUkxZTJlUDB5VXhiUkIwQmlCWnF6bXQyc0RsMmJTSjc1UENZOU1qNUEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi1zZXJ2aWNlLWFjY291bnQtdG9rZW4tcWw4eG4iLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiYWRtaW4tc2VydmljZS1hY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiMjU0YjJhNTctN2QwMi00NTA4LWE0YzMtOWY2ZjVjMzY1OTIxIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmFkbWluLXNlcnZpY2UtYWNjb3VudCJ9.kqaqvZvGh1LB_nt95XpRBaqQ7PJTjt49uex_RBHeThOvUyePevW9zqZhyyjaPudCEwHh12SOBx6eLKq5IY2aWrm1QZmcCOxjYgmn-JSYmst9VYcQQxYdqbGl55rRvJuXZXVxytMTx06oL0db7jpwQtLTahxabnR7jYwTMmyMw4lLtNrgClv22PkQZX2H6_HGyiRZFs34FEOaC60WBc9YnrIiIk1CaMWEQKuZE7F4cHRFmJWpbdeDYJxszQOVz_rCxdEEUThAyfKD5ed62xoG0OYsP5I7NtOsrN1K_sIz7dN3F6uJJj2yJDG6qRCPB46xXTZKpVcIQATSOnx-LMwwQQ

(Screenshot: Dashboard token login page)

Paste the token above into the login form at https://192.168.0.10:30005 to sign in to the Dashboard.

(Screenshot: Dashboard overview after login)
