CKA Real Exam Questions
1. RBAC
# Create a new ClusterRole for a deployment pipeline and bind it to a ServiceAccount scoped to a specific namespace.
# Task: Create a ClusterRole named deployment-clusterrole that only allows the create verb on deployments, daemonsets, and statefulsets. In the existing namespace app-team1, create a new ServiceAccount named cicd-token.
# Limited to the namespace app-team1, bind the new ClusterRole deployment-clusterrole to the new ServiceAccount cicd-token.
# Reference: https://kubernetes.io/zh-cn/docs/reference/access-authn-authz/rbac/
kubectl config use-context k8s
kubectl create clusterrole deployment-clusterrole --verb=create --resource=deployments,daemonsets,statefulsets
kubectl create ns app-team1   # only needed when practicing; the namespace already exists in the exam
kubectl create serviceaccount cicd-token -n app-team1
# The RoleBinding must be created in app-team1, hence -n
kubectl create rolebinding cicd-token-binding --clusterrole=deployment-clusterrole --serviceaccount=app-team1:cicd-token -n app-team1
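# Quick verification sketch: kubectl auth can-i impersonates the ServiceAccount and should print "yes"
kubectl auth can-i create deployments --as=system:serviceaccount:app-team1:cicd-token -n app-team1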
2. Make a node unschedulable
# Task: Mark the node ek8s-node-1 unschedulable, then reschedule all pods running on it
# Reference: https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/
# Switch to the specified cluster context
kubectl config use-context ek8s
# Mark the node unschedulable
kubectl cordon ek8s-node-1
# Evict the pods on ek8s-node-1
kubectl drain ek8s-node-1 --delete-emptydir-data --ignore-daemonsets --force
# Check
kubectl get pods -n kube-system -o wide
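# The node should now report SchedulingDisabled, with only DaemonSet pods left on it
kubectl get node ek8s-node-1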
3. Kubernetes version upgrade
# Take a host out of service, then upgrade the control plane and node components
# Task: An existing Kubernetes cluster is running version 1.23.1. Upgrade all Kubernetes control-plane and node components on the master node only to version 1.23.2. Make sure to drain the master node before the upgrade and uncordon it afterwards.
# Also upgrade kubelet and kubectl on the master node. Do NOT upgrade the worker nodes, etcd, the container manager, the CNI plugin, the DNS service, or any other add-ons.
# Reference: https://kubernetes.io/zh-cn/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/
kubectl cordon master01
kubectl drain master01 --delete-emptydir-data --ignore-daemonsets --force
# SSH to the master node
ssh master01
# Switch to root
sudo -i
# Refresh the package index, then check the available versions
apt-get update
apt-cache show kubeadm | grep 1.23.2   # or: apt-cache madison kubeadm
apt-get install kubeadm=1.23.2-00
apt-get install kubectl=1.23.2-00
# Verify the versions
kubeadm version
kubectl version
# Review the upgrade plan
kubeadm upgrade plan
# Upgrade everything except etcd
kubeadm upgrade apply v1.23.2 --etcd-upgrade=false
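# The task also requires upgrading kubelet on the master; per the official kubeadm upgrade flow:
apt-get install kubelet=1.23.2-00
systemctl daemon-reload
systemctl restart kubelet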
# Check (the AGE column shows which control-plane pods were just recreated)
kubectl get pods -n kube-system
# Exit the root shell
exit
# Exit the master01 node
exit
# Back on node1, make master01 schedulable again
kubectl uncordon master01
# Check
kubectl get node
4. etcd backup and restore
# Task: First, create a snapshot of the existing etcd instance running at https://127.0.0.1:2379 and save it to /srv/data/etcd-snapshot.db.
# Then restore the existing previous snapshot located at /var/lib/backup/etcd-snapshot-previous.db.
# The following key and TLS certificates are provided for connecting to the server with etcdctl:
# CA certificate: /opt/KUIN00601/ca.crt
# Client certificate: /opt/KUIN00601/etcd-client.crt
# Client key: /opt/KUIN00601/etcd-client.key
# Reference: https://kubernetes.io/zh-cn/docs/tasks/administer-cluster/configure-upgrade-etcd/
# Install etcdctl (if it is not already on the PATH)
cp etcd-v3.4.13-linux-amd64/etcdctl /usr/bin/
# Run on the node1 terminal
# Back up etcd
# (on a kubeadm cluster the certificates normally live under /etc/kubernetes/pki/etcd/)
export ETCDCTL_API=3
sudo mkdir /srv/data
sudo ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 \
--cacert=/opt/KUIN00601/ca.crt --cert=/opt/KUIN00601/etcd-client.crt \
--key=/opt/KUIN00601/etcd-client.key snapshot save /srv/data/etcd-snapshot.db
# Restore etcd from the previous snapshot (note: the restore source given in the task differs from the backup target)
export ETCDCTL_API=3
sudo etcdctl --endpoints=https://127.0.0.1:2379 \
--cacert=/opt/KUIN00601/ca.crt --cert=/opt/KUIN00601/etcd-client.crt \
--key=/opt/KUIN00601/etcd-client.key snapshot restore /var/lib/backup/etcd-snapshot-previous.db
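# Note: "snapshot restore" materializes a new data directory rather than modifying the live one.
# A common variant (the target directory below is an assumption, not part of the task):
#   sudo ETCDCTL_API=3 etcdctl snapshot restore /var/lib/backup/etcd-snapshot-previous.db \
#     --data-dir=/var/lib/etcd-restore
# then repoint the etcd static pod's hostPath volume at the new directory.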
5. NetworkPolicy
# Task: Create a new NetworkPolicy named allow-port-from-namespace in the existing namespace my-app. Make sure the new NetworkPolicy allows pods in namespace echo to connect to port 9000 of pods in namespace my-app.
# Further ensure that the new NetworkPolicy:
# - does not allow access to pods that are not listening on port 9000
# - does not allow access from pods that are not in namespace echo
# Reference: https://kubernetes.io/zh-cn/docs/concepts/services-networking/network-policies/
# Set the kubectl context
kubectl config use-context hk8s
# Inspect the environment
kubectl get ns --show-labels
# Create and label the namespaces (creation is only needed when practicing)
kubectl create ns my-app
kubectl create ns echo
kubectl label ns echo project=echo
# 1. Copy the NetworkPolicy example from the docs and modify it as follows
cat > networkpolicy.yaml << EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-port-from-namespace
  namespace: my-app
spec:
  podSelector:
    matchLabels: {}
  policyTypes:
  - Ingress
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          project: echo
    ports:
    - protocol: TCP
      port: 9000
EOF
kubectl apply -f networkpolicy.yaml
# Check
kubectl get networkpolicy -n my-app
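# Optionally inspect the rendered rules
kubectl describe networkpolicy allow-port-from-namespace -n my-app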
6. Layer-4 load balancing with a Service
# Task: Reconfigure the existing deployment front-end: add a port configuration named http to the container named nginx, exposing port 80. Then create a service named front-end-svc that exposes the http port of that deployment, with service type NodePort.
# https://kubernetes.io/zh-cn/docs/concepts/services-networking/service/
# Deployment config (for practicing; front-end already exists in the exam)
cat > deployment1.yaml << EOF
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: front-end
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
EOF
kubectl apply -f deployment1.yaml
kubectl get deploy
# Edit the deployment and add the port name and number to the nginx container
kubectl explain deployment.spec.template.spec.containers.ports
kubectl edit deployment front-end
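# While editing, add this snippet under the nginx container (the name and port come from the task):
#        ports:
#        - name: http
#          containerPort: 80
#          protocol: TCP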
# Check
kubectl describe deploy front-end
kubectl get deploy front-end -o yaml
# Create the service
cat > service1.yaml << EOF
---
apiVersion: v1
kind: Service
metadata:
  name: front-end-svc
spec:
  type: NodePort
  ports:
  - port: 80
    # target the container port named "http"
    targetPort: http
  # select the pods carrying the nginx label
  selector:
    app: nginx
EOF
kubectl apply -f service1.yaml
# Or create the service on the command line:
# kubectl expose --help
# kubectl expose deployment front-end --name=front-end-svc --port=80 --target-port=http --type=NodePort
# Check: look up the assigned NodePort, then curl it
kubectl get svc front-end-svc
curl localhost:xxxx
7. Layer-7 proxying with Ingress
# Task: Create a new nginx Ingress resource as follows: name: pong, namespace: ing-internal, exposing service hello on path /hello using service port 5678
# The availability of service hello can be checked with the following command, which should return hello: curl -kL <INTERNAL_IP>/hello
# Reference: https://kubernetes.io/zh-cn/docs/concepts/services-networking/ingress/
# Check the setup
# Check whether an ingress controller is deployed
# Check the namespace (creation is only needed when practicing)
kubectl create ns ing-internal
# Write the ingress
cat > ingress.yaml << EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: pong
  namespace: ing-internal
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  ingressClassName: nginx
  rules:
  - http:
      paths:
      - path: /hello
        pathType: Prefix
        backend:
          service:
            name: hello
            port:
              number: 5678
EOF
kubectl apply -f ingress.yaml
# Check
kubectl get ingress -n ing-internal
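# Once the controller assigns an address (shown by the command above), verify per the task:
curl -kL <INTERNAL_IP>/hello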
8. Scaling pods with a deployment
# Task: Scale the pods managed by the deployment loadbalance to 6 replicas
# Reference: https://kubernetes.io/zh-cn/docs/concepts/workloads/controllers/statefulset/
# Scale
kubectl scale --help
kubectl scale --replicas=6 deployment/loadbalance
# Or edit loadbalance directly
# kubectl edit deployment loadbalance
9. Scheduling a Pod to a specific node
# Task: Create a Pod named nginx-kusc00401 with image nginx, scheduled onto a node carrying the label disk=spinning
# Reference: https://kubernetes.io/zh-cn/docs/tasks/configure-pod-container/assign-pods-nodes/
# kubectl explain pod.spec.nodeSelector
# Check whether a node carries the label
kubectl get node --show-labels
# If none does, add it: kubectl label node node1 disk=spinning
cat > pod.yaml << EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx-kusc00401
spec:
  containers:
  - name: nginx
    image: nginx
    imagePullPolicy: IfNotPresent
  nodeSelector:
    disk: spinning
EOF
kubectl apply -f pod.yaml
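# Confirm the pod was scheduled onto the labeled node
kubectl get pod nginx-kusc00401 -o wide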
10. Counting Ready nodes
# Task: Count how many nodes in the cluster are Ready (excluding nodes carrying a NoSchedule taint), then write the number to /opt/KUSC00402/kusc00402.txt
# -w matches the whole word
kubectl get nodes |grep -w "Ready" |wc -l
kubectl describe nodes master1 node1 |grep "Taint" |grep "NoSchedule" |wc -l
mkdir -p /opt/KUSC00402
echo "xxx" > /opt/KUSC00402/kusc00402.txt   # xxx = Ready count minus NoSchedule-tainted count
11. Pod with multiple containers
# Task: Create a Pod named kuccl containing 4 containers: nginx, redis, memcached, and consul
# Reference: https://kubernetes.io/zh-cn/docs/concepts/workloads/pods/
cat > kuccl.yaml << EOF
apiVersion: v1
kind: Pod
metadata:
  name: kuccl
spec:
  containers:
  - name: nginx
    image: nginx:1.14.2
  - name: redis
    image: redis
  - name: memcached
    image: memcached
  - name: consul
    image: consul
EOF
kubectl apply -f kuccl.yaml
kubectl get pods
12. PersistentVolume
# Task: Create a PV named app-config, 2Gi in size, with access mode ReadWriteMany; the volume type is hostPath with path /srv/app-config
# Reference: https://kubernetes.io/zh-cn/docs/concepts/storage/persistent-volumes/
# Write the PV
cat > pv.yaml << EOF
apiVersion: v1
kind: PersistentVolume
metadata:
  name: app-config
spec:
  capacity:
    storage: 2Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteMany
  hostPath:
    path: "/srv/app-config"
EOF
kubectl apply -f pv.yaml
kubectl get pv
13. PVC
# Task: Create a PVC named pv-volume with storageClass csi-hostpath-sc and size 10Mi; then create a Pod named web-server with image nginx, mounting the PVC at /usr/share/nginx/html, with access mode ReadWriteOnce. Afterwards, use kubectl edit or kubectl patch to grow the PVC to 70Mi and record the change
# Reference: https://kubernetes.io/zh-cn/docs/tasks/configure-pod-container/configure-persistent-volume-storage/
# (the storage class would be provisioned from NFS, Ceph, or a similar backend)
# Create the PVC
cat > pvc.yaml << EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv-volume
spec:
  storageClassName: csi-hostpath-sc
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 10Mi
EOF
kubectl apply -f pvc.yaml
# Create the Pod
cat > pvc-pod.yaml << EOF
apiVersion: v1
kind: Pod
metadata:
  name: web-server
spec:
  volumes:
  - name: pv-volume
    persistentVolumeClaim:
      claimName: pv-volume
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - mountPath: "/usr/share/nginx/html"
      name: pv-volume
EOF
kubectl apply -f pvc-pod.yaml
kubectl get pvc
# Resize the PVC (change storage: 10Mi to 70Mi; --record keeps the change in the annotation history)
kubectl edit pvc pv-volume --record
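# Equivalent sketch with kubectl patch, which the task also allows (--record is deprecated but still accepted):
kubectl patch pvc pv-volume -p '{"spec":{"resources":{"requests":{"storage":"70Mi"}}}}' --record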
14. Viewing pod logs
# Task: Monitor the logs of the Pod named foobar, filter out the lines containing unable-access-website, and write them to /opt/KUTR00101/foobar
kubectl get pods
kubectl logs foobar |grep unable-access-website > /opt/KUTR00101/foobar
15. Sidecar proxy
# Task: Using the busybox image, add a sidecar container named sidecar to the existing Pod legacy-app. The new sidecar container must run the following command: /bin/sh -c tail -n+1 -f /var/log/legacy-app.log
# Mount the /var/log directory with a volume so the sidecar can access /var/log/legacy-app.log
# Reference: https://kubernetes.io/zh-cn/docs/concepts/cluster-administration/logging/
# Simulate the existing legacy-app Pod
cat > legacy-app.yaml << 'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: legacy-app
spec:
  containers:
  - name: count
    image: busybox:1.28
    args:
    - /bin/sh
    - -c
    - >
      i=0;
      while true;
      do
        echo "$i: $(date)" >> /var/log/legacy-app.log;
        i=$((i+1));
        sleep 1;
      done
    volumeMounts:
    - name: logs
      mountPath: /var/log
  volumes:
  - name: logs
    emptyDir: {}
EOF
kubectl apply -f legacy-app.yaml
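# Note: containers cannot be added to a running Pod, so on the exam you would first export the
# existing Pod, delete it, and recreate it with the sidecar added:
#   kubectl get pod legacy-app -o yaml > c-sidecar.yaml
#   kubectl delete pod legacy-app
# Below, the full recreated spec is written out from scratch instead.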
# Write the Pod spec with the sidecar container added
cat > c-sidecar.yaml << 'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: legacy-app
  namespace: default
spec:
  containers:
  - name: count
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    args:
    - /bin/sh
    - -c
    - >
      i=0;
      while true;
      do
        echo "$i: $(date)" >> /var/log/legacy-app.log;
        i=$((i+1));
        sleep 1;
      done
    volumeMounts:
    - mountPath: /var/log
      name: logs
  - name: sidecar
    image: busybox
    imagePullPolicy: IfNotPresent
    args: [ '/bin/sh', '-c', 'tail -n+1 -f /var/log/legacy-app.log' ]
    volumeMounts:
    - name: logs
      mountPath: /var/log
  volumes:
  - name: logs
    emptyDir: {}
EOF
kubectl apply -f c-sidecar.yaml
# Check the sidecar's log stream
kubectl logs legacy-app -c sidecar
16. Checking pod CPU
# Task: Find the pods with the label name=cpu-user, pick out the one using the most CPU, and write its name into the existing file /opt/KUTR00401/KUTER00401.txt (no namespace is specified, so use -A to cover all namespaces)
# Install metrics-server
# 1. Upload the tarball to the worker nodes
# 2. docker load the image
# 3. apply the metrics-server yaml
kubectl get pods -n kube-system
# Show pod CPU usage across all namespaces, sorted by CPU
kubectl top pod -l name=cpu-user --sort-by=cpu -A
echo xxx > /opt/KUTR00401/KUTER00401.txt   # xxx = the pod name at the top of the list
17. Cluster troubleshooting
# Task: A node named wk8s-node-0 is NotReady. Bring it back to a normal state, and make sure the fix survives a reboot
# Connect to wk8s-node-0 and become root
ssh wk8s-node-0
sudo -i
# Restart kubelet and enable it at boot
systemctl status kubelet
systemctl restart kubelet
systemctl enable kubelet
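# Leave the node and verify from the exam terminal
exit
exit
kubectl get node wk8s-node-0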