CKA exam notes, for personal study only.

1. RBAC

# Create the ClusterRole
kubectl create clusterrole deployment-clusterrole --verb=create --resource=deployment,statefulset,daemonset
# Create the namespace
kubectl create namespace app-team1
# Create the ServiceAccount
kubectl create serviceaccount cicd-token -n app-team1
# Bind the account; a RoleBinding (not a ClusterRoleBinding) limits the grant to the namespace
kubectl create rolebinding cicd-token-binding --clusterrole=deployment-clusterrole --serviceaccount=app-team1:cicd-token -n app-team1
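
To verify the binding, kubectl auth can-i can impersonate the ServiceAccount (a quick sanity check, not part of the graded task):

kubectl auth can-i create deployments -n app-team1 --as=system:serviceaccount:app-team1:cicd-token   # expect: yes
kubectl auth can-i delete deployments -n app-team1 --as=system:serviceaccount:app-team1:cicd-token   # expect: no, only create was granted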

Clean up the test environment

kubectl delete namespaces app-team1
kubectl delete clusterrole deployment-clusterrole

2. Drain node

kubectl drain cka003 --ignore-daemonsets --force --delete-emptydir-data
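
After practicing, make the node schedulable again; a quick check first (cka003 as above):

kubectl get pods -A -o wide | grep cka003   # only DaemonSet pods should remain
kubectl uncordon cka003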

3. kubeadm downgrade & upgrade

Too much to memorize here, so lean on the docs: search for "kubeadm upgrade".
Per the question, first run ssh cka001, then sudo -i to switch to the root user.
downgrade

apt update
apt-cache madison kubeadm
apt-mark unhold kubeadm && \
apt-get update && apt-get install -y kubeadm=1.24.2-00 && \
apt-mark hold kubeadm
kubeadm version
kubeadm upgrade plan
sudo kubeadm upgrade apply v1.24.2 --etcd-upgrade=false
kubectl drain cka001 --ignore-daemonsets
apt-mark unhold kubelet kubectl && \
apt-get update && apt-get install -y kubelet=1.24.2-00 kubectl=1.24.2-00 --allow-downgrades && \
apt-mark hold kubelet kubectl
sudo systemctl daemon-reload
sudo systemctl restart kubelet
kubectl uncordon cka001
Remember to exit twice, out of sudo and then out of ssh.
upgrade

ssh cka001
sudo -i
apt update
apt-cache madison kubeadm
apt-mark unhold kubeadm && \
apt-get update && apt-get install -y kubeadm=1.24.3-00 && \
apt-mark hold kubeadm
kubeadm version
kubeadm upgrade plan
sudo kubeadm upgrade apply v1.24.3 --etcd-upgrade=false
kubectl drain cka001 --ignore-daemonsets
apt-mark unhold kubelet kubectl && \
apt-get update && apt-get install -y kubelet=1.24.3-00 kubectl=1.24.3-00 && \
apt-mark hold kubelet kubectl
sudo systemctl daemon-reload
sudo systemctl restart kubelet
kubectl uncordon cka001
exit
exit

Three key points:

--etcd-upgrade=false
--ignore-daemonsets
exit
exit

4. etcd backup and restore


Memorize from local practice: /etc/kubernetes/pki/etcd/xxxx
# Take an etcd snapshot backup
etcdctl snapshot save -h
etcdctl snapshot save /data/backup/etcd-snapshot.db --cacert="/etc/kubernetes/pki/etcd/ca.crt" --cert="/etc/kubernetes/pki/etcd/server.crt" --key="/etc/kubernetes/pki/etcd/server.key" --endpoints=127.0.0.1:2379
# Move the old etcd data directory out of the way
mv /var/lib/etcd  /var/lib/etcd.bak2
# Restore from the snapshot
etcdctl snapshot restore -h
etcdctl snapshot restore /data/backup/etcd-snapshot.db --endpoints=https://127.0.0.1:2379 --cacert="/etc/kubernetes/pki/etcd/ca.crt" --cert="/etc/kubernetes/pki/etcd/server.crt" --key="/etc/kubernetes/pki/etcd/server.key" --data-dir="/var/lib/etcd"
Key points:
1. Don't let kubeadm upgrade etcd in the step above (--etcd-upgrade=false), otherwise etcdctl will throw errors.
2. Locate the cert/key/cacert/endpoints values from the command line, as sketched below.
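
A way to find those paths without memorizing them (a sketch; kubeadm places the etcd static pod manifest at /etc/kubernetes/manifests/etcd.yaml):

# The flags in the etcd static pod manifest carry the exact cert paths
grep -E 'cert-file|key-file|trusted-ca-file|listen-client-urls' /etc/kubernetes/manifests/etcd.yaml
# Or read them off the running process
ps -ef | grep etcd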

5. NetworkPolicy

Task: allow all pods in the existing namespace internal to connect to port 8080 of the target pods, and deny access from any namespace other than internal.

# Simulated environment
kubectl create ns internal
kubectl create deploy nginx --image=nginx --port=80 -n internal
kubectl create deploy tomcat --image=tomcat --port=8080 -n internal
kubectl run centos --image=centos -n internal -- "/bin/sh" "-c" "sleep 3600"

Search kubernetes.io/docs for the keyword NetworkPolicy:


apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-port-from-namespace
  namespace: internal
spec:
  podSelector: {}
  policyTypes:
    - Ingress
  ingress:
    - from:
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: internal
      ports:
        - protocol: TCP
          port: 8080

Functional verification; arguably this verification matters more than the YAML itself.

kubectl get pods -n internal
# Look up the pod IP addresses
kubectl describe pods nginx -n internal 
kubectl describe pods tomcat -n internal 
# Log in to the centos test pod
kubectl exec centos -n internal -i -t -- bash
$ curl <nginx_ip>:80
$ curl <tomcat_ip>:8080
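
A negative test from outside the namespace confirms the deny side (a sketch; centos-out is a pod name chosen here, and <tomcat_ip> is the same placeholder as above):

kubectl run centos-out --image=centos -n default -- /bin/sh -c "sleep 3600"
# -m 5 caps the wait; the request should time out since namespace default is not allowed
kubectl exec centos-out -n default -- curl -m 5 <tomcat_ip>:8080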

Key points:
1. podSelector is required; an empty {} selects all pods in the namespace.
2. For the namespaceSelector, look up the labels with kubectl get ns internal --show-labels.

6. Create a Service

Simulated environment

kubectl create deployment front-end --image=nginx

Use kubectl explain to discover field names

kubectl explain deploy.spec.template.spec.containers.ports
  containerPort
  name
  protocol
kubectl edit deployment front-end
ports:
- containerPort: 80
  name: http
  protocol: TCP

Create the service

kubectl expose deployment front-end --port=80 --target-port=http --name=front-end-svc

Update the service configuration

kubectl edit svc front-end-svc
......
internalTrafficPolicy: Local
type: NodePort
......
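
A quick check that the NodePort took effect (a sketch; <node_ip> and the assigned port are read from the output):

kubectl get svc front-end-svc         # PORT(S) shows e.g. 80:3xxxx/TCP
curl <node_ip>:<assigned_node_port>   # should return the nginx welcome page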

Key points:
1. When unsure of the fields, use kubectl explain deploy.spec.template.spec.containers.ports.
2. When running expose, --target-port must be the port name http, not 80.

Reset the environment

kubectl delete deployment front-end
kubectl delete svc front-end-svc

7. Create an Ingress

Set up the simulated environment; in the real exam this is already done and costs no time.

kubectl patch ingressclass nginx -p '{"metadata": {"annotations": {"ingressclass.kubernetes.io/is-default-class": "true"}}}'
kubectl create ns ing-internal
kubectl create deploy hi --image=nginx --port=80 -n ing-internal
kubectl expose deploy hi --port=5678 --target-port=80 -n ing-internal

Search the official docs directly for kind: Ingress

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ping
  namespace: ing-internal
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  ingressClassName: nginx
  rules:
  - http:
      paths:
      - path: /hi
        pathType: Prefix
        backend:
          service:
            name: hi
            port:
              number: 5678

Apply the config and get the cluster IP.

kubectl apply -f ingress.yaml
# Wait about a minute before the cluster IP shows up
kubectl get ingress -n ing-internal
root@cka001:~/exam# kubectl get ingress -A
NAMESPACE      NAME        CLASS   HOSTS               ADDRESS          PORTS   AGE
default        nginx-app   nginx   app1.com,app2.com   10.106.186.103   80      24d
ing-internal   ping        nginx   *                   10.106.186.103   80      80s
# Test the request
root@cka001:~/exam # curl '10.106.186.103:80/hi'

Key points:

1. namespace: ing-internal
2. Wait for the ingress configuration to complete.
3. Key search term: kind: Ingress

8. Scale deployment

For scale, lean on shell auto-completion.

kubectl create deployment presentation --image nginx
kubectl scale deployment presentation --replicas 3
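
Verification sketch (kubectl create deployment labels the pods app=presentation):

kubectl get deployment presentation          # READY should reach 3/3
kubectl get pods -l app=presentation -o wide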

9. Create a pod with a nodeSelector

Search the official site for "assign pod node", then search the page for kind: Pod to get a YAML that already has a nodeSelector; change the key fields.

apiVersion: v1
kind: Pod
metadata:
  name: nginx-kusc00401
spec:
  containers:
  - name: nginx-kusc00401
    image: nginx
  nodeSelector:
    disk: spinning

Always check whether the pod actually starts. If it stays Pending, the node label was not set correctly, so set the matching label on a node, as sketched below.
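
A minimal sketch of that fix (cka002 is an assumed worker node name):

kubectl get nodes --show-labels | grep disk   # does any node carry the label?
kubectl label node cka002 disk=spinning       # label a node so the selector matches
kubectl get pod nginx-kusc00401 -o wide       # should now be scheduled and Running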

10. Count nodes that are Ready and not tainted NoSchedule

Count the Ready nodes

kubectl get nodes | grep -w Ready   # -w, otherwise NotReady lines match too

Note down the NotReady nodes, e.g. cka005, cka006.

Taint a node (to simulate the scenario)

kubectl taint node cka003 key=value:NoSchedule

Inspect the nodes

kubectl describe node | egrep 'Name:|:NoSchedule'

root@cka001:~# kubectl describe node | egrep "Name:|:NoSchedule"
Name:               cka001
Taints:             node-role.kubernetes.io/control-plane:NoSchedule
                    node-role.kubernetes.io/master:NoSchedule
Name:               cka002
Name:               cka003
Taints:             key=value:NoSchedule
                    node.kubernetes.io/unschedulable:NoSchedule 

Note down the tainted nodes, e.g. cka005, cka006.
Add up the NotReady nodes and the tainted nodes,
then subtract that from the total; see the one-liner sketch below.
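
A rough one-liner for the same count (a sketch; it assumes "NoSchedule" only shows up in a node's describe output when a taint is present):

for n in $(kubectl get nodes --no-headers | awk '$2=="Ready"{print $1}'); do
  kubectl describe node "$n" | grep -q NoSchedule || echo "$n"
done | wc -l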

11. Create multiple containers in one pod

Use dry-run to generate a YAML template

kubectl run kucc8 --image=nginx --dry-run=client -oyaml > multi_containers.yaml

Then edit the file based on the template.

apiVersion: v1
kind: Pod
metadata:
  name: kucc8
spec:
  containers:
  - image: nginx
    name: nginx
  - image: redis
    name: redis
  - image: memcached
    name: memcached
  - image: consul
    name: consul
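
Apply and verify (a sketch):

kubectl apply -f multi_containers.yaml
kubectl get pod kucc8   # expect READY 4/4 once all images have pulled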

12. Create a PV

Search for "PersistentVolumes type hostPath" and find the hostPath example on that page.

apiVersion: v1
kind: PersistentVolume
metadata:
  name: app-config
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadOnlyMany
  hostPath:
    path: "/svc/app-config"
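
Apply and verify (a sketch; pv.yaml is whatever filename the manifest above was saved as):

kubectl apply -f pv.yaml
kubectl get pv app-config   # expect CAPACITY 1Gi, ACCESS MODES ROX, STATUS Available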

13. Create a pod bound to a PVC, then resize the PVC and record the change

The docs are your friend; keyword: kind: PersistentVolumeClaim, then pick the "Persistent Volumes" page.
First create the PVC

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv-volume
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Mi
  storageClassName: csi-hostpath-sc

Then create the pod

apiVersion: v1
kind: Pod
metadata:
  name: web-server
spec:
  containers:
    - name: web-server
      image: nginx
      volumeMounts:
      - mountPath: "/usr/share/nginx/html"
        name: pv-sc
  volumes:
    - name: pv-sc
      persistentVolumeClaim:
        claimName: pv-volume

Apply both manifests.
Afterwards, use kubectl edit pvc pv-volume --record=true to change the storage size and record the change.
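
Expansion only works if the StorageClass allows it; a quick check (a sketch, using the class name from the PVC above):

kubectl get sc csi-hostpath-sc -o jsonpath='{.allowVolumeExpansion}'   # should print true
kubectl get pvc pv-volume   # confirm the new CAPACITY after the edit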

14. kubectl logs

kubectl logs foobal | grep xxx > /opt/foobl
cat /opt/foobl

15. Use a sidecar to watch file updates on another container's volumeMount

busybox is the sidecar
big-corp-app is the pre-existing pod
logs is the mount path shared by both containers
Create an emptyDir volume and mount it in both containers

First create the initial YAML for the big-corp-app pod. This part is not required by hand in the exam; it only sets up the test.

cat > big-corp-app.yaml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: big-corp-app
spec:
  containers:
  - name: count
    image: busybox
    args:
    - /bin/sh
    - -c
    - >
      i=0;
      while true;
      do
        echo "$(date) INFO $i" >> /var/log/big-corp-app.log;
      i=$((i+1));
        sleep 1;
      done
EOF

Then build on top of that.

kubectl apply -f big-corp-app.yaml
kubectl get pods big-corp-app -oyaml > big-corp-app-full.yaml

Now edit big-corp-app-full.yaml: add the shared emptyDir volume, mount it in the existing count container, and add the busybox sidecar:

spec:
  containers:
  - name: count
    ...                  # existing fields unchanged; add the mount below
    volumeMounts:
    - mountPath: /var/log
      name: logs
  - name: busybox        # the sidecar
    image: busybox
    args: ["/bin/sh", "-c", "tail -f /var/log/big-corp-app.log"]
    volumeMounts:
    - mountPath: /var/log
      name: logs
  volumes:
  - name: logs
    emptyDir: {}

Finally, recreate the pod:

kubectl delete pods big-corp-app
kubectl apply -f big-corp-app-full.yaml
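
Verification sketch: both containers should see the same file through the shared volume.

kubectl logs big-corp-app -c busybox --tail=5
kubectl exec big-corp-app -c count -- tail -n 3 /var/log/big-corp-app.log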

Remember to clean up after the experiment:

kubectl delete pods big-corp-app

16. Rank pods with a given label by CPU

root@cka001:~/exam# kubectl top pod --sort-by=cpu -l app=podinfo
NAME                       CPU(cores)   MEMORY(bytes)
podinfo-668b5b9b5b-9fcd2   1m           2Mi
podinfo-668b5b9b5b-zk9bc   0m           2Mi
echo 'podinfo-668b5b9b5b-9fcd2' > /opt/maxcpupod.txt

17. Bring a node back online

ssh cka003
sudo -i
systemctl start kubelet
systemctl enable kubelet
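
Verification sketch: back on the control plane, the node should report Ready after a short wait.

systemctl status kubelet   # on cka003: active (running)
kubectl get node cka003    # from the control plane: STATUS Ready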