don’t forget to change context
kubectl config use-context <name>
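list the available contexts first if the exact name is unknown (the current one is marked with *)
kubectl config get-contexts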
count nodes
kubectl get nodes
kubectl describe nodes | grep -i taint
kubectl describe nodes | grep -i taint | grep -i none | wc -l   # count nodes with no taints
retrieve Error Messages
kubectl logs -n <namespace> <pod> -c <container> | grep -i err
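if the container has already restarted, the error may only exist in the previous instance's logs; add --previous
kubectl logs -n <namespace> <pod> -c <container> --previous | grep -i err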
find the pod (most CPU)
kubectl top pod -n web --sort-by cpu -l app=auth
expose container port
...
spec:
  containers:
  - image: nginx
    ports:
    - containerPort: 80
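optional check that the container answers on the exposed port (pod name is a placeholder); port-forward, then curl from another terminal
kubectl port-forward -n <ns> pod/<pod> 8080:80
curl http://localhost:8080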
expose svc externally (NodePort)
apiVersion: v1
kind: Service
metadata:
  name: <svcname>
  namespace: <ns>
spec:
  type: NodePort
  selector:
    app: <app>
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
    nodePort: 30080
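verify the service and, if node access is possible, hit the NodePort directly (<node-ip> is a placeholder)
kubectl get svc <svcname> -n <ns>
curl http://<node-ip>:30080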
scale up deployment
kubectl scale deployment <name> -n <ns> --replicas=5
kubectl edit deployment <name> -n <ns>
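either way, confirm the new replica count
kubectl get deployment <name> -n <ns>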
ingress that maps to the svc
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: <ingressname>
  namespace: <ns>
spec:
  rules:
  - http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: <svcname>
            port:
              number: 80
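verify the ingress picked up the backend
kubectl get ingress <ingressname> -n <ns>
kubectl describe ingress <ingressname> -n <ns>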
create sa
kubectl create sa <name> -n <ns>
create clusterRole (pod reader)
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: <crname>
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "watch", "list"]
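the same clusterRole can also be created imperatively
kubectl create clusterrole <crname> --verb=get,watch,list --resource=pods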
bind sa to the clusterRole (roleBinding)
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: <rbname>
  namespace: <ns>
subjects:
- kind: ServiceAccount
  name: <saname>
  namespace: <ns>
roleRef:
  kind: ClusterRole
  name: <crname>
  apiGroup: rbac.authorization.k8s.io
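imperative equivalent
kubectl create rolebinding <rbname> -n <ns> --clusterrole=<crname> --serviceaccount=<ns>:<saname>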
verify rb (impersonate the sa, not the roleBinding)
kubectl get pods -n <ns> --as=system:serviceaccount:<ns>:<saname>
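or ask the API directly whether the impersonated sa is allowed to act
kubectl auth can-i list pods -n <ns> --as=system:serviceaccount:<ns>:<saname>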
etcd backup
export ETCDCTL_API=3
etcdctl snapshot save /home/cloud_user/etcd_backup.db \
--endpoints=https://localhost:2379 \
--cacert=/home/cloud_user/etcd-certs/etcd-ca.pem \
--cert=/home/cloud_user/etcd-certs/etcd-server.crt \
--key=/home/cloud_user/etcd-certs/etcd-server.key
etcdctl --write-out=table snapshot status /home/cloud_user/etcd_backup.db
etcd restore
export ETCDCTL_API=3
sudo systemctl stop etcd
sudo rm -rf /var/lib/etcd
sudo etcdctl snapshot restore /home/cloud_user/etcd_backup.db \
--initial-cluster etcd-restore=https://localhost:2380 \
--initial-advertise-peer-urls https://localhost:2380 \
--name etcd-restore \
--data-dir /var/lib/etcd
sudo chown -R etcd:etcd /var/lib/etcd
sudo systemctl start etcd
verify
etcdctl get cluster.name \
--endpoints=https://localhost:2379 \
--cacert=/home/cloud_user/etcd-certs/etcd-ca.pem \
--cert=/home/cloud_user/etcd-certs/etcd-server.crt \
--key=/home/cloud_user/etcd-certs/etcd-server.key
upgrade (master, worker)
master
sudo apt-get update
sudo apt-get install -y --allow-change-held-packages kubeadm=1.20.2-00 kubelet=1.20.2-00 kubectl=1.20.2-00
kubectl drain master --ignore-daemonsets
sudo kubeadm upgrade plan v1.20.2
sudo kubeadm upgrade apply v1.20.2
sudo systemctl daemon-reload
sudo systemctl restart kubelet
kubectl uncordon master
worker
# on master
kubectl drain worker --ignore-daemonsets --force
# on worker
sudo apt-get update
sudo apt-get install -y --allow-change-held-packages kubeadm=1.20.2-00 kubelet=1.20.2-00 kubectl=1.20.2-00
sudo kubeadm upgrade node
sudo systemctl daemon-reload
sudo systemctl restart kubelet
# on master
kubectl uncordon worker
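verify both nodes report the new version
kubectl get nodes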
make a node unschedulable (drain)
kubectl drain <node> --delete-local-data --ignore-daemonsets --force
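to make it schedulable again later
kubectl uncordon <node>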
label a node
kubectl label nodes <node> key=value
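verify the label
kubectl get nodes -l key=value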
create a pod on a specific node (with label)
apiVersion: v1
kind: Pod
metadata:
  name: fast-nginx
  namespace: dev
spec:
  nodeSelector:
    key: value
  containers:
  - name: nginx
    image: nginx
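verify the pod landed on the labeled node
kubectl get pod fast-nginx -n dev -o wide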
storageClass, pv, pvc
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: localdisk
provisioner: kubernetes.io/no-provisioner
allowVolumeExpansion: true
apiVersion: v1
kind: PersistentVolume
metadata:
  name: host-storage-pv
spec:
  storageClassName: localdisk
  persistentVolumeReclaimPolicy: Recycle
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteOnce
  hostPath:
    path: /etc/data
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: host-storage-pvc
  namespace: auth
spec:
  storageClassName: localdisk
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 100Mi
apiVersion: v1
kind: Pod
metadata:
  name: pv-pod
  namespace: auth
spec:
  containers:
  - name: busybox
    image: busybox
    command: ['sh', '-c', 'while true; do echo success > /output/output.log; sleep 5; done']
    volumeMounts:
    - name: pv-storage
      mountPath: /output
  volumes:
  - name: pv-storage
    persistentVolumeClaim:
      claimName: host-storage-pvc
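verify the pv/pvc bound and that the pod is writing to the volume
kubectl get pv
kubectl get pvc -n auth
kubectl exec pv-pod -n auth -- cat /output/output.log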
networkPolicy
Denies All Access to/from Pods Labeled app: maintenance
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: np-maintenance
  namespace: foo
spec:
  podSelector:
    matchLabels:
      app: maintenance
  policyTypes:
  - Ingress
  - Egress
Allows All Pods in the users-backend Namespace to Communicate with Each Other Only on a Specific Port
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: np-users-backend-80
  namespace: users-backend
spec:
  podSelector: {}
  policyTypes:
  - Ingress
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          app: users-backend
    ports:
    - protocol: TCP
      port: 80
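note: the namespaceSelector only matches namespaces that carry that label; if users-backend is not labeled yet, add it (label assumed to match the selector above)
kubectl label namespace users-backend app=users-backend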
Multi-Container Pod
apiVersion: v1
kind: Pod
metadata:
  name: multi
  namespace: baz
spec:
  containers:
  - name: nginx
    image: nginx
  - name: redis
    image: redis
Create a Pod Which Uses a Sidecar to Expose the Main Container’s Log File to Stdout
apiVersion: v1
kind: Pod
metadata:
  name: logging-sidecar
  namespace: baz
spec:
  containers:
  - name: busybox1
    image: busybox
    command: ['sh', '-c', 'while true; do echo Logging data > /output/output.log; sleep 5; done']
    volumeMounts:
    - name: sharedvol
      mountPath: /output
  - name: sidecar
    image: busybox
    command: ['sh', '-c', 'tail -f /input/output.log']
    volumeMounts:
    - name: sharedvol
      mountPath: /input
  volumes:
  - name: sharedvol
    emptyDir: {}
check
kubectl logs logging-sidecar -n baz -c sidecar
Determine What Is Wrong with the Broken Node
kubectl get nodes
kubectl describe node acgk8s-worker2
# on node
sudo journalctl -u kubelet
sudo systemctl status kubelet
sudo systemctl enable kubelet
sudo systemctl start kubelet
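once kubelet is running, the node should report Ready again
kubectl get nodes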