一、控制器
1.Deployment
1)Deploy图例
2)Deploy控制器
# 清理 Pod ,使用控制器创建
[root@master ~]# kubectl delete pod --all
# 资源对象模板
[root@master ~]# kubectl create deployment myweb --image=myos:httpd --dry-run=client -o yaml
[root@master ~]# vim mydeploy.yaml
---
kind: Deployment                  # resource object type
apiVersion: apps/v1               # API version
metadata:                         # metadata
  name: myweb                     # controller name
spec:                             # detailed definition
  replicas: 2                     # number of replicas
  selector:                       # label selector
    matchLabels:                  # matchExpressions syntax is also supported
      app: httpd                  # labels determine which Pods this controller manages
  template:                       # Pod template; everything below defines the Pod
    metadata:
      labels:
        app: httpd                # must match spec.selector.matchLabels above
    spec:
      restartPolicy: Always
      containers:
      - name: webserver
        image: myos:httpd
        imagePullPolicy: Always
[root@master ~]# kubectl apply -f mydeploy.yaml
[root@master ~]# kubectl get deployments
[root@master ~]# kubectl get replicasets
[root@master ~]# kubectl get pods
3)ClusterIP服务
# 创建服务访问集群
[root@master ~]# vim websvc.yaml
---
kind: Service
apiVersion: v1
metadata:
  name: websvc
spec:
  type: ClusterIP
  clusterIP: 10.245.1.80          # fixed cluster IP (must be inside the service CIDR)
  selector:
    app: httpd                    # forwards traffic to Pods carrying this label
  ports:
  - protocol: TCP
    port: 80                      # port exposed by the Service
    targetPort: 80                # port the container listens on
[root@master ~]# kubectl apply -f websvc.yaml
[root@master ~]# curl -m 3 http://10.245.1.80
4)Pod维护管理
# 自维护自治理
[root@master ~]# kubectl get pods
# Pod 被删除后,Deploy 会自动创建新的 Pod 来维护集群的完整性
[root@master ~]# kubectl delete pod myweb-64b544dcbc-5mhqn
[root@master ~]# kubectl get pods
5)集群扩缩容
# 设置 1 Pod 集群
[root@master ~]# kubectl scale deployment myweb --replicas=1
[root@master ~]# kubectl get pods
# 设置 3 Pod 集群
[root@master ~]# kubectl scale deployment myweb --replicas=3
[root@master ~]# kubectl get pods
6)历史版本信息
# 查看历史版本
[root@master ~]# kubectl rollout history deployment myweb
# 添加注释信息
[root@master ~]# kubectl annotate deployments myweb kubernetes.io/change-cause="httpd.v1"
[root@master ~]# kubectl rollout history deployment myweb
7)滚动更新
# 修改镜像,滚动更新集群
[root@master ~]# kubectl set image deployment myweb webserver=myos:nginx
# 给新版本添加注释信息
[root@master ~]# kubectl annotate deployments myweb kubernetes.io/change-cause="nginx.v1"
# 查看历史版本信息
[root@master ~]# kubectl rollout history deployment myweb
# 访问验证服务
[root@master ~]# curl -m 3 http://10.245.1.80
8)版本回滚
# 历史版本与回滚
[root@master ~]# kubectl rollout undo deployment myweb --to-revision 1
[root@master ~]# curl -m 3 http://10.245.1.80
[root@master ~]# kubectl rollout history deployment myweb
# 删除控制器方法1
[root@master ~]# kubectl delete deployments myweb
# 删除控制器方法2
[root@master ~]# kubectl delete -f mydeploy.yaml
2.DaemonSet
1)DS图例
2)资源对象案例
[root@master ~]# cp -a mydeploy.yaml myds.yaml
[root@master ~]# vim myds.yaml
---
kind: DaemonSet                   # resource object type
apiVersion: apps/v1
metadata:
  name: myds                      # controller name
spec:
  # replicas: 2                   # removed: DaemonSet has no replicas field, it runs one Pod per node
  selector:
    matchLabels:
      app: httpd
  template:
    metadata:
      labels:
        app: httpd
    spec:
      restartPolicy: Always
      containers:
      - name: webserver
        image: myos:httpd
        imagePullPolicy: Always
[root@master ~]# kubectl apply -f myds.yaml
[root@master ~]# kubectl get pods -o wide
3)污点干扰
# 设置污点,重建 daemonset
[root@master ~]# kubectl taint node node-0001 k=v:NoSchedule
[root@master ~]# kubectl delete -f myds.yaml
[root@master ~]# kubectl apply -f myds.yaml
# 有污点不会部署,特殊需求可以设置容忍策略
[root@master ~]# kubectl get pods
# 删除污点后会立即部署
[root@master ~]# kubectl taint node node-0001 k=v:NoSchedule-
[root@master ~]# kubectl get pods
# 删除控制器
[root@master ~]# kubectl delete -f myds.yaml
3.Job/CronJob
1)Job图例
Job -> Pod
2)Job控制器
kind -> Job
# 资源对象模板
[root@master ~]# kubectl create job myjob --image=myos:8.5 --dry-run=client -o yaml -- sleep 3
[root@master ~]# vim myjob.yaml
---
kind: Job
apiVersion: batch/v1
metadata:
  name: myjob
spec:
  template:                       # Pod template definition below
    spec:
      restartPolicy: OnFailure    # restart the container on failure (Jobs forbid Always)
      containers:
      - name: myjob
        image: myos:8.5
        command: ["/bin/bash"]
        args:
        - -c
        - |
          sleep 3
          exit $((RANDOM%2))
[root@master ~]# kubectl apply -f myjob.yaml
# 失败了会重启
[root@master ~]# kubectl get pods -l job-name=myjob -w
[root@master ~]# kubectl get jobs.batch
# 删除Job控制器
[root@master ~]# kubectl delete -f myjob.yaml
3)CJ图例
4)资源对象案例
kind: CronJob
# 资源对象模板
[root@master ~]# kubectl create cronjob mycj --image=myos:8.5 --schedule='* * * * *' --dry-run=client -o yaml -- sleep 3
[root@master ~]# vim mycj.yaml
---
kind: CronJob
apiVersion: batch/v1
metadata:
  name: mycj
spec:
  schedule: "* * * * 1-5"         # every minute, Monday through Friday
  jobTemplate:                    # Job template definition below
    spec:
      template:
        spec:
          restartPolicy: OnFailure
          containers:
          - name: myjob
            image: myos:8.5
            command: ["/bin/bash"]
            args:
            - -c
            - |
              sleep 3
              exit $((RANDOM%2))
[root@master ~]# kubectl apply -f mycj.yaml
[root@master ~]# kubectl get cronjobs
# 按照时间周期,每分钟触发一个任务
[root@master ~]# kubectl get jobs -w
# 保留三次结果,多余的会被删除
[root@master ~]# kubectl get jobs
[root@master ~]# kubectl get jobs
# 删除CJ控制器
[root@master ~]# kubectl delete -f mycj.yaml
4.StatefulSet
1)STS图例
2)headless服务
# 配置 headless 服务
[root@master ~]# cp websvc.yaml mysvc2.yaml
[root@master ~]# vim mysvc2.yaml
---
kind: Service
apiVersion: v1
metadata:
  name: mysvc2                    # service name
spec:
  type: ClusterIP
  clusterIP: None                 # None makes this a headless service (DNS only, no VIP)
  selector:
    app: httpd
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
[root@master ~]# kubectl apply -f mysvc2.yaml
[root@master ~]# kubectl get service mysvc2
3)资源对象文件
[root@master ~]# cp -a mydeploy.yaml mysts.yaml
[root@master ~]# vim mysts.yaml
---
kind: StatefulSet                 # resource object type
apiVersion: apps/v1
metadata:
  name: mysts                     # controller name
spec:
  serviceName: mysvc2             # required: name of the governing headless service
  replicas: 3
  selector:
    matchLabels:
      app: httpd
  template:
    metadata:
      labels:
        app: httpd
    spec:
      restartPolicy: Always
      containers:
      - name: webserver
        image: myos:httpd
        imagePullPolicy: Always
[root@master ~]# kubectl apply -f mysts.yaml
[root@master ~]# kubectl get pods
[root@master ~]# host mysts-0.mysvc2.default.svc.cluster.local 10.245.0.10
[root@master ~]# host mysvc2.default.svc.cluster.local 10.245.0.10
# 删除sts控制器
[root@master ~]# kubectl delete -f mysts.yaml -f mysvc2.yaml
5.弹性云服务
1)HPA图例
官网文档搜索hpa:
2)创建后端服务
# 为 Deploy 模板添加资源配额
[root@master ~]# vim mydeploy.yaml
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: myweb
spec:
  replicas: 1                     # changed replica count; HPA will scale from here
  selector:
    matchLabels:
      app: httpd
  template:
    metadata:
      labels:
        app: httpd
    spec:
      restartPolicy: Always
      containers:
      - name: webserver
        image: myos:httpd
        imagePullPolicy: Always
        resources:                # resource quota for this container
          requests:               # the HPA controller scales based on usage vs. requests
            cpu: 200m             # CPU request (0.2 core)
[root@master ~]# vim websvc.yaml
---
kind: Service
apiVersion: v1
metadata:
  name: websvc
spec:
  type: ClusterIP
  clusterIP: 10.245.1.80          # fixed cluster IP used by the load test below
  selector:
    app: httpd
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
[root@master ~]# kubectl apply -f mydeploy.yaml -f websvc.yaml
# 验证服务
[root@master ~]# kubectl top pods
[root@master ~]# curl -s http://10.245.1.80
3)创建HPA控制器
[root@master ~]# vim myhpa.yaml
---
kind: HorizontalPodAutoscaler
apiVersion: autoscaling/v1
metadata:
  name: myweb
spec:
  minReplicas: 1                  # lower bound for scaling
  maxReplicas: 5                  # upper bound for scaling
  targetCPUUtilizationPercentage: 50  # target average CPU, percent of requested CPU
  scaleTargetRef:                 # workload this HPA controls
    kind: Deployment
    apiVersion: apps/v1
    name: myweb
[root@master ~]# kubectl apply -f myhpa.yaml
# 刚刚创建 unknown 是正常现象,最多等待 60s 就可以正常获取数据
[root@master ~]# kubectl get horizontalpodautoscalers.autoscaling
[root@master ~]# kubectl get horizontalpodautoscalers.autoscaling
4)验证测试
# 终端 1 访问提高负载
[root@master ~]# while sleep 1;do
curl -s "http://10.245.1.80/info.php?id=100000" -o /dev/null
done
# 终端 2 监控 HPA 变化
[root@master ~]# kubectl get hpa -w
# 如果 300s 内平均负载小于标准值,就会自动缩减集群规模
[root@master ~]# kubectl get hpa -w