一、Job & Cronjob介绍
1.概述
1.1 Job
Job 会创建一个或者多个 Pod,并将继续重试 Pod 的执行,直到指定数量的 Pod 成功终止。 随着 Pod 成功结束,Job 跟踪记录成功完成的 Pod 个数。 当数量达到指定的成功个数阈值时,任务(即 Job)结束。 删除 Job 的操作会清除所创建的全部 Pod。 挂起 Job 的操作会删除 Job 的所有活跃 Pod,直到 Job 被再次恢复执行。
一种简单的使用场景下,你会创建一个 Job 对象以便以一种可靠的方式运行某 Pod 直到完成。 当第一个 Pod 失败或者被删除(比如因为节点硬件失效或者重启)时,Job 对象会启动一个新的 Pod。
你也可以使用 Job 以并行的方式运行多个 Pod。
1.2 Cronjob
CronJob 创建基于时隔重复调度的 Job。
CronJob 用于执行排期操作,例如备份、生成报告等。 一个 CronJob 对象就像 Unix 系统上的 crontab(cron table)文件中的一行。 它用 Cron 格式进行编写, 并周期性地在给定的调度时间执行 Job。
二、配置解读
1.Job配置
apiVersion: batch/v1 ## api版本
kind: Job ## 资源类型
metadata: ## 元数据
  name: pi ## 资源名称
  namespace: test ## 命名空间
spec: ## 详情
  backoffLimit: 4 ## 最大失败次数
  completions: 1 ## 要求完成的次数
  parallelism: 1 ## 并发数量
  activeDeadlineSeconds: 100 ## 最大运行时间
  ttlSecondsAfterFinished: 20 ## ttl时间
  template: ## Pod配置模板
    spec: ## 规约
      containers: ## 容器信息
      - name: pi ## 容器名称
        image: perl:5.34.0 ## 镜像
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] ## 容器运行命令
      restartPolicy: Never ## 重启策略
2.Cronjob配置
apiVersion: batch/v1 ## api版本(batch/v1beta1 自 1.21 起废弃,1.25 起移除)
kind: CronJob ## 资源类型
metadata: ## 元数据
  name: hello ## 资源名称
  namespace: test ## 命名空间
spec: ## 详情
  schedule: "* * * * *" ## 定时配置
  successfulJobsHistoryLimit: 3 ## 保留运行完成pod的数量
  jobTemplate: ## job配置模板
    spec: ## job详细配置
      template: ## pod配置模板
        spec: ## pod详细配置
          containers: ## 容器配置
          - name: hello ## 容器名称
            image: core.harbor.domain/test/busybox:latest ## 容器镜像
            imagePullPolicy: IfNotPresent ## 镜像拉取策略
            command: ## 镜像启动运行命令
            - /bin/sh
            - -c
            - date; echo Hello from the Kubernetes cluster
          restartPolicy: OnFailure ## 重启策略
          imagePullSecrets: ## 镜像下载秘钥
          - name: harbor-secret
重启策略 restartPolicy
Never:当 Pod 失败时,Job 控制器会启动一个新的 Pod
OnFailure:Pod 继续留在当前节点,但容器会被重新运行
三、演练
1.Job
1.1创建job
[root@k8s-master test]# vim job-pi.yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: pi
  namespace: test
spec:
  backoffLimit: 4
  completions: 1
  parallelism: 1
  activeDeadlineSeconds: 100
  ttlSecondsAfterFinished: 20
  template:
    spec:
      containers:
      - name: pi
        image: perl:5.34.0
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: Never
[root@k8s-master test]# kubectl apply -f job-pi.yaml
job.batch/pi created
1.2查看job pod生命周期
[root@k8s-master ~]# kubectl get pod -n test -o wide -w
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pi-8ckcl 0/1 Pending 0 0s <none> <none> <none> <none>
pi-8ckcl 0/1 Pending 0 0s <none> k8s-node1 <none> <none>
pi-8ckcl 0/1 ContainerCreating 0 0s <none> k8s-node1 <none> <none>
pi-8ckcl 0/1 ContainerCreating 0 0s <none> k8s-node1 <none> <none>
pi-8ckcl 1/1 Running 0 1s 10.244.36.108 k8s-node1 <none> <none>
pi-8ckcl 0/1 Completed 0 5s 10.244.36.108 k8s-node1 <none> <none>
pi-8ckcl 0/1 Completed 0 5s 10.244.36.108 k8s-node1 <none> <none>
1.3查看job详细信息
[root@k8s-master test]# kubectl get job -n test -o wide
NAME COMPLETIONS DURATION AGE CONTAINERS IMAGES SELECTOR
pi 1/1 5s 110s pi perl:5.34.0 controller-uid=5ae4f32e-0e4e-43be-8461-59bfcef7c44c
1.4查看job运行结果
[root@k8s-master test]# kubectl logs -f pi-8ckcl -n test
3.141592653589793238462643383279502884197169399375105820974944592307816406286208998628034825342117067982148086513282306647093844609550582231725359408128481117450284102701938521105559644622948954930381964428810975665933446128475648233786783165271201909145648566923460348610454326648213393607260249141273724587006606315588174881520920962829254091715364367892590360011330530548820466521384146951941511609433057270365759591953092186117381932611793105118548074462379962749567351885752724891227938183011949129833673362440656643086021394946395224737190702179860943702770539217176293176752384674818467669405132000568127145263560827785771342757789609173637178721468440901224953430146549585371050792279689258923542019956112129021960864034418159813629774771309960518707211349999998372978049951059731732816096318595024459455346908302642522308253344685035261931188171010003137838752886587533208381420617177669147303598253490428755468731159562863882353787593751957781857780532171226806613001927876611195909216420198938095257201065485863278865936153381827968230301952035301852968995773622599413891249721775283479131515574857242454150695950829533116861727855889075098381754637464939319255060400927701671139009848824012858361603563707660104710181942955596198946767837449448255379774726847104047534646208046684259069491293313677028989152104752162056966024058038150193511253382430035587640247496473263914199272604269922796782354781636009341721641219924586315030286182974555706749838505494588586926995690927210797509302955321165344987202755960236480665499119881834797753566369807426542527862551818417574672890977772793800081647060016145249192173217214772350141441973568548161361157352552133475741849468438523323907394143334547762416862518983569485562099219222184272550254256887671790494601653466804988627232791786085784383827967976681454100953883786360950680064225125205117392984896084128488626945604241965285022210661186306744278622039194945047123713786960956364371917287467764657573962413890865832645995813390478027590
1
2.Cronjob
2.1创建Cronjob
[root@k8s-master test]# vim cronjob-hello.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: hello
  namespace: test
spec:
  schedule: "* * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hello
            image: core.harbor.domain/test/busybox:latest
            imagePullPolicy: IfNotPresent
            command:
            - /bin/sh
            - -c
            - date; echo Hello from the Kubernetes cluster
          restartPolicy: OnFailure
          imagePullSecrets:
          - name: harbor-secret
[root@k8s-master test]# kubectl apply -f cronjob-hello.yaml
cronjob.batch/hello created
2.2查看Cronjob详情
[root@k8s-master test]# kubectl get cj -n test -o wide
NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE CONTAINERS IMAGES SELECTOR
hello * * * * * False 1 12s 4m19s hello core.harbor.domain/test/busybox:latest <none>
2.3查看pod生命周期
[root@k8s-master test]# kubectl get pod -n test
NAME READY STATUS RESTARTS AGE
fluentd-elasticsearch-5nksk 1/1 Running 0 13d
fluentd-elasticsearch-q9wtk 1/1 Running 1 13d
fluentd-elasticsearch-wglqt 1/1 Running 1 13d
hello-1680838080-579kv 0/1 Completed 0 2m39s
hello-1680838140-qvf75 0/1 Completed 0 99s
hello-1680838200-mml5b 0/1 Completed 0 39s
[root@k8s-master ~]# kubectl get pod -n test -o wide -w
hello-1680838020-jf8pn 0/1 Pending 0 0s <none> <none> <none> <none>
hello-1680838020-jf8pn 0/1 Pending 0 0s <none> k8s-node1 <none> <none>
hello-1680838020-jf8pn 0/1 ContainerCreating 0 0s <none> k8s-node1 <none> <none>
hello-1680838020-jf8pn 0/1 ContainerCreating 0 0s <none> k8s-node1 <none> <none>
hello-1680838020-jf8pn 0/1 Completed 0 2s 10.244.36.112 k8s-node1 <none> <none>
hello-1680838020-jf8pn 0/1 Completed 0 2s 10.244.36.112 k8s-node1 <none> <none>
hello-1680838080-579kv 0/1 Pending 0 0s <none> <none> <none> <none>
hello-1680838080-579kv 0/1 Pending 0 0s <none> k8s-node1 <none> <none>
hello-1680838080-579kv 0/1 ContainerCreating 0 0s <none> k8s-node1 <none> <none>
hello-1680838080-579kv 0/1 ContainerCreating 0 1s <none> k8s-node1 <none> <none>
hello-1680838080-579kv 0/1 Completed 0 2s 10.244.36.113 k8s-node1 <none> <none>
hello-1680838080-579kv 0/1 Completed 0 2s 10.244.36.113 k8s-node1 <none> <none>
hello-1680838140-qvf75 0/1 Pending 0 0s <none> <none> <none> <none>
hello-1680838140-qvf75 0/1 Pending 0 0s <none> k8s-node1 <none> <none>
hello-1680838140-qvf75 0/1 ContainerCreating 0 0s <none> k8s-node1 <none> <none>
hello-1680838140-qvf75 0/1 ContainerCreating 0 1s <none> k8s-node1 <none> <none>
hello-1680838140-qvf75 0/1 Completed 0 2s 10.244.36.114 k8s-node1 <none> <none>
hello-1680838140-qvf75 0/1 Completed 0 2s 10.244.36.114 k8s-node1 <none> <none>
hello-1680838200-mml5b 0/1 Pending 0 0s <none> <none> <none> <none>
hello-1680838200-mml5b 0/1 Pending 0 0s <none> k8s-node1 <none> <none>
hello-1680838200-mml5b 0/1 ContainerCreating 0 0s <none> k8s-node1 <none> <none>
hello-1680838200-mml5b 0/1 ContainerCreating 0 1s <none> k8s-node1 <none> <none>
hello-1680838200-mml5b 0/1 Completed 0 2s 10.244.36.115 k8s-node1 <none> <none>
hello-1680838200-mml5b 0/1 Completed 0 2s 10.244.36.115 k8s-node1 <none> <none>
hello-1680838020-jf8pn 0/1 Terminating 0 3m11s 10.244.36.112 k8s-node1 <none> <none>
hello-1680838020-jf8pn 0/1 Terminating 0 3m11s 10.244.36.112 k8s-node1 <none> <none>
hello-1680838260-ptmrd 0/1 Pending 0 0s <none> <none> <none> <none>
hello-1680838260-ptmrd 0/1 Pending 0 0s <none> k8s-node1 <none> <none>
hello-1680838260-ptmrd 0/1 ContainerCreating 0 0s <none> k8s-node1 <none> <none>
hello-1680838260-ptmrd 0/1 ContainerCreating 0 1s <none> k8s-node1 <none> <none>
hello-1680838260-ptmrd 0/1 Completed 0 2s 10.244.36.116 k8s-node1 <none> <none>
hello-1680838260-ptmrd 0/1 Completed 0 2s 10.244.36.116 k8s-node1 <none> <none>
hello-1680838080-579kv 0/1 Terminating 0 3m11s 10.244.36.113 k8s-node1 <none> <none>
hello-1680838080-579kv 0/1 Terminating 0 3m11s 10.244.36.113 k8s-node1 <none> <none>
2.4 查看运行结果
[root@k8s-master test]# kubectl logs -f hello-1680840900-5c72g -n test
Fri Apr 7 04:15:01 UTC 2023
Hello from the Kubernetes cluster
[root@k8s-master test]# kubectl logs -f hello-1680840840-r2zn8 -n test
Fri Apr 7 04:14:01 UTC 2023
Hello from the Kubernetes cluster