在kubernetes 1.8以下的版本中进行滚动升级,要求使用deployment。
控制器deployment是用来管理无状态应用的,面向的是集群的管理,而不是面向一个不可变的个体。简单来说就像一笼包子,吃掉一个再放一个进去就行了。
Deployment为Pod和ReplicaSet提供了一个声明式定义(declarative)方法,用来替代以前的ReplicationController来方便的管理应用。典型的应用场景包括:
- 定义Deployment来创建Pod和ReplicaSet
- 滚动升级和回滚应用
- 扩容和缩容
- 暂停和继续Deployment
网上找个小栗子:
[root@cicp-k8s-master1 test]# cat nginx.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: registry:5000/nginx:1.9.1
        ports:
        - containerPort: 80
先创建然后查询下
[root@cicp-k8s-master1 heapster]# kubectl create -f nginx.yaml --record
[root@cicp-k8s-master1 heapster]# kubectl get deployment -o wide
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
nginx-deployment 3 3 3 3 46s
先扩个容
[root@cicp-k8s-master1 heapster]# kubectl scale deployment nginx-deployment --replicas 4
deployment "nginx-deployment" scaled
[root@cicp-k8s-master1 heapster]# kubectl get deployment -o wide
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
nginx-deployment 4 4 4 4 4m
[root@cicp-k8s-master1 heapster]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE
mongo-6sv3i 1/1 Running 9 1y 172.17.56.3 cicp-k8s-node2
nginx-deployment-1969799642-3l01p 1/1 Running 0 9m 172.17.83.9 cicp-k8s-node5
nginx-deployment-1969799642-9s5tv 1/1 Running 0 9m 172.17.39.5 cicp-k8s-node6
nginx-deployment-1969799642-nunr4 1/1 Running 0 5m 172.17.8.4 cicp-k8s-node1
nginx-deployment-1969799642-sa1tg 1/1 Running 0 9m 172.17.63.5 cicp-k8s-node3
更新镜像:
kubectl set image deployment/nginx-deployment nginx=registry:5000/nginx:1.7.9 --record
nginx=registry:5000/nginx:1.7.9 中,等号前是容器(container)的名字,等号后是要更新到的镜像及版本
查看状态和服务:
kubectl rollout status deployment/nginx-deployment
镜像改变了
[root@cicp-k8s-master1 ~]# kubectl describe pod nginx-deployment-2457125344-3hzc5
Name: nginx-deployment-2457125344-3hzc5
Namespace: default
Node: cicp-k8s-node1/10.169.2.27
Start Time: Mon, 15 Jul 2019 18:05:48 +0800
Labels: app=nginx
pod-template-hash=2457125344
Status: Running
IP: 172.17.8.4
Controllers: ReplicaSet/nginx-deployment-2457125344
Containers:
nginx:
Container ID: docker://1fd3ac70081be7465cd4e2ba98ca9470ebc6676cf9824365b37a8ecdbd37c965
Image: registry:5000/nginx:1.7.9
Image ID: docker://sha256:84581e99d807a703c9c03bd1a31cd9621815155ac72a7365fd02311264512656
Port: 80/TCP
State: Running
Started: Mon, 15 Jul 2019 18:05:48 +0800
Ready: True
Restart Count: 0
Volume Mounts: <none>
Environment Variables: <none>
Conditions:
Type Status
Initialized True
Ready True
PodScheduled True
No volumes.
QoS Class: BestEffort
Tolerations: <none>
Events:
FirstSeen LastSeen Count From SubobjectPath Type Reason Message
--------- -------- ----- ---- ------------- -------- ------ -------
32s 32s 1 {default-scheduler } Normal Scheduled Successfully assigned nginx-deployment-2457125344-3hzc5 to cicp-k8s-node1
32s 32s 1 {kubelet cicp-k8s-node1} spec.containers{nginx} Normal Pulled Container image "registry:5000/nginx:1.7.9" already present on machine
32s 32s 1 {kubelet cicp-k8s-node1} spec.containers{nginx} Normal Created Created container with docker id 1fd3ac70081b; Security:[seccomp=unconfined]
32s 32s 1 {kubelet cicp-k8s-node1} spec.containers{nginx} Normal Started Started container with docker id 1fd3ac70081b
暂停升级
kubectl rollout pause deployment/nginx-deployment
继续升级
kubectl rollout resume deployment/nginx-deployment
操作记录
[root@cicp-k8s-master1 test]# kubectl rollout history deployments
deployments "nginx-deployment"
REVISION CHANGE-CAUSE
1 kubectl scale deployment nginx-deployment --replicas 4
2 kubectl set image deployment/nginx-deployment nginx=registry:5000/nginx:1.7.9 --record
回退到指定版本
回退到第一个版本
kubectl rollout undo deployment/nginx-deployment --to-revision=1
查看现在的pod
[root@cicp-k8s-master1 ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
mongo-6sv3i 1/1 Running 9 1y
nginx-deployment-1969799642-6vgw5 1/1 Running 0 26s
nginx-deployment-1969799642-902r4 1/1 Running 0 27s
nginx-deployment-1969799642-fgwk1 1/1 Running 0 27s
nginx-deployment-1969799642-znrpi 1/1 Running 0 26s
tyk-dashboard-cmqvy 1/1 Running 9 1y
tyk-gateway-r6eap 1/1 Running 9 1y
[root@cicp-k8s-master1 ~]# kubectl describe pod nginx-deployment-1969799642-znrpi
Name: nginx-deployment-1969799642-znrpi
Namespace: default
Node: cicp-k8s-node5/10.169.2.31
Start Time: Mon, 15 Jul 2019 18:11:23 +0800
Labels: app=nginx
pod-template-hash=1969799642
Status: Running
IP: 172.17.83.9
Controllers: ReplicaSet/nginx-deployment-1969799642
Containers:
nginx:
Container ID: docker://85bb72d79329af37497f5b61d96acc5ea3793e2e46b9b45badabb31af40b5e76
Image: registry:5000/nginx:1.9.1
Image ID: docker://sha256:94ec7e53edfc793d6d8412b4748cd84270da290ce9256730eb428574f98f7c95
Port: 80/TCP
State: Running
Started: Mon, 15 Jul 2019 18:11:24 +0800
Ready: True
Restart Count: 0
Volume Mounts: <none>
Environment Variables: <none>
Conditions:
Type Status
Initialized True
Ready True
PodScheduled True
No volumes.
QoS Class: BestEffort
Tolerations: <none>
Events:
FirstSeen LastSeen Count From SubobjectPath Type Reason Message
--------- -------- ----- ---- ------------- -------- ------ -------
50s 50s 1 {default-scheduler } Normal Scheduled Successfully assigned nginx-deployment-1969799642-znrpi to cicp-k8s-node5
49s 49s 1 {kubelet cicp-k8s-node5} spec.containers{nginx} Normal Pulled Container image "registry:5000/nginx:1.9.1" already present on machine
49s 49s 1 {kubelet cicp-k8s-node5} spec.containers{nginx} Normal Created Created container with docker id 85bb72d79329; Security:[seccomp=unconfined]
49s 49s 1 {kubelet cicp-k8s-node5} spec.containers{nginx} Normal Started Started container with docker id 85bb72d79329
其他修改方式:
kubectl edit deployment/nginx-deployment
# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "3"
    kubernetes.io/change-cause: kubectl edit deployment/nginx-deployment
  creationTimestamp: 2019-07-15T09:50:28Z
  generation: 10
  labels:
    app: nginx
  name: nginx-deployment
  namespace: default
  resourceVersion: "125366735"
  selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/nginx-deployment
  uid: fa30a2da-a6e5-11e9-adca-000c296454ba
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
# 实例数我改成了 2
[root@cicp-k8s-master1 ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
mongo-6sv3i 1/1 Running 9 1y
nginx-deployment-1969799642-902r4 1/1 Running 0 6m
nginx-deployment-1969799642-fgwk1 1/1 Running 0 6m
tyk-dashboard-cmqvy 1/1 Running 9 1y
tyk-gateway-r6eap 1/1 Running 9 1y
deployment文件说明
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx
  namespace: kube-system
  labels:
    app: nginx
spec:
  replicas: 3
  # minReadySeconds: 60          # 滚动升级时60s后认为该pod就绪
  strategy:
    rollingUpdate:               ## 由于replicas为3,则整个升级过程中pod个数在2-4个之间
      maxSurge: 1                # 滚动升级时会先启动1个pod
      maxUnavailable: 1          # 滚动升级时允许的最大Unavailable的pod个数
  template:
    metadata:
      labels:
        app: nginx
    spec:
      terminationGracePeriodSeconds: 60  ## k8s将会给应用发送SIGTERM信号,可以用来正确、优雅地关闭应用,默认为30秒
      containers:
      - name: nginx
        image: nginx
        imagePullPolicy: IfNotPresent
        livenessProbe:           # kubernetes认为该pod是存活的,不存活则需要重启
          httpGet:
            path: /health
            port: 80
            scheme: HTTP
          initialDelaySeconds: 60  ## equals to the maximum startup time of the application + couple of seconds
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:          # kubernetes认为该pod是启动成功的
          httpGet:
            path: /health
            port: 80
            scheme: HTTP
          initialDelaySeconds: 30  ## equals to minimum startup time of the application
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources:
          # keep request = limit to keep this container in guaranteed class
          requests:
            cpu: 50m
            memory: 200Mi
          limits:
            cpu: 500m
            memory: 500Mi
        ports:
        - name: http
          containerPort: 80
几个重要参数说明
maxSurge与maxUnavailable
maxSurge: 1 表示滚动升级时会先启动1个pod
maxUnavailable: 1 表示滚动升级时允许的最大Unavailable的pod个数
由于replicas为3,则整个升级,pod个数在2-4个之间
terminationGracePeriodSeconds
k8s将会给应用发送SIGTERM信号,可以用来正确、优雅地关闭应用,默认为30秒。
如果需要更优雅地关闭,则可以使用k8s提供的pre-stop lifecycle hook 的配置声明,将会在发送SIGTERM之前执行。
livenessProbe与readinessProbe
livenessProbe是kubernetes用来判断该pod是否存活的,不存活则需要kill掉,然后再新启动一个,以达到replicas指定的个数。
readinessProbe是kubernetes认为该pod是启动成功的,这里根据每个应用的特性,自己去判断,可以执行command,也可以进行httpGet。比如对于使用java web服务的应用来说,并不是简单地说tomcat启动成功就可以对外提供服务的,还需要等待spring容器初始化,数据库连接连接上等等。对于spring boot应用,默认的actuator带有/health接口,可以用来进行启动成功的判断。
其中readinessProbe.initialDelaySeconds可以设置为系统完全启动起来所需的最少时间,livenessProbe.initialDelaySeconds可以设置为系统完全启动起来所需的最大时间+若干秒