Redis-cluster
# Force-delete a pod, bypassing graceful termination
# (fixed typo: --froce -> --force)
kubectl delete po <pod-name> --force --grace-period=0
# List pod names only, skipping the header row
# (fixed typo: kebectl -> kubectl)
kubectl get po | awk 'NR>1{print $1}'
# Loop: force-delete every pod in the current namespace
for i in $(kubectl get po | awk 'NR>1{print $1}'); do kubectl delete po "$i" --force --grace-period=0; done
docker pull alpine
# NOTE(review): 'scratch' is Docker's reserved empty base image and cannot
# actually be pulled — this command is expected to fail.
docker pull scratch
# This is the image used throughout the walkthrough
docker pull redis:5-alpine
cd redis-cluster-in-k8s/
touch redis-pod.yaml
# Generate a pod manifest as YAML without creating anything (--dry-run)
kubectl run -h
# Example: kubectl run redis-pod --image=redis:5-alpine --port=6379 --dry-run=client -oyaml > ~/redis-cluster-in-k8s/redis-pod.yaml
vim redis-pod.yaml
# Minimal Redis pod (as generated by `kubectl run --dry-run=client -oyaml`).
# Indentation restored — the pasted manifest had lost all leading whitespace,
# which makes it invalid YAML.
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: redis-pod
  name: redis-pod
spec:
  containers:
  - image: redis:5-alpine
    imagePullPolicy: IfNotPresent
    name: redis-pod
    ports:
    - containerPort: 6379
# Create the pod, poke around inside it, then clean up
kubectl apply -f redis-pod.yaml
kubectl get po -owide
kubectl exec -it redis-pod -- /bin/sh
cd
cd /etc
ls
exit
kubectl delete -f redis-pod.yaml
configmap的引用
docker pull redis:5-alpine
cd redis-cluster-in-k8s/
# Skip this step if the file already exists
vim redis.conf
protected-mode no
daemonize no
appendonly yes
cluster-enabled yes
cluster-config-file "nodes.conf"
cluster-node-timeout 5000
dir /data
port 6379
kubectl create configmap redis-cm --from-file=redis.conf --dry-run=client -oyaml > redis-cm.yaml
# Edit the file below and delete the second-to-last line (creationTimestamp: ...)
vim redis-cm.yaml
kubectl apply -f redis-cm.yaml
kubectl get cm
# Built-in API field reference ("the encyclopedia")
kubectl explain pod.spec
vim redis-pod.yaml
# Pod that mounts the redis-cm ConfigMap at /etc/redis/ and starts Redis with
# that config instead of the image default.
# Fixed: indentation restored (pasted manifest was flat, i.e. invalid YAML) and
# "volumeMounts :" — YAML forbids a space before the colon in a mapping key.
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: redis-pod
  name: redis-pod
spec:
  volumes:
  - name: cm
    configMap:
      name: redis-cm
  containers:
  - image: redis:5-alpine
    imagePullPolicy: IfNotPresent
    name: redis-pod
    ports:
    - containerPort: 6379
    # Override the image entrypoint so Redis reads the mounted config
    command: ["sh", "-c", "redis-server /etc/redis/redis.conf"]
    volumeMounts:
    - name: cm
      mountPath: /etc/redis/
kubectl apply -f redis-pod.yaml
kubectl get po
kubectl exec -it redis-pod -- /bin/sh # enter the container
ps -ef
cd /etc/redis
ls # check that redis.conf is present (mounted from the ConfigMap)
cd /data/
ls # check for nodes.conf and appendonly.aof (fixed comment typo: "aoy")
exit
挂载cm和卷组
# Delete the pod before recreating it with a persistent volume
kubectl delete po redis-pod
vim redis-pod.yaml
---
# Pod mounting both the ConfigMap (config at /etc/redis/) and a
# PersistentVolumeClaim (durable data at /data).
# Fixed: indentation restored — the pasted manifest had lost all leading
# whitespace, which makes it invalid YAML.
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: redis-pod
  name: redis-pod
spec:
  volumes:
  - name: cm
    configMap:
      name: redis-cm
  - name: data
    persistentVolumeClaim:
      claimName: redis-pvc
  containers:
  - image: redis:5-alpine
    imagePullPolicy: IfNotPresent
    name: redis-pod
    ports:
    - containerPort: 6379
    command: ["sh", "-c", "redis-server /etc/redis/redis.conf"]
    volumeMounts:
    - name: cm
      mountPath: /etc/redis/
    - name: data
      mountPath: /data
#
vim redis-pvc.yaml
# 2Gi block-storage claim backed by the rook-ceph-block StorageClass.
# Fixed: indentation restored (pasted manifest was flat, i.e. invalid YAML).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: redis-pvc
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi
  storageClassName: rook-ceph-block
kubectl apply -f redis-pvc.yaml
# fixed: was 'kubectl get svc' — we just created a PVC, not a Service
kubectl get pvc
kubectl get pv
kubectl apply -f redis-pod.yaml
kubectl get po -owide
kubectl exec -it redis-pod -- /bin/sh
ls # the mounted volume shows up as a directory (rendered in blue)
touch 1.txt
echo "buihioad" >> 1.txt
exit
# Verify the data survives pod deletion (persistence check)
kubectl delete -f redis-pod.yaml
kubectl apply -f redis-pod.yaml
kubectl exec -it redis-pod -- /bin/sh
ls # confirm 1.txt is still there
exit
# --- Create the StatefulSet controller ---
kubectl delete po redis-pod --force --grace-period=0
# Confirm the pod is gone
kubectl get po
kubectl delete pvc redis-pvc
# Generate a Deployment manifest as a starting template for the StatefulSet
kubectl create deployment redis-sts --image=redis:5-alpine --dry-run=client -oyaml > redis-sts.yaml
# Headless service (clusterIP: None) so each StatefulSet pod gets a stable
# DNS name of the form <pod>.<service>.<namespace>.svc.
# Fixed: indentation restored, and "metadata :" / "selector :" — YAML forbids
# a space before the colon in a mapping key.
# vim redis-headless.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: redis-sts-svc
  name: redis-headless
spec:
  ports:
  - name: redis-service
    port: 6379
  clusterIP: None
  selector:
    app: redis-sts
kubectl apply -f redis-headless.yaml
kubectl get svc
kubectl describe svc redis-headless
# fixed: resource kinds are lowercase — 'kubectl describe SVC' fails with
# "the server doesn't have a resource type \"SVC\""
kubectl describe svc redis-headless
# vim redis-sts.yaml
# StatefulSet: 3 Redis pods, each getting its own 1Gi PVC from
# volumeClaimTemplates, all sharing the redis-cm ConfigMap.
# Fixes applied to the pasted manifest:
#   - 'apiversion' -> 'apiVersion' (YAML keys are case-sensitive)
#   - '"ReadWr ite0nce”' -> "ReadWriteOnce" (OCR garble: stray space, digit 0,
#     curly quote)
#   - volume name 'Cm' -> 'cm' so it matches the volumeMounts reference
#   - command '-C' -> '-c' and 'redis. conf' -> 'redis.conf'
#   - indentation restored (flat paste was invalid YAML)
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    app: redis-sts
  name: redis-sts
spec:
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 1Gi
  serviceName: redis-headless
  replicas: 3
  selector:
    matchLabels:
      app: redis-sts
  template:
    metadata:
      labels:
        app: redis-sts
    spec:
      volumes:
      - name: cm
        configMap:
          name: redis-cm
      containers:
      - image: redis:5-alpine
        imagePullPolicy: IfNotPresent
        name: redis-pod
        ports:
        - containerPort: 6379
        command: ["sh", "-c", "redis-server /etc/redis/redis.conf"]
        volumeMounts:
        - name: cm
          mountPath: /etc/redis/
        - name: data
          mountPath: /data
kubectl apply -f redis-sts.yaml
ep原理
灰度发布
1. Dockerfile的编写:
mkdir dockerfile && cd dockerfile
vim Dockerfile # NOTE: the conventional Dockerfile filename starts with a capital D
FROM alpine:latest
# MAINTAINER is deprecated; LABEL maintainer=... is the supported replacement
LABEL maintainer="ZCF <zcf@zczf.com>"
ENV NGX_DOC_ROOT="/var/lib/nginx/html" HOSTNAME="" IP="" PORT="" INDEX_PAGE=""
RUN apk --no-cache add nginx && mkdir -p ${NGX_DOC_ROOT}/shop /run/nginx
COPY chk.html ${NGX_DOC_ROOT}
COPY entrypoint.sh /bin
# Run nginx in the foreground ('daemon off;' overrides the global config section).
# fixed: Dockerfile comments must be on their own line — a trailing '#...' after
# a JSON-form CMD/ENTRYPOINT is not a comment; it breaks the JSON parse and
# silently demotes the instruction to shell form.
CMD ["/usr/sbin/nginx","-g","daemon off;"]
# CMD's arguments are handed to the ENTRYPOINT script as "$@"
ENTRYPOINT ["/bin/entrypoint.sh"]
#准备Dockerfile配套的基础文件:
1) 启动容器时,执行的脚本文件: entrypoint.sh
vim entrypoint.sh
#!/bin/sh
# Render the index page and the nginx vhost config from environment variables
# (HOSTNAME, IP, PORT, NGX_DOC_ROOT, INDEX_PAGE, YOU_INFO), then exec the CMD
# (nginx) so it replaces this script as PID 1.
# fixed: quote the redirect target so an NGX_DOC_ROOT containing spaces
# cannot word-split the path.
echo "<h1>WELCOME TO ${HOSTNAME:-www.zcf.com} WEB SITE | `date` | `hostname` | `hostname -i` | -${YOU_INFO:-v1}- | </h1>" > "${NGX_DOC_ROOT}/index.html"
# Unquoted EOF is intentional: the here-doc must expand the ${...} defaults.
cat > /etc/nginx/conf.d/default.conf <<EOF
server {
server_name ${HOSTNAME:-www.zcf.com};
listen ${IP:-0.0.0.0}:${PORT:-80};
root ${NGX_DOC_ROOT};
location / {
index ${INDEX_PAGE} index.html index.htm;
}
location = /404.html {
internal;
}
}
EOF
exec "$@" # receives the arguments passed in from CMD
# 2) Make entrypoint.sh executable.
# fixed: was 'chown +x' — chown changes ownership and rejects '+x';
# chmod is the command that changes the mode bits.
chmod +x entrypoint.sh
# 3) Health-check page used later by liveness/readiness probes
echo OK > chk.html
2. 开始制作docker镜像文件:
docker build --tag myapp:v1 ./
3. 将制作好的镜像文件,打上标签,并上传到harbor上。
docker login harbor.zcf.com -u admin -p 123456 # log in to harbor (NOTE(review): plaintext password on argv leaks via shell history and ps)
docker tag myapp:v1 harbor.zcf.com/k8s/myapp:v1 # tag with the harbor registry path first
docker push harbor.zcf.com/k8s/myapp:v1 # then push the image to harbor
4. 为了方便延时恢复发布的效果,我们还需要在制作一个镜像
docker run -d --name ngx1 -e YOU_INFO="DIY-HelloWorld-v2" harbor.zcf.com/k8s/myapp:v1
# -e passes an environment variable into the container; entrypoint.sh reads
# YOU_INFO, so this changes the generated nginx index page.
docker commit --pause ngx1 # pause ngx1 and export its current state as a new (untagged) image
docker kill ngx1 && docker rm -fv ngx1 # remove the test container once the image is made
root@k8s-n1:~# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
<none> <none> 85355d4af36c 6 seconds ago 7.02MB # this is the image just created
# Tag the new image as harbor.zcf.com/k8s/myapp:v2 so it can be pushed to harbor
docker tag 85355d4af36c harbor.zcf.com/k8s/myapp:v2
# Test-run the image; if it looks good, push it to the local harbor
docker run -p 83:80 --rm -d --name ngx1 harbor.zcf.com/k8s/myapp:v2
root@k8s-n1:~# curl http://192.168.111.80:83/ # check that the index page now shows the YOU_INFO content
<h1>WELCOME TO www.zcf.com WEB SITE | Fri Jul 19 02:31:13 UTC 2019 | ec4f08f831de | 172.17.0.2 | -DIY-HelloWorld-v2- | </h1>
docker kill ngx1 # remove the ngx1 test container
docker push harbor.zcf.com/k8s/myapp:v2 # finally, push the new image to harbor
5. 现在已经有了,myapp:v1 和 myapp:v2 那就可以开始K8s的灰度发布测试了。
# First create three pods: one client and two nginx
# 1. Create the client
# NOTE(review): 'kubectl run --replicas' was removed in kubectl 1.18; on newer
# clusters use 'kubectl create deployment' instead — confirm cluster version.
kubectl run client --image=harbor.zcf.com/k8s/alpine:v1 --replicas=1
# alpine is a minimal Linux image available from most mirror sites
kubectl get pods -o wide # watch the pod creation details
# 2. Create the nginx deployment
kubectl run nginx --image=harbor.zcf.com/k8s/myapp:v1 --port=80 --replicas=2
kubectl get deployment -w # watch k8s bring up the 2 pods
kubectl get pod -o wide
# 3. Log in to the client pod and test access to nginx
root@k8s-m1:/etc/ansible# kubectl get pod
NAME READY STATUS RESTARTS AGE
client-f5cdb799f-2wsmr 1/1 Running 2 16h
nginx-6d6d8b685-7t7xj 1/1 Running 0 99m
nginx-6d6d8b685-xpx5r 1/1 Running 0 99m
kubectl exec -it client-f5cdb799f-2wsmr sh
/ # ip addr
/ # for i in `seq 1000`; do wget -O - -q http://nginx/ ; sleep 1; done
/ # # if kube-dns is not deployed, replace 'nginx' here with the Service IP
/ # # kubectl get svc |grep nginx # shows the nginx Service cluster IP
# 4. The loop above shows requests being load-balanced across the pods
接着,开始进行灰度发布测试
# Upgrade the running image to myapp:v2
kubectl set image --help
# fixed: the Deployment and its container are named 'nginx' (created above by
# 'kubectl run nginx'), so 'deployment myapp myapp=...' would fail with NotFound.
kubectl set image deployment nginx nginx=harbor.zcf.com/k8s/myapp:v2 # roll the image forward to myapp:v2
# While this runs, watch the client terminal: responses gradually change from v1 to DIY-HelloWorld-v2.
# 5. Dynamically scale the nginx pods
kubectl scale --replicas=5 deployment nginx # scale nginx to 5 replicas
kubectl get pods
# Back on the client terminal you will now see more hostnames/IPs in the responses.
# 6. Check whether the image rollout completed successfully
kubectl rollout status deployment nginx
# 7. Verify the pods are actually running the new image
kubectl describe pods nginx-xxx-xx
# 8. Roll back to the previous (v1) version
kubectl rollout undo --help
kubectl rollout undo deployment nginx
6. 测试K8s集群外部访问nginx
# Change the Service type so clients outside the cluster can reach it.
# fixed: the Service is named 'nginx' — no 'myapp' Service exists in this walkthrough.
kubectl edit svc nginx
# change 'type: ClusterIP' to 'type: NodePort'
# Check the updated Service:
kubectl get svc # an extra NodePort appears, e.g. 80:30020/TCP (30020 is randomly assigned)
# The range comes from NODE_PORT_RANGE="30000-60000" configured when deploying with kubeasz.
# Then access it from a client outside the cluster:
http://<master-or-node-physical-IP>:30020/
直接pod的启动
#v1版本
vim v1.yaml
# v1 Deployment: 4 replicas of tomcat-test, spread across nodes by required
# pod anti-affinity, with HTTP liveness/readiness probes and host-path logs.
# Fixed: indentation restored — the pasted manifest had lost all leading
# whitespace, which makes it invalid YAML.
# NOTE(review): extensions/v1beta1 Deployment was removed in Kubernetes 1.16;
# on modern clusters change this to apps/v1 (the selector is already present).
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    app: tomcat-test
  name: tomcat-test-v1
  namespace: test
spec:
  minReadySeconds: 100
  replicas: 4
  revisionHistoryLimit: 5
  selector:
    matchLabels:
      app: tomcat-test
      version: v1
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      annotations:
        initializer.kubernetes.io/lxcfs: 'true'
      labels:
        app: tomcat-test
        version: v1
    spec:
      affinity:
        podAntiAffinity:
          # At most one tomcat-test pod per node
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - tomcat-test
            topologyKey: kubernetes.io/hostname
      containers:
      - env:
        - name: HOST_IP
          valueFrom:
            fieldRef:
              fieldPath: status.hostIP
        - name: POD_NAME
          value: tomcat-test
        - name: version
          value: v1
        image: vnet01-harbor.sy.cn/test/tomcat-test:r-20200214_175535
        imagePullPolicy: Always
        livenessProbe:
          httpGet:
            path: /abc/check_health.jsp
            port: 6080
          initialDelaySeconds: 80
          timeoutSeconds: 20
        name: tomcat-test
        readinessProbe:
          httpGet:
            path: /abc/check_health.jsp
            port: 6080
          initialDelaySeconds: 80
          timeoutSeconds: 20
        resources:
          limits:
            cpu: '2'
            memory: 4096M
          requests:
            cpu: '1'
            memory: 2048M
        volumeMounts:
        - mountPath: /data/logs
          name: app-log
          readOnly: false
      nodeSelector:
        apptype: memnode
      terminationGracePeriodSeconds: 60
      volumes:
      - hostPath:
          path: /var/lib/docker/logs/tomcat-test
        name: app-log
kubectl apply -f v1.yaml
#v2版本
vim v2.yaml
# v2 Deployment: 1 canary replica of tomcat-test (label version: v2) running the
# newer image; a Service selecting only 'app: tomcat-test' would split traffic
# between v1 and v2 pods.
# Fixed: indentation restored — the pasted manifest had lost all leading
# whitespace, which makes it invalid YAML.
# NOTE(review): extensions/v1beta1 Deployment was removed in Kubernetes 1.16;
# on modern clusters change this to apps/v1 (the selector is already present).
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    app: tomcat-test
  name: tomcat-test-v2
  namespace: test
spec:
  minReadySeconds: 100
  replicas: 1
  revisionHistoryLimit: 5
  selector:
    matchLabels:
      app: tomcat-test
      version: v2
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      annotations:
        initializer.kubernetes.io/lxcfs: 'true'
      labels:
        app: tomcat-test
        version: v2
    spec:
      affinity:
        podAntiAffinity:
          # At most one tomcat-test pod per node
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - tomcat-test
            topologyKey: kubernetes.io/hostname
      containers:
      - env:
        - name: HOST_IP
          valueFrom:
            fieldRef:
              fieldPath: status.hostIP
        - name: POD_NAME
          value: tomcat-test
        - name: version
          value: v2
        image: vnet01-harbor.sy.cn/test/tomcat-test:r-20200319_101302
        imagePullPolicy: Always
        livenessProbe:
          httpGet:
            path: /abc/check_health.jsp
            port: 6080
          initialDelaySeconds: 80
          timeoutSeconds: 20
        name: tomcat-test
        readinessProbe:
          httpGet:
            path: /abc/check_health.jsp
            port: 6080
          initialDelaySeconds: 80
          timeoutSeconds: 20
        resources:
          limits:
            cpu: '2'
            memory: 4096M
          requests:
            cpu: '1'
            memory: 2048M
        volumeMounts:
        - mountPath: /data/logs
          name: app-log
          readOnly: false
      nodeSelector:
        apptype: memnode
      terminationGracePeriodSeconds: 60
      volumes:
      - hostPath:
          path: /var/lib/docker/logs/tomcat-test
        name: app-log
kubectl apply -f v2.yaml
k8s部署Redis-cluster
最新推荐文章于 2024-08-06 18:02:31 发布