文章目录
存储
- ConfigMap:存储配置信息
- Secret:存储加密信息
- Volume:共享存储
- Persistent Volume:持久卷
ConfigMap
一般app会从配置文件,命令行参数,环境变量等读取配置信息
ConfigMap可以向容器中注入配置信息
三种创建方式:
- 使用目录创建
- 使用文件创建
- 使用字面值创建
创建ConfigMap
使用目录
# Create a directory to hold the configuration files
mkdir -p ~/install-k8s/configmap
# Config file 1
cat <<\EOF > ~/install-k8s/configmap/game.properties
enemies=aliens
lives=3
enemies.cheat=true
enemies.cheat.level=noGoodRotten
secret.code.passphrase=UUDDLRLRDADAS
secret.code.allowed=true
secret.code.lives=30
EOF
# Config file 2
cat <<\EOF > ~/install-k8s/configmap/ui.properties
color.good=purple
color.bad=yellow
allow.textmode=true
how.nice.to.look=fairlyNice
EOF
cd ~
# Create the ConfigMap: every file in the directory becomes one key/value
# pair (key = file name, value = file content)
kubectl create configmap game-config --from-file=install-k8s/configmap
--from-file指定的目录下的所有文件都会在ConfigMap里面创建一个键值对,
键的名字就是文件名,
值的内容就是文件的内容
kubectl get cm
# NAME DATA AGE
# game-config 2 25s
kubectl get cm game-config -o yaml
# apiVersion: v1
# kind: ConfigMap
# metadata:
# creationTimestamp: "2020-11-30T09:57:07Z"
# managedFields:
# - apiVersion: v1
# fieldsType: FieldsV1
# fieldsV1:
# f:data:
# .: {}
# f:game.properties: {}
# f:ui.properties: {}
# manager: kubectl-create
# operation: Update
# time: "2020-11-30T09:57:07Z"
# name: game-config
# namespace: default
# resourceVersion: "851169"
# selfLink: /api/v1/namespaces/default/configmaps/game-config
# uid: 6f148037-8f8e-4d65-9dc4-8a843be04dbf
# data:
# game.properties: |
# enemies=aliens
# lives=3
# enemies.cheat=true
# enemies.cheat.level=noGoodRotten
# secret.code.passphrase=UUDDLRLRDADAS
# secret.code.allowed=true
# secret.code.lives=30
# ui.properties: |
# color.good=purple
# color.bad=yellow
# allow.textmode=true
# how.nice.to.look=fairlyNice
使用文件
cd ~
kubectl create configmap game-config-2 --from-file=install-k8s/configmap/game.properties
kubectl describe cm game-config-2
# Name: game-config-2
# Namespace: default
# Labels: <none>
# Annotations: <none>
# Data
# ====
# game.properties:
# ----
# enemies=aliens
# lives=3
# enemies.cheat=true
# enemies.cheat.level=noGoodRotten
# secret.code.passphrase=UUDDLRLRDADAS
# secret.code.allowed=true
# secret.code.lives=30
# Events: <none>
使用字面值
kubectl create configmap special-config \
--from-literal=special.how=very \
--from-literal=special.type=charm
kubectl describe cm special-config
# Name: special-config
# Namespace: default
# Labels: <none>
# Annotations: <none>
# Data
# ====
# special.how:
# ----
# very
# special.type:
# ----
# charm
# Events: <none>
kubectl get cm special-config -o yaml
# apiVersion: v1
# kind: ConfigMap
# metadata:
# creationTimestamp: "2020-11-30T10:06:12Z"
# managedFields:
# - apiVersion: v1
# fieldsType: FieldsV1
# fieldsV1:
# f:data:
# .: {}
# f:special.how: {}
# f:special.type: {}
# manager: kubectl-create
# operation: Update
# time: "2020-11-30T10:06:12Z"
# name: special-config
# namespace: default
# resourceVersion: "852479"
# selfLink: /api/v1/namespaces/default/configmaps/special-config
# uid: ce6583d2-bf92-44ab-8fa6-76f26763dca3
# data:
# special.how: very
# special.type: charm
应用ConfigMap
代替环境变量 && 设置命令行参数
# ConfigMap whose individual keys are injected as env vars below.
apiVersion: v1
kind: ConfigMap
metadata:
  name: special-config
data:
  special.how: very
  special.type: charm
---
# ConfigMap imported wholesale via envFrom.
apiVersion: v1
kind: ConfigMap
metadata:
  name: env-config
data:
  log_level: INFO
---
apiVersion: v1
kind: Pod
metadata:
  name: configmap-test-pod
spec:
  restartPolicy: Never
  containers:
    - name: test-container
      image: busybox
      imagePullPolicy: IfNotPresent
      # Environment variables defined inside the container
      env:
        # A plain environment variable: NORMAL_ENV_KEY=normal_key
        - name: NORMAL_ENV_KEY
          value: normal_key
        # Import key special.how from ConfigMap special-config
        # as the value of SPECIAL_LEVEL_KEY
        - name: SPECIAL_LEVEL_KEY
          valueFrom:
            configMapKeyRef:
              name: special-config
              key: special.how
      # Import every data key of ConfigMap env-config as an env var
      envFrom:
        - configMapRef:
            name: env-config
      # Command-line arguments may reference the injected variables
      command: ["/bin/sh", "-c", "echo $NORMAL_ENV_KEY && echo $SPECIAL_LEVEL_KEY && echo $log_level && env"]
kubectl get cm,pods -o wide
# NAME DATA AGE
# configmap/env-config 1 32s
# configmap/special-config 2 32s
# NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
# pod/configmap-test-pod 0/1 Completed 0 32s 172.19.2.38 k8s-node02 <none> <none>
kubectl logs configmap-test-pod
# normal_key
# very
# INFO
# KUBERNETES_SERVICE_PORT=443
# KUBERNETES_PORT=tcp://172.18.0.1:443
# HOSTNAME=configmap-test-pod
# SHLVL=1
# HOME=/root
# KUBERNETES_PORT_443_TCP_ADDR=172.18.0.1
# PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
# NORMAL_ENV_KEY=normal_key
# KUBERNETES_PORT_443_TCP_PORT=443
# KUBERNETES_PORT_443_TCP_PROTO=tcp
# SPECIAL_LEVEL_KEY=very
# log_level=INFO
# KUBERNETES_SERVICE_PORT_HTTPS=443
# KUBERNETES_PORT_443_TCP=tcp://172.18.0.1:443
# KUBERNETES_SERVICE_HOST=172.18.0.1
# PWD=/
通过数据卷插件使用ConfigMap
# ConfigMap consumed through a volume mount: each data key becomes
# a file under the mountPath.
apiVersion: v1
kind: ConfigMap
metadata:
  name: special-config
data:
  special.how: very
  special.type: charm
---
apiVersion: v1
kind: Pod
metadata:
  name: configmap-volume-pod
spec:
  restartPolicy: Never
  volumes:
    # Volume backed by the ConfigMap above
    - name: config-volume
      configMap:
        name: special-config
  containers:
    - name: test-container
      image: busybox
      imagePullPolicy: IfNotPresent
      volumeMounts:
        - name: config-volume
          mountPath: /etc/config
      # Each key appears as a (symlinked) file named after the key
      command: ["/bin/sh", "-c", "cat /etc/config/special.how && ls -l /etc/config/special.how"]
kubectl get cm,pod -o wide
# NAME DATA AGE
# configmap/special-config 2 75s
# NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
# pod/configmap-volume-pod 0/1 Completed 0 32s 172.19.1.61
kubectl logs configmap-volume-pod
# very
# lrwxrwxrwx 1 root root 18 Dec 1 01:59 /etc/config/special.how -> ..data/special.how
ConfigMap热更新
apiVersion: v1
kind: ConfigMap
metadata:
  # ConfigMap name, referenced by the volume below
  name: log-config
data:
  log_level: INFO
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      # The controller manages Pods carrying this label
      run: my-nginx
  template:
    metadata:
      labels:
        # Pod label matched by the controller's selector
        run: my-nginx
    spec:
      volumes:
        # Volume name, referenced by volumeMounts below
        - name: config-volume
          configMap:
            # Reference the ConfigMap defined above
            name: log-config
      containers:
        - name: nginx
          image: nginx:1.8
          ports:
            - containerPort: 80
          volumeMounts:
            # Mount the volume (backed by the log-config ConfigMap)
            - name: config-volume
              # Mount point inside the container
              mountPath: /etc/config
kubectl get cm,pod -o wide
# NAME DATA AGE
# configmap/log-config 1 15s
# NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
# pod/my-nginx-66855bf8cd-fwvz9 1/1 Running 0 15s 172.19.1.66 k8s-node01 <none> <none>
# pod/myapp-deployment-69696c78f7-wmrpj 1/1 Terminating 0 23h 172.19.2.21 k8s-node02 <none> <none>
kubectl exec my-nginx-66855bf8cd-fwvz9 -it -- cat /etc/config/log_level
# INFO
apiVersion: v1
kind: ConfigMap
metadata:
  name: log-config
data:
  # Updated value: Pods mounting this ConfigMap as a volume see the
  # change without a restart (hot update)
  log_level: DEBUG
kubectl apply -f yaml.yaml
# configmap/log-config configured
kubectl describe cm log-config
# Name: log-config
# Namespace: default
# Labels: <none>
# Annotations: <none>
# Data
# ====
# log_level:
# ----
# DEBUG
# Events: <none>
kubectl exec my-nginx-66855bf8cd-fwvz9 -it -- cat /etc/config/log_level
# DEBUG
Secret
保存敏感信息
有三种类型的Secret:
- Service Account:用来访问Kubernetes API,由Kubernetes自动创建,并会自动挂载到Pod的/run/secrets/kubernetes.io/serviceaccount目录中
- Opaque:base64编码格式的Secret,用来存储密码、密钥等
- kubernetes.io/dockerconfigjson:用来存储私有docker registry的认证信息
Service Account
Pod需要访问API Server才会有该信息
kubectl run -it test --image=nginx:1.8 --image-pull-policy=IfNotPresent --restart=Never --rm=true -- ls -l /run/secrets/kubernetes.io/serviceaccount
# total 0
# lrwxrwxrwx 1 root root 13 Dec 1 02:50 ca.crt -> ..data/ca.crt
# lrwxrwxrwx 1 root root 16 Dec 1 02:50 namespace -> ..data/namespace
# lrwxrwxrwx 1 root root 12 Dec 1 02:50 token -> ..data/token
# pod "test" deleted
Opaque
# 创建base64信息
echo -n "admin" | base64
# YWRtaW4=
# echo -n "YWRtaW4=" | base64 -d
echo -n "123456" | base64
# MTIzNDU2
apiVersion: v1
kind: Secret
metadata:
  name: secret-test
type: Opaque
data:
  # base64("admin")
  username: YWRtaW4=
  # base64("123456")
  password: MTIzNDU2
---
apiVersion: v1
kind: Pod
metadata:
  name: secret-test
spec:
  restartPolicy: Never
  volumes:
    - name: secrets-volume
      secret:
        secretName: secret-test
  containers:
    - name: busybox
      image: busybox
      imagePullPolicy: IfNotPresent
      env:
        # Consume a Secret key through an environment variable
        - name: TEST_USERNAME
          valueFrom:
            secretKeyRef:
              # Reference the Secret by its name
              name: secret-test
              key: username
      # Consume the Secret through a volume mount
      volumeMounts:
        - name: secrets-volume
          mountPath: /etc/secrets
          readOnly: true
      # Values are base64-decoded automatically when consumed
      command: ["/bin/sh", "-c", "cat /etc/secrets/username && cat /etc/secrets/password && echo $TEST_USERNAME"]
kubectl get secret,pod -o wide
# NAME TYPE DATA AGE
# secret/default-token-cn9cf kubernetes.io/service-account-token 3 4d20h
# secret/secret-test Opaque 2 5s
# NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
# pod/secret-test 0/1 Completed 0 5s 172.19.1.77 k8s-node01 <none> <none>
kubectl logs secret-test
# admin123456admin
# 使用secret信息时自动完成base64解密
kubernetes.io/dockerconfigjson
存储私有docker registry的认证信息
# Create a Secret holding private docker registry credentials
# (type kubernetes.io/dockerconfigjson)
kubectl create secret docker-registry myregistrykey \
--docker-server=DOCKER_REGISTRY_SERVER \
--docker-username=DOCKER_USER \
--docker-password=DOCKER_PASSWORD \
--docker-email=DOCKER_EMAIL
apiVersion: v1
kind: Pod
metadata:
  name: test-docker-registry
spec:
  containers:
    - name: test-container
      # Private registry address/repository/image:tag protected by the secret
      image: DOCKER_REGISTRY_SERVER/REPOSITORY/IMAGE_NAME:TAG
  # imagePullSecrets belongs at the Pod spec level (not inside a container):
  # the kubelet uses it to authenticate against the registry
  imagePullSecrets:
    # Name of the docker-registry Secret created above
    - name: myregistrykey
Volume
卷的生命与Pod相同,
所以卷的生命比Pod中所有容器都长,
当容器重启时,卷中数据依旧存在,
如果Pod死亡,卷也将死亡
kubernetes支持多种类型的卷
emptyDir卷
当Pod分配给节点时,首先创建emptyDir卷,
并且只要该Pod在该节点运行,该卷就会存在
apiVersion: v1
kind: Pod
metadata:
  name: test-pod
spec:
  containers:
    - name: test-container
      image: nginx:1.8
      volumeMounts:
        - mountPath: /cache
          # spelling fixed: "colume" -> "volume" (name and reference
          # renamed together, so the manifest stays consistent)
          name: cache-volume
  volumes:
    - name: cache-volume
      # emptyDir: created when the Pod is scheduled, deleted with the Pod
      emptyDir: {}
hostPath
将主机节点的文件系统中的文件或目录挂载到集群中
apiVersion: v1
kind: Pod
metadata:
  name: test-pod
spec:
  containers:
    - name: test-container
      image: nginx:1.8
      volumeMounts:
        - mountPath: /test-data
          # spelling fixed: "colume" -> "volume" (name and reference
          # renamed together, so the manifest stays consistent)
          name: test-volume
  volumes:
    - name: test-volume
      hostPath:
        # Directory on the host node's filesystem
        path: /data
        # type Directory: /data must already exist on the node
        type: Directory
PV && PVC
PV是持久卷,屏蔽了各种远程存储的细节,形成统一层,具有独立的生命周期
- 静态PV:带有可供集群用户使用的实际存储的细节
- 动态PV:当静态PV无法匹配用户需求时,集群尝试为PVC动态创建卷
PVC请求具有相关特性的PV资源,用于Pod
PV
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-nfs-01
spec:
  capacity:
    storage: 5Gi
  volumeMode: Filesystem
  # Access modes
  accessModes:
    # One node may mount the volume read-write
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  # Matched against a PVC's storageClassName
  storageClassName: slow
  mountOptions:
    - hard
    - nfsvers=4.1
  nfs:
    path: /tmp
    server: 172.16.22.230
持久化演示说明 - NFS
安装NFS服务器
# Install NFS server packages
# NOTE(review): "nfs-common" is the Debian package name; on CentOS,
# nfs-utils + rpcbind should suffice — confirm the target distro
yum -y install nfs-common nfs-utils rpcbind
# Create the NFS export directory
mkdir /nfsdata
chmod 777 /nfsdata
chown nfsnobody /nfsdata
# Export the directory to all clients, read-write, no root squashing
echo "/nfsdata *(rw,no_root_squash,no_all_squash,sync)" >> /etc/exports
# NFS relies on RPC: start the RPC service first
systemctl start rpcbind && systemctl enable rpcbind
# Start the NFS server
systemctl start nfs && systemctl enable nfs
iptables -F
安装NFS客户端
# Install NFS client packages
yum -y install nfs-utils rpcbind
# Check connectivity and list the exports offered by the server
showmount -e NFS_SERVER_ADDRESS
mkdir /test
# Mount the export locally as a test
mount -t nfs NFS_SERVER_ADDRESS:/nfsdata /test
cd /test
# Seed a test page (later served by nginx from the PV)
echo "test nfs" >> index.html
cd /
# Clean up the test mount
umount /test/
rm -rf /test
部署PV
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-nfs-01
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteOnce
  # Retain: after release, data is kept and the PV must be reclaimed manually
  persistentVolumeReclaimPolicy: Retain
  # Matched against a PVC's storageClassName
  storageClassName: nfs-01
  nfs:
    path: /nfsdata
    server: 172.16.22.230
kubectl get pv -o wide
# NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE
# pv-nfs-01 2Gi RWO Retain Available nfs-01 29s Filesystem
创建服务并使用PVC
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
    - name: web
      port: 80
  # Headless Service: no cluster IP; gives StatefulSet Pods stable DNS names
  clusterIP: None
  selector:
    app: nginx
---
apiVersion: apps/v1
# Controller with stable identities and per-replica storage
kind: StatefulSet
metadata:
  name: web
spec:
  # Match Pods by label to form the logical group
  selector:
    matchLabels:
      app: nginx
  serviceName: nginx
  replicas: 1
  # PVC template: each replica gets its own claim (www-web-0, ...)
  volumeClaimTemplates:
    - metadata:
        # PVC name, referenced by the container's volumeMounts
        name: www
      spec:
        # Request a PV with this access mode
        accessModes:
          - ReadWriteOnce
        # Request a PV with this storageClassName
        storageClassName: nfs-01
        resources:
          requests:
            # Requested capacity
            storage: 1Gi
  # Pod template
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.19-alpine
          ports:
            - name: web
              containerPort: 80
          # Mount the storage obtained through the PVC
          volumeMounts:
            # PVC name from volumeClaimTemplates above
            - name: www
              # Mount point inside the container
              mountPath: /usr/share/nginx/html
kubectl get svc,statefulset,pod,pv,pvc -o wide
# NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
# service/kubernetes ClusterIP 172.18.0.1 <none> 443/TCP 31h <none>
# service/nginx ClusterIP None <none> 80/TCP 61s app=nginx
# NAME READY AGE CONTAINERS IMAGES
# statefulset.apps/web 1/1 61s nginx nginx:1.19-alpine
# NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
# pod/web-0 1/1 Running 0 61s 172.19.1.81 k8s-node01 <none> <none>
# NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE
# persistentvolume/pv-nfs-01 2Gi RWO Retain Bound default/www-web-0 nfs-01 23m Filesystem
# NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE
# persistentvolumeclaim/www-web-0 Bound pv-nfs-01 2Gi RWO nfs-01 61s Filesystem
kubectl run -it test --image=busybox --image-pull-policy=IfNotPresent --restart=Never --rm=true -- wget http://nginx -O - | cat -
# Connecting to nginx (172.19.1.81:80)
# test nfs
kubectl run -it test --image=busybox --image-pull-policy=IfNotPresent --restart=Never --rm=true -- wget http://web-0.nginx -O - | cat -
# Connecting to nginx (172.19.1.81:80)
# test nfs
curl http://172.19.1.81
# test nfs
删除service、statefulset和pvc后,pv的资源需要手动释放,才能status:Released --> status:Available
kubectl get pv
# NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
# pv-nfs-01 2Gi RWO Retain Released default/www-web-0 nfs-01 68m
kubectl edit pv pv-nfs-01
# 删除 claimRef: 下所有内容,包括 claimRef:
kubectl get pv
# NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
# pv-nfs-01 2Gi RWO Retain Available nfs-01 69m