一、准备工作
k8s部署见:
https://blog.csdn.net/oToyix/article/details/117963839
ceph集群部署 见:
https://blog.csdn.net/oToyix/article/details/118307711
需要准备以下两个 yaml 文件:
ceph-nginx-pv-pvc.yaml (PV、PVC定义)
nginx.yaml (Deployment、service)
k8s所有节点安装 ceph-common
yum install ceph-common -y
k8s 部署ceph Secrets
1、从ceph中拿到key值,即ceph中的/etc/ceph/ceph.client.admin.keyring文件中的key部分
cat /etc/ceph/ceph.client.admin.keyring |sed -n '$p'|awk '{print $3}'
AQAXg/dgroPcNRAAdu5nG/5NIFL+1eLLk5hTxA==
也可以
ceph auth get-key client.admin > /secret.txt
2、将密钥文件从 ceph 节点拷贝到 k8s master 节点(192.168.0.47),然后在 master 上创建 Secret:
scp /secret.txt root@192.168.0.47:/
kubectl create secret generic ceph-admin-secret --from-file=/secret.txt
二、部署
kubectl apply -f ceph-nginx-pv-pvc.yaml
kubectl apply -f nginx.yaml
内容见下:
cat ceph-nginx-pv-pvc.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: nginx-html-ceph-pv
namespace: default
labels:
pv: html-ceph-pv
spec:
capacity:
storage: 20G
accessModes:
- ReadWriteMany
cephfs:
monitors:
- 192.168.0.57:6789
path: /nginx/html
user: admin
secretRef:
name: ceph-admin-secret
readOnly: false
persistentVolumeReclaimPolicy: Retain
---
# Claim that binds to nginx-html-ceph-pv via the label selector.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nginx-html-ceph-pvc
  namespace: default
spec:
  accessModes:
    - ReadWriteMany
  # Empty storageClassName disables dynamic provisioning so the claim
  # binds only to a statically created PV selected below.
  storageClassName: ""
  resources:
    requests:
      # May be smaller than the PV's 20G capacity; binding still succeeds.
      storage: 10G
  selector:
    matchLabels:
      pv: html-ceph-pv
---
# PersistentVolume backed by CephFS for the nginx vhost configuration.
apiVersion: v1
kind: PersistentVolume
metadata:
  # NOTE: PersistentVolumes are cluster-scoped; a `namespace` field here
  # would be silently ignored, so it is omitted.
  name: nginx-conf-ceph-pv
  labels:
    pv: conf-ceph-pv
spec:
  capacity:
    storage: 20G
  accessModes:
    - ReadWriteMany
  cephfs:
    monitors:
      # Quoted: "host:port" can be misread as a sexagesimal int by
      # YAML 1.1 parsers if left as a plain scalar.
      - "192.168.0.57:6789"
    # NOTE(review): this path looks like a file, but the CephFS volume
    # plugin mounts a path within the filesystem — presumably
    # /nginx/conf/vhost/www.conf is a directory in CephFS; verify.
    path: /nginx/conf/vhost/www.conf
    user: admin
    secretRef:
      name: ceph-admin-secret
    readOnly: false
  persistentVolumeReclaimPolicy: Retain
---
# Claim that binds to nginx-conf-ceph-pv via the label selector.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nginx-conf-ceph-pvc
  namespace: default
spec:
  accessModes:
    - ReadWriteMany
  # Empty storageClassName disables dynamic provisioning; bind only to
  # the statically created PV selected below.
  storageClassName: ""
  resources:
    requests:
      storage: 10G
  selector:
    matchLabels:
      pv: conf-ceph-pv
cat nginx.yaml
# Deployment running one nginx pod with html content and vhost config
# mounted from the two CephFS-backed PVCs defined above.
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nginx-v1
  namespace: default
  labels:
    k8s-app: nginx-v1
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: nginx-v1
  # strategy / revisionHistoryLimit / progressDeadlineSeconds belong at
  # Deployment spec level, not inside the pod template.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      # Quoted: percentage values are strings, not numbers.
      maxUnavailable: "25%"
      maxSurge: "25%"
  revisionHistoryLimit: 10
  progressDeadlineSeconds: 600
  template:
    metadata:
      name: nginx-v1
      labels:
        k8s-app: nginx-v1
    spec:
      containers:
        - name: nginx-v1
          image: "nginx:latest"
          imagePullPolicy: Always
          ports:
            - containerPort: 80
          volumeMounts:
            - name: nginx-html
              mountPath: /usr/share/nginx/html
            - name: nginx-conf
              mountPath: /etc/nginx/conf.d/vhost/www.conf
      volumes:
        - name: nginx-html
          persistentVolumeClaim:
            claimName: nginx-html-ceph-pvc
        - name: nginx-conf
          persistentVolumeClaim:
            claimName: nginx-conf-ceph-pvc
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: {}
      schedulerName: default-scheduler
---
# Service exposing the nginx pods both via LoadBalancer ports and fixed
# NodePorts (curl <node-ip>:30680 in the verification below).
kind: Service
apiVersion: v1
metadata:
  name: nginx-v1
  namespace: default
  labels:
    k8s-app: nginx-v1
spec:
  type: LoadBalancer
  selector:
    k8s-app: nginx-v1
  ports:
    # NOTE(review): the port names do not match the actual numbers
    # (8888/8881 in the names vs 8880/8881 ports); kept as-is in case
    # something references them by name — consider renaming.
    - name: tcp-8888-81-rpxsl
      port: 8880
      targetPort: 80
      nodePort: 30680
    # NOTE(review): targetPort 81 has no matching containerPort in the
    # Deployment; traffic on 8881/30681 will not reach nginx — verify.
    - name: tcp-8881-80-rpxsl
      port: 8881
      targetPort: 81
      nodePort: 30681
  # Hard-coded cluster IP must lie inside the cluster's service CIDR and
  # be unallocated; omit these two fields to let the cluster choose.
  clusterIP: 10.10.214.146
  clusterIPs:
    - 10.10.214.146
查看效果:
[root@master1 ceph_jtpv]# kubectl get pod|grep nginx
nginx-v1-c8fc6c644-jsmcc 1/1 Running 0 36m
[root@master1 ceph_jtpv]# kubectl exec -it nginx-v1-c8fc6c644-jsmcc bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx-v1-c8fc6c644-jsmcc:/#
root@nginx-v1-c8fc6c644-jsmcc:/# ls /usr/share/nginx/html/
index.html
root@nginx-v1-c8fc6c644-jsmcc:/# ls /etc/nginx/conf.d/vhost/
www.conf
root@nginx-v1-c8fc6c644-jsmcc:/#
root@nginx-v1-c8fc6c644-jsmcc:/# exit
exit
[root@master1 ceph_jtpv]# curl 192.168.0.47:30680
ceph
----------------------------end