helm开发环境部署gitea

一、部署一个pg

# PersistentVolume backing the PostgreSQL data directory.
# NOTE: PersistentVolumes are cluster-scoped resources; the original manifest
# carried a `namespace` field here, which the API server ignores — removed.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: postgresql-pv
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  # hostPath ties the data to one node — fine for a single-node dev
  # environment, unsuitable for multi-node clusters.
  hostPath:
    path: "/data/svr/pgdb"
---
# PersistentVolumeClaim consumed by the postsql Deployment below.
# storageClassName "manual" matches the postgresql-pv PV above, so this
# claim binds to it; 5Gi is requested out of the PV's 10Gi capacity.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: postgresql-pv-claim
  namespace: workspace
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
---
# Single-replica PostgreSQL Deployment for the dev environment.
# Scaffold residue from `kubectl create --dry-run -o yaml`
# (creationTimestamp: null, strategy: {}, status: {}) has been removed,
# and sequence indentation normalized to a consistent 2-space style.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: postsql
  name: postsql
  namespace: workspace
spec:
  replicas: 1
  selector:
    matchLabels:
      app: postsql
  template:
    metadata:
      labels:
        app: postsql
    spec:
      containers:
        - name: postgres
          # NOTE(review): :latest is not reproducible — pin a concrete
          # postgres version for anything beyond throwaway dev use.
          image: postgres:latest
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 5432
          env:
            # NOTE(review): plaintext superuser password; move it into a
            # Secret (secretKeyRef) for anything non-local.
            - name: POSTGRES_PASSWORD
              value: root
          volumeMounts:
            # Persist the database files on the PVC defined above.
            - name: postgresql-persistent-storage
              mountPath: /var/lib/postgresql/data
      volumes:
        - name: postgresql-persistent-storage
          persistentVolumeClaim:
            claimName: postgresql-pv-claim
---
# NodePort Service exposing PostgreSQL: cluster-internal on port 5432
# (DNS name "postsql" inside the namespace) and on every node at 30432.
# The original was a live `kubectl get -o yaml` dump; server-managed fields
# (managedFields, uid, resourceVersion, creationTimestamp, clusterIP/clusterIPs,
# status) have been stripped so the manifest can be re-applied to any cluster —
# a hard-coded clusterIP would make `kubectl apply` fail elsewhere.
apiVersion: v1
kind: Service
metadata:
  labels:
    app: postsql
  name: postsql
  namespace: workspace
spec:
  type: NodePort
  externalTrafficPolicy: Cluster
  sessionAffinity: None
  ports:
    - nodePort: 30432
      port: 5432
      protocol: TCP
      targetPort: 5432
  selector:
    app: postsql

说明:首先为 pg 创建一个 PV 用于存储数据,再创建一个 PVC,供 Deployment 挂载连接该存储。

二、部署一个gitea

# PersistentVolume backing Gitea's /data directory.
# NOTE: PersistentVolumes are cluster-scoped resources; the original manifest
# carried a `namespace` field here, which the API server ignores — removed.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: gitea-pv
  labels:
    type: local
spec:
  # Distinct storageClassName keeps this PV from binding to the pg claim.
  storageClassName: manual-gitea
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/data/gitea/data"
---
# PersistentVolumeClaim consumed by the gitea Deployment below.
# storageClassName "manual-gitea" matches the gitea-pv PV above, so this
# claim binds to it; 5Gi is requested out of the PV's 10Gi capacity.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gitea-pv-claim
  namespace: workspace
spec:
  storageClassName: manual-gitea
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
---
# Single-replica Gitea Deployment. An init container blocks pod startup until
# the "postsql" Service name resolves and answers ping, so Gitea does not come
# up before its database.
# Scaffold residue from `kubectl create --dry-run -o yaml`
# (creationTimestamp: null, strategy: {}, status: {}) has been removed,
# and sequence indentation normalized to a consistent 2-space style.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: gitea
  name: gitea
  namespace: workspace
spec:
  replicas: 1
  selector:
    matchLabels:
      app: gitea
  template:
    metadata:
      labels:
        app: gitea
    spec:
      initContainers:
        # NOTE(review): ICMP ping against a Service VIP only succeeds with
        # kube-proxy in ipvs mode (see the kube-proxy section of this article);
        # a TCP check such as `nc -z postsql 5432` would also verify the DB
        # port is open and works in iptables mode as well.
        - name: wait-for-postsql
          image: busybox:1.28
          command: ['sh', '-c', 'until ping -c 3 postsql; do sleep 2 ; done ; echo ping postsql success.db is read']
      containers:
        - name: gitea
          # NOTE(review): :latest is not reproducible — pin a concrete
          # gitea version for anything beyond throwaway dev use.
          image: gitea/gitea:latest
          volumeMounts:
            # Gitea keeps repositories and config under /data.
            - name: gitea-persistent-storage
              mountPath: /data
      volumes:
        - name: gitea-persistent-storage
          persistentVolumeClaim:
            claimName: gitea-pv-claim
---
# NodePort Service exposing the Gitea web UI: cluster-internal on port 3000
# and on every node at 30333.
# The original was a live `kubectl get -o yaml` dump; server-managed fields
# (managedFields, uid, resourceVersion, creationTimestamp, clusterIP/clusterIPs,
# status) have been stripped so the manifest can be re-applied to any cluster —
# a hard-coded clusterIP would make `kubectl apply` fail elsewhere.
apiVersion: v1
kind: Service
metadata:
  labels:
    app: gitea
  name: gitea
  namespace: workspace
spec:
  type: NodePort
  externalTrafficPolicy: Cluster
  sessionAffinity: None
  ports:
    - nodePort: 30333
      port: 3000
      protocol: TCP
      targetPort: 3000
  selector:
    app: gitea

说明:与 pg 的 yaml 基本一致,不同的是这个 chart 在 Deployment 中定义了 initContainers,不停地 ping pg 服务,直到 ping 通后才继续启动主容器。

三、修改kube-proxy

查看 kube-proxy 日志(kubectl logs -n kube-system kube-proxy-9mkwz),若输出中显示 Using iptables Proxier.,需将其改为 Using ipvs Proxier.。目的是让 initContainers 中的 until ping -c 3 postsql; do sleep 2 ; done ; echo ping postsql success.db is read 能够正常跑通(iptables 模式下 Service 的 ClusterIP 不响应 ICMP ping)。

开启内核支持

# Enable IPv4 forwarding and make bridged traffic traverse iptables —
# kernel prerequisites for kube-proxy service routing.
# NOTE(review): `cat >>` appends; re-running this duplicates the lines in
# /etc/sysctl.conf.
cat >> /etc/sysctl.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

sysctl -p

安装 ipvs 相关工具:apt install ipvsadm ipset

# Write the module-load script for the kernel modules that ipvs-mode
# kube-proxy requires.
# NOTE(review): on kernels >= 4.19, nf_conntrack_ipv4 was merged into
# nf_conntrack — use `modprobe -- nf_conntrack` there; confirm with `uname -r`.
# NOTE(review): /etc/sysconfig/modules/ is a RHEL convention and is not
# executed automatically on Debian/Ubuntu (where apt, used above, implies).
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

修改 kube-proxy 配置(kubectl edit cm kube-proxy -n kube-system),找到

ipvs:
    mode: "ipvs"  #将 mode: ""   -->   mode: "ipvs"  

四、安装

使用 helm create gitea 创建 chart,进入后删除 templates 目录内全部内容,然后将第二节中的 yaml 放入,执行 helm install gitea gitea;pg 同理:helm create pg,进入后删除 templates 目录内全部内容,然后将第一节中的 yaml 放入,执行 helm install pg pg。
在这里插入图片描述
gitea很轻量,私有环境下部署在本地使用很方便

  • 1
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 1
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值