K8S学习笔记0515

业务容器化案例 PV/PVC以及zookeeper

构建zookeeper镜像并测试

[root@k8s-master1 zook]# cat Dockerfile 
#FROM harbor-linux38.local.com/linux38/slim_java:8 
FROM k8s-harbor.com/public/slim_java:8

ENV ZK_VERSION 3.4.14
ADD repositories /etc/apk/repositories 
# Download Zookeeper
COPY zookeeper-3.4.14.tar.gz /tmp/zk.tgz
COPY zookeeper-3.4.14.tar.gz.asc /tmp/zk.tgz.asc
COPY KEYS /tmp/KEYS
RUN apk add --no-cache --virtual .build-deps \
      ca-certificates   \
      gnupg             \
      tar               \
      wget &&           \
    #
    # Install dependencies
    apk add --no-cache  \
      bash &&           \
    #
    #
    # Verify the signature
    export GNUPGHOME="$(mktemp -d)" && \
    gpg -q --batch --import /tmp/KEYS && \
    gpg -q --batch --no-auto-key-retrieve --verify /tmp/zk.tgz.asc /tmp/zk.tgz && \
    #
    # Set up directories
    #
    mkdir -p /zookeeper/data /zookeeper/wal /zookeeper/log && \
    #
    # Install
    tar -x -C /zookeeper --strip-components=1 --no-same-owner -f /tmp/zk.tgz && \
    #
    # Slim down
    cd /zookeeper && \
    cp dist-maven/zookeeper-${ZK_VERSION}.jar . && \
    rm -rf \
      *.txt \
      *.xml \
      bin/README.txt \
      bin/*.cmd \
      conf/* \
      contrib \
      dist-maven \
      docs \
      lib/*.txt \
      lib/cobertura \
      lib/jdiff \
      recipes \
      src \
      zookeeper-*.asc \
      zookeeper-*.md5 \
      zookeeper-*.sha1 && \
    #
    # Clean up
    apk del .build-deps && \
    rm -rf /tmp/* "$GNUPGHOME"

COPY conf /zookeeper/conf/
COPY bin/zkReady.sh /zookeeper/bin/
COPY entrypoint.sh /

ENV PATH=/zookeeper/bin:${PATH} \
    ZOO_LOG_DIR=/zookeeper/log \
    ZOO_LOG4J_PROP="INFO, CONSOLE, ROLLINGFILE" \
    JMXPORT=9010

ENTRYPOINT [ "/entrypoint.sh" ]

CMD [ "zkServer.sh", "start-foreground" ]

EXPOSE 2181 2888 3888 9010
[root@k8s-master1 zook]# cat entrypoint.sh 
#!/bin/bash

echo ${MYID:-1} > /zookeeper/data/myid

if [ -n "$SERVERS" ]; then
	IFS=\, read -a servers <<<"$SERVERS"
	for i in "${!servers[@]}"; do 
		printf "\nserver.%i=%s:2888:3888" "$((1 + $i))" "${servers[$i]}" >> /zookeeper/conf/zoo.cfg
	done
fi

cd /zookeeper
exec "$@"

 验证zookeeper镜像能否被正常启动
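以下为构建并在本地简单验证镜像的示例命令(镜像tag与后文deployment中引用的一致,ruok为zookeeper四字命令,需要本机安装nc):

docker build -t k8s-harbor.com/public/zookeeper:20220515 .
docker push k8s-harbor.com/public/zookeeper:20220515
docker run -d --rm --name zk-test -p 2181:2181 k8s-harbor.com/public/zookeeper:20220515
echo ruok | nc 127.0.0.1 2181    # 返回imok说明zookeeper已正常启动
docker rm -f zk-test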

创建PV/PVC

创建PV

[root@k8s-master1 pv]# cat zookeeper-persistentvolume.yaml 
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-pv-1
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce 
  nfs:
    server: 192.168.226.144
    path: /data/k8sdata/zookeeper-datadir-1 

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-pv-2
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 192.168.226.144 
    path: /data/k8sdata/zookeeper-datadir-2 

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-pv-3
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 192.168.226.144  
    path: /data/k8sdata/zookeeper-datadir-3 
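示例(创建PV并确认状态为Available):

kubectl apply -f zookeeper-persistentvolume.yaml
kubectl get pv | grep zookeeper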

 创建对应PVC

[root@k8s-master1 pv]# cat zookeeper-persistentvolumeclaim.yaml 
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-pvc-1
  namespace: magedu
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: zookeeper-datadir-pv-1
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-pvc-2
  namespace: magedu
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: zookeeper-datadir-pv-2
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-pvc-3
  namespace: magedu
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: zookeeper-datadir-pv-3
  resources:
    requests:
      storage: 10Gi
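示例(创建PVC并确认与对应PV绑定,状态为Bound):

kubectl apply -f zookeeper-persistentvolumeclaim.yaml
kubectl get pvc -n magedu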

运行zookeeper集群

创建yaml

[root@k8s-master1 zookeeper]# cat zookeeper.yaml 
apiVersion: v1
kind: Service
metadata:
  name: zookeeper
  namespace: magedu
spec:
  ports:
    - name: client
      port: 2181
  selector:
    app: zookeeper
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper1
  namespace: magedu
spec:
  type: NodePort        
  ports:
    - name: client
      port: 2181
      nodePort: 32181
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "1"
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper2
  namespace: magedu
spec:
  type: NodePort        
  ports:
    - name: client
      port: 2181
      nodePort: 32182
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "2"
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper3
  namespace: magedu
spec:
  type: NodePort        
  ports:
    - name: client
      port: 2181
      nodePort: 32183
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "3"
---
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  name: zookeeper1
  namespace: magedu
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "1"
    spec:
      containers:
        - name: server
          image: k8s-harbor.com/public/zookeeper:20220515
          imagePullPolicy: Always
          env:
            - name: MYID
              value: "1"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx2G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
          - mountPath: "/zookeeper/data"
            name: zookeeper-datadir-pvc-1 
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir:
            medium: Memory
        - name: zookeeper-datadir-pvc-1
          persistentVolumeClaim:
            claimName: zookeeper-datadir-pvc-1
---
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  name: zookeeper2
  namespace: magedu
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "2"
    spec:
      containers:
        - name: server
          image: k8s-harbor.com/public/zookeeper:20220515
          imagePullPolicy: Always
          env:
            - name: MYID
              value: "2"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx2G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
          - mountPath: "/zookeeper/data"
            name: zookeeper-datadir-pvc-2 
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir:
            medium: Memory
        - name: zookeeper-datadir-pvc-2
          persistentVolumeClaim:
            claimName: zookeeper-datadir-pvc-2
---
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  name: zookeeper3
  namespace: magedu
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "3"
    spec:
      containers:
        - name: server
          image: k8s-harbor.com/public/zookeeper:20220515
          imagePullPolicy: Always
          env:
            - name: MYID
              value: "3"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx2G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
          - mountPath: "/zookeeper/data"
            name: zookeeper-datadir-pvc-3
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir:
            medium: Memory
        - name: zookeeper-datadir-pvc-3
          persistentVolumeClaim:
            claimName: zookeeper-datadir-pvc-3
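示例部署命令(文件名沿用上文zookeeper.yaml):

kubectl apply -f zookeeper.yaml
kubectl get pods -n magedu -o wide | grep zookeeper
kubectl get svc -n magedu | grep zookeeper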

验证集群状态
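示例(查看各节点角色,正常情况下为一个leader、两个follower;kubectl较新版本支持deploy/名称方式,否则替换为具体pod名):

kubectl exec -n magedu deploy/zookeeper1 -- zkServer.sh status
kubectl exec -n magedu deploy/zookeeper2 -- zkServer.sh status
kubectl exec -n magedu deploy/zookeeper3 -- zkServer.sh status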

 验证集群选举
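示例(删除当前leader所在pod,待新pod重建后再次查看status,观察集群是否重新选举出leader,server-id按实际leader替换):

kubectl delete pod -n magedu -l server-id=2    # 假设zookeeper2当前为leader
kubectl exec -n magedu deploy/zookeeper2 -- zkServer.sh status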

 

选举机制
1、Serverid:服务器ID
比如有三台服务器,编号分别是1,2,3。

编号越大在选举算法中的权重越大。

2、Zxid:数据ID
服务器中存放的最大数据ID.

值越大说明数据越新,在选举算法中数据越新权重越大。

3、Epoch:逻辑时钟
或者叫投票的次数,同一轮投票过程中的逻辑时钟值是相同的。每投完一次票这个数据就会增加,然后与接收到的其它服务器返回的投票信息中的数值相比,根据不同的值做出不同的判断。

4、Server状态:选举状态
LOOKING,竞选状态。
FOLLOWING,随从状态,同步leader状态,参与投票。
OBSERVING,观察状态,同步leader状态,不参与投票。
LEADING,领导者状态。

在投票完成后,需要将投票信息发送给集群中的所有服务器,它包含如下内容。

服务器ID
数据ID
逻辑时钟
选举状态
选举流程详述
一、首先开始选举阶段,每个Server读取自身的zxid。

二、发送投票信息

   a、首先,每个Server第一轮都会投票给自己。

   b、投票信息包含 :所选举leader的Serverid,Zxid,Epoch。Epoch会随着选举轮数的增加而递增。

三、接收投票信息

  1、如果服务器B接收到服务器A的数据(服务器A处于选举状态,即LOOKING状态):

     1)首先,判断逻辑时钟值:

    a)如果发送过来的逻辑时钟Epoch大于目前的逻辑时钟:首先更新本机逻辑时钟Epoch,同时清空本轮逻辑时钟收集到的来自其他server的选举数据;然后判断是否需要更新自己当前选举的leader Serverid。判断规则:依据保存的最大zxid和leader Serverid进行比较,先比较数据zxid,zxid大者胜出;zxid相同再比较leader Serverid,Serverid大者胜出;最后将自身最新的选举结果(即上面提到的三种数据:leader Serverid、Zxid、Epoch)广播给其他server。

    b)如果发送过来的逻辑时钟Epoch小于目前的逻辑时钟:说明对方server处于一个相对较早的Epoch中,这里只需要将本机的三种数据(leader Serverid、Zxid、Epoch)发送过去即可。

    c)如果发送过来的逻辑时钟Epoch等于目前的逻辑时钟:再根据上述判断规则选举leader,然后将自身最新的选举结果(leader Serverid、Zxid、Epoch)广播给其他server。

    2)其次,判断服务器是否已经收集到了所有服务器的选举状态:若是,根据选举结果设置自己的角色(FOLLOWING还是LEADING),然后退出选举过程。

最后,若还没有收集到所有服务器的选举状态:也可以判断根据以上过程得出的最新选举leader是否已得到超过半数服务器的支持,如果是,则尝试在200ms内再接收一下数据,若没有新的数据到来,说明大家都已经认可这个结果,同样设置角色并退出选举过程。

  2、如果接收到的数据来自处于其它状态(FOLLOWING或者LEADING)的服务器A。

    a)逻辑时钟Epoch等于目前的逻辑时钟,将该数据保存到recvset。此时Server已经处于LEADING状态,说明此时这个server已经投票选出结果。若此时这个接收服务器宣称自己是leader, 那么将判断是不是有半数以上的服务器选举它,如果是则设置选举状态退出选举过程。
    b) 否则这是一条与当前逻辑时钟不符合的消息,那么说明在另一个选举过程中已经有了选举结果,于是将该选举结果加入到outofelection集合中,再根据outofelection来判断是否可以结束选举,如果可以也是保存逻辑时钟,设置选举状态,退出选举过程。
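上面的选举要素可以结合zookeeper四字命令实际观察,示例如下(假设容器内自带busybox的nc;kubectl较新版本支持deploy/名称方式,否则替换为具体pod名):

kubectl exec -n magedu deploy/zookeeper1 -- bash -c 'echo srvr | nc 127.0.0.1 2181'
# 输出中的Zxid即数据ID,Mode一栏显示当前节点是leader还是follower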

业务容器化案例 PV/PVC以及redis集群

构建redis镜像

[root@k8s-master1 redis]# cat Dockerfile 
#Redis Image
FROM k8s-harbor.com/public/centos-base:7.8.2003


ADD redis-4.0.14.tar.gz /usr/local/src
RUN ln -sv /usr/local/src/redis-4.0.14 /usr/local/redis && cd /usr/local/redis && make && cp src/redis-cli /usr/sbin/ && cp src/redis-server  /usr/sbin/ && mkdir -pv /data/redis-data 
ADD redis.conf /usr/local/redis/redis.conf 
ADD run_redis.sh /usr/local/redis/run_redis.sh

EXPOSE 6379

CMD ["/usr/local/redis/run_redis.sh"]

上传到harbor仓库
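示例构建并上传命令(镜像名与tag为示例;注意后文statefulset使用的是官方redis:4.0.14镜像,若要使用自建镜像需同步修改image字段):

docker build -t k8s-harbor.com/public/redis:4.0.14 .
docker push k8s-harbor.com/public/redis:4.0.14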

创建PV/PVC

apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv0
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 192.168.226.144
    path: /data/k8sdata/redis-cluster/redis0 

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv1
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 192.168.226.144
    path: /data/k8sdata/redis-cluster/redis1 

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv2
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 192.168.226.144
    path: /data/k8sdata/redis-cluster/redis2 

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv3
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 192.168.226.144
    path: /data/k8sdata/redis-cluster/redis3 

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv4
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 192.168.226.144
    path: /data/k8sdata/redis-cluster/redis4 

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv5
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 192.168.226.144
    path: /data/k8sdata/redis-cluster/redis5 
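示例(yaml文件名为示例,按实际保存的文件名替换):

kubectl apply -f redis-persistentvolume.yaml
kubectl get pv | grep redis-cluster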

部署redis cluster

配置conf文件

vim redis.conf
appendonly yes
# 集群模式必须开启
cluster-enabled yes
cluster-config-file /var/lib/redis/nodes.conf
cluster-node-timeout 5000
dir /var/lib/redis
port 6379

基于conf文件创建configmap

kubectl create configmap redis-conf --from-file=redis.conf -n  magedu
kubectl  get configmaps  -n magedu

kubectl  describe  configmaps redis-conf -n magedu

 创建pod

[root@k8s-master1 rediscluster]# cat redis.yaml 
apiVersion: v1
kind: Service
metadata:
  name: redis
  namespace: magedu
  labels:
    app: redis
spec:
  selector:
    app: redis
    appCluster: redis-cluster
  ports:
  - name: redis
    port: 6379
  clusterIP: None
  
---
apiVersion: v1
kind: Service
metadata:
  name: redis-access
  namespace: magedu
  labels:
    app: redis
spec:
  selector:
    app: redis
    appCluster: redis-cluster
  ports:
  - name: redis-access
    protocol: TCP
    port: 6379
    targetPort: 6379

---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redis
  namespace: magedu
spec:
  serviceName: redis
  replicas: 6
  selector:
    matchLabels:
      app: redis
      appCluster: redis-cluster
  template:
    metadata:
      labels:
        app: redis
        appCluster: redis-cluster
    spec:
      terminationGracePeriodSeconds: 20
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values:
                  - redis
              topologyKey: kubernetes.io/hostname
      containers:
      - name: redis
        image: redis:4.0.14
        command:
          - "redis-server"
        args:
          - "/etc/redis/redis.conf"
          - "--protected-mode"
          - "no"
        resources:
          requests:
            cpu: "200m"
            memory: "200Mi"
        ports:
        - containerPort: 6379
          name: redis
          protocol: TCP
        - containerPort: 16379    #集群端口
          name: cluster
          protocol: TCP
        volumeMounts:
        - name: conf
          mountPath: /etc/redis
        - name: data
          mountPath: /var/lib/redis
      volumes:
      - name: conf
        configMap:
          name: redis-conf
          items:
          - key: redis.conf
            path: redis.conf
  volumeClaimTemplates:
  - metadata:
      name: data
      namespace: magedu
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 5Gi
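示例部署命令(文件名沿用上文redis.yaml;pod就绪后每个pod会通过volumeClaimTemplates自动创建并绑定一个PVC):

kubectl apply -f redis.yaml
kubectl get pods -n magedu -l app=redis
kubectl get pvc -n magedu | grep redis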

 初始化redis cluster

准备工作

集群初始化只需要执行一次,redis 4及之前的版本需要使用redis-trib工具进行初始化,redis 5开始可以直接使用redis-cli。
kubectl run -it ubuntu1804 --image=ubuntu:18.04 --restart=Never -n magedu bash

apt update
apt install  python2.7 python-pip redis-tools dnsutils iputils-ping net-tools
 pip install --upgrade pip
pip install redis-trib==0.5.1

初始化集群

将前三个pod(redis-0、redis-1、redis-2)添加为master
redis-trib.py create \
  `dig +short redis-0.redis.magedu.svc.k8s.local`:6379 \
  `dig +short redis-1.redis.magedu.svc.k8s.local`:6379 \
  `dig +short redis-2.redis.magedu.svc.k8s.local`:6379



分别将其余三个节点添加为对应master的slave(从节点)
 redis-trib.py replicate \
  --master-addr `dig +short redis-0.redis.magedu.svc.k8s.local`:6379 \
  --slave-addr `dig +short redis-3.redis.magedu.svc.k8s.local`:6379

  
   redis-trib.py replicate \
  --master-addr `dig +short redis-1.redis.magedu.svc.k8s.local`:6379 \
  --slave-addr `dig +short redis-4.redis.magedu.svc.k8s.local`:6379
  
  
   redis-trib.py replicate \
  --master-addr `dig +short redis-2.redis.magedu.svc.k8s.local`:6379 \
  --slave-addr `dig +short redis-5.redis.magedu.svc.k8s.local`:6379

验证redis cluster状态

[root@k8s-master1 zook]# kubectl exec -it redis-1 bash -n magedu
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@redis-1:/data# redis-cli 
127.0.0.1:6379> cluster info

 

在master写入数据,在node验证 
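示例(键名testkey为示例;-c参数会自动按slot重定向到key所在节点):

kubectl exec -it redis-0 -n magedu -- redis-cli -c set testkey hello
kubectl exec -it redis-3 -n magedu -- redis-cli -c get testkey
kubectl exec -it redis-0 -n magedu -- redis-cli info replication    # 查看主从复制状态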

实战案例之MySQL主从架构

StatefulSet本质上是Deployment的一种变体,在v1.9版本中已成为GA版本。它是为了解决有状态服务的问题而设计的:它所管理的Pod拥有固定的Pod名称和启停顺序,在StatefulSet中Pod名字称为网络标识(hostname),并且必须要用到共享存储。在Deployment中,与之对应的服务是service;而在StatefulSet中与之对应的是headless service(无头服务),与普通service的区别是它没有cluster IP,解析它的名称时将返回该headless service对应的全部Pod的Endpoint列表。

特点:
->给每个pod分配固定且唯一的网络标识符
->给每个pod分配固定且持久化的外部存储
->对pod进行有序的部署和扩展
->对pod进行有序的删除和终止
->对pod进行有序的自动滚动更新

 写操作只在master(mysql-0)上执行,xtrabackup用来将数据同步到slave

创建PV

[root@k8s-master1 pv]# cat mysql-persistentvolume.yaml 
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-1
spec:
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/k8sdata/mysql/mysql-datadir-1 
    server: 192.168.226.144
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-2
spec:
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/k8sdata/mysql/mysql-datadir-2
    server: 192.168.226.144
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-3
spec:
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/k8sdata/mysql/mysql-datadir-3
    server: 192.168.226.144
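示例(创建PV并确认状态):

kubectl apply -f mysql-persistentvolume.yaml
kubectl get pv | grep mysql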

创建configmap

apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql
  namespace: magedu
  labels:
    app: mysql
data:
  master.cnf: |    #master使用的配置
    # Apply this config only on the master.
    [mysqld]
    log-bin
    log_bin_trust_function_creators=1
    lower_case_table_names=1
  slave.cnf: |    #slave使用的配置
    # Apply this config only on slaves.
    [mysqld]
    super-read-only
    log_bin_trust_function_creators=1
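示例(yaml文件名为示例,按实际保存的文件名替换):

kubectl apply -f mysql-configmap.yaml
kubectl describe configmap mysql -n magedu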

创建service

[root@k8s-master1 mysql]# cat mysql-services.yaml 
# Headless service for stable DNS entries of StatefulSet members.
apiVersion: v1    #无头服务,访问解析为pod地址,轮询转发请求
kind: Service
metadata:
  namespace: magedu
  name: mysql
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  clusterIP: None
  selector:
    app: mysql
---
# Client service for connecting to any MySQL instance for reads.
# For writes, you must instead connect to the master: mysql-0.mysql.
apiVersion: v1
kind: Service    #只读service,可以定义selector将读请求转发到node节点
metadata:
  name: mysql-read
  namespace: magedu
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  selector:
    app: mysql

创建MySQL pod

[root@k8s-master1 mysql]# cat mysql-statefulset.yaml 
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
  namespace: magedu
spec:
  selector:
    matchLabels:
      app: mysql
  serviceName: mysql
  replicas: 3
  template:
    metadata:
      labels:
        app: mysql
    spec:
      initContainers:
      - name: init-mysql
        image: k8s-harbor.com/public/mysql:5.7 
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Generate mysql server-id from pod ordinal index.
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          echo [mysqld] > /mnt/conf.d/server-id.cnf
          # Add an offset to avoid reserved server-id=0 value.
          echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
          # Copy appropriate conf.d files from config-map to emptyDir.
          if [[ $ordinal -eq 0 ]]; then
            cp /mnt/config-map/master.cnf /mnt/conf.d/
          else
            cp /mnt/config-map/slave.cnf /mnt/conf.d/
          fi
        volumeMounts:
        - name: conf
          mountPath: /mnt/conf.d
        - name: config-map
          mountPath: /mnt/config-map
      - name: clone-mysql
        image: k8s-harbor.com/public/xtrabackup:v1
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Skip the clone if data already exists.
          [[ -d /var/lib/mysql/mysql ]] && exit 0
          # Skip the clone on master (ordinal index 0).
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          [[ $ordinal -eq 0 ]] && exit 0
          # Clone data from previous peer.
          ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
          # Prepare the backup.
          xtrabackup --prepare --target-dir=/var/lib/mysql
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
      containers:
      - name: mysql
        image: k8s-harbor.com/public/mysql:5.7 
        env:
        - name: MYSQL_ALLOW_EMPTY_PASSWORD
          value: "1"
        ports:
        - name: mysql
          containerPort: 3306
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 200m
            memory: 400Mi
        livenessProbe:
          exec:
            command: ["mysqladmin", "ping"]
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
        readinessProbe:
          exec:
            # Check we can execute queries over TCP (skip-networking is off).
            command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
          initialDelaySeconds: 5
          periodSeconds: 2
          timeoutSeconds: 1
      - name: xtrabackup
        image: k8s-harbor.com/public/xtrabackup:v1 
        ports:
        - name: xtrabackup
          containerPort: 3307
        command:
        - bash
        - "-c"
        - |
          set -ex
          cd /var/lib/mysql
          # Determine binlog position of cloned data, if any.
          if [[ -f xtrabackup_slave_info ]]; then
            # XtraBackup already generated a partial "CHANGE MASTER TO" query
            # because we're cloning from an existing slave.
            mv xtrabackup_slave_info change_master_to.sql.in
            # Ignore xtrabackup_binlog_info in this case (it's useless).
            rm -f xtrabackup_binlog_info
          elif [[ -f xtrabackup_binlog_info ]]; then
            # We're cloning directly from master. Parse binlog position.
            [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
            rm xtrabackup_binlog_info
            echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
                  MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
          fi
          # Check if we need to complete a clone by starting replication.
          if [[ -f change_master_to.sql.in ]]; then
            echo "Waiting for mysqld to be ready (accepting connections)"
            until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done
            echo "Initializing replication from clone position"
            # In case of container restart, attempt this at-most-once.
            mv change_master_to.sql.in change_master_to.sql.orig
            mysql -h 127.0.0.1 <<EOF
          $(<change_master_to.sql.orig),
            MASTER_HOST='mysql-0.mysql',
            MASTER_USER='root',
            MASTER_PASSWORD='',
            MASTER_CONNECT_RETRY=10;
          START SLAVE;
          EOF
          fi
          # Start a server to send backups when requested by peers.
          exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
            "xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
      volumes:
      - name: conf
        emptyDir: {}
      - name: config-map
        configMap:
          name: mysql
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 10Gi
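示例部署命令(文件名沿用上文):

kubectl apply -f mysql-services.yaml
kubectl apply -f mysql-statefulset.yaml
kubectl get pods -n magedu -l app=mysql
kubectl get pvc -n magedu | grep mysql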

验证数据同步

主节点创建,从节点验证 
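示例(库名testdb为示例;镜像设置了MYSQL_ALLOW_EMPTY_PASSWORD,root空密码登录):

kubectl exec -it mysql-0 -n magedu -- mysql -e "create database testdb;"
kubectl exec -it mysql-1 -n magedu -- mysql -e "show databases;"
# 也可通过只读service验证
kubectl run mysql-client --image=mysql:5.7 -it --rm --restart=Never -n magedu -- mysql -h mysql-read -e "show databases;"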

实战案例之WordPress

基于nginx和php实现wordpress博客站点,要求nginx和php运行在同一个pod的不同容器中,mysql用于数据的增删改查。

构建nginx和php镜像

nginx镜像

FROM k8s-harbor.com/public/nginx-base:v1.20.2

# 路径要写对。实际镜像路径/usr/local/nginx
ADD nginx.conf /apps/nginx/conf/nginx.conf
ADD run_nginx.sh /apps/nginx/sbin/run_nginx.sh
RUN mkdir -pv /home/nginx/wordpress
RUN chown nginx.nginx /home/nginx/wordpress/ -R

EXPOSE 80 443

CMD ["/apps/nginx/sbin/run_nginx.sh"]


[root@k8s-master1 nginx]# cat run_nginx.sh     #守护进程
#!/bin/bash
#echo "nameserver 10.20.254.254" > /etc/resolv.conf
#chown nginx.nginx /home/nginx/wordpress/ -R
/usr/sbin/nginx
tail -f /etc/hosts

nginx.conf 关键配置片段:

        location / {    #访问直接读取挂载目录下文件
            root    /home/nginx/wordpress;
            index   index.php index.html index.htm;
            #if ($http_user_agent ~ "ApacheBench|WebBench|TurnitinBot|Sogou web spider|Grid Service") {
            #    proxy_pass http://www.baidu.com;
            #    #return 403;
            #}
        }

        location ~ \.php$ {
            root           /home/nginx/wordpress;
            fastcgi_pass   127.0.0.1:9000;
            fastcgi_index  index.php;
            #fastcgi_param  SCRIPT_FILENAME  /scripts$fastcgi_script_name;
            fastcgi_param  SCRIPT_FILENAME  $document_root$fastcgi_script_name;
             include        fastcgi_params;
        }

php镜像

[root@k8s-master1 php]# cat Dockerfile 
#PHP Base Image
FROM k8s-harbor.com/public/centos-base:7.8.2003


RUN yum install -y  https://mirrors.tuna.tsinghua.edu.cn/remi/enterprise/remi-release-7.rpm && yum install  php56-php-fpm php56-php-mysql -y 
ADD www.conf /opt/remi/php56/root/etc/php-fpm.d/www.conf
RUN useradd nginx -u 2022
ADD run_php.sh /usr/local/bin/run_php.sh
EXPOSE 9000

CMD ["/usr/local/bin/run_php.sh"]


[root@k8s-master1 php]# cat run_php.sh #要有可执行权限
#!/bin/bash
#echo "nameserver 10.20.254.254" > /etc/resolv.conf

/opt/remi/php56/root/usr/sbin/php-fpm
#/opt/remi/php56/root/usr/sbin/php-fpm --nodaemonize
tail -f /etc/hosts
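示例构建命令(分别在nginx与php的Dockerfile目录下执行,镜像名与后文deployment中引用一致):

docker build -t k8s-harbor.com/public/wordpress-nginx:20220515 .
docker push k8s-harbor.com/public/wordpress-nginx:20220515
docker build -t k8s-harbor.com/public/wordpress-php-5.6:20220515 .
docker push k8s-harbor.com/public/wordpress-php-5.6:20220515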

创建wordpress 基础pod

[root@k8s-master1 wordpress]# cat wordpress.yaml 
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: wordpress-app
  name: wordpress-app-deployment
  namespace: magedu
spec:
  replicas: 1
  selector:
    matchLabels:
      app: wordpress-app
  template:
    metadata:
      labels:
        app: wordpress-app
    spec:
      containers:
      - name: wordpress-app-nginx
        image: k8s-harbor.com/public/wordpress-nginx:20220515
        imagePullPolicy: Always
        ports:
        - containerPort: 80
          protocol: TCP
          name: http
        - containerPort: 443
          protocol: TCP
          name: https
        volumeMounts:
        - name: wordpress
          mountPath: /home/nginx/wordpress
          readOnly: false

      - name: wordpress-app-php
        image: k8s-harbor.com/public/wordpress-php-5.6:20220515
        #image: harbor.magedu.net/magedu/php:5.6.40-fpm 
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 9000
          protocol: TCP
          name: http
        volumeMounts:
        - name: wordpress
          mountPath: /home/nginx/wordpress
          readOnly: false

      volumes:
      - name: wordpress
        nfs:
          server: 192.168.226.144
          path: /data/k8sdata/wordpress 


---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: wordpress-app
  name: wordpress-app-spec
  namespace: magedu
spec:
  type: NodePort
  ports:
  - name: http
    port: 8089
    protocol: TCP
    targetPort: 80
    nodePort: 30001
  - name: https
    port: 443
    protocol: TCP
    targetPort: 443
    nodePort: 30033
  selector:
    app: wordpress-app

下载wordpress包并解压到挂载目录
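示例(在NFS服务器192.168.226.144上执行;下载地址与版本为示例,uid 2022与php镜像内nginx用户保持一致,nginx镜像内用户uid如不同需一并处理):

mkdir -p /data/k8sdata/wordpress && cd /data/k8sdata/wordpress
wget https://cn.wordpress.org/latest-zh_CN.tar.gz
tar xf latest-zh_CN.tar.gz --strip-components=1
chown -R 2022:2022 /data/k8sdata/wordpress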

 安装wordpress并验证

创建数据库并授予权限:登录MySQL主节点执行以下SQL;wordpress安装时填写的数据库主机地址,可登录MySQL主节点查看hosts确认

create database wordpress;
grant all privileges on wordpress.* to "wordpress"@"%" identified by "wordpress";

flush privileges;

 

 

实战案例 zookeeper注册中心

不需要service:provider将自己的地址注册到注册中心,consumer从注册中心获取provider地址。即使provider地址变动,注册中心也会实时更新

构建provider镜像和consumer镜像

FROM k8s-harbor.com/public/jdk-base:v8.212


RUN yum install file nc -y
RUN mkdir -p /apps/dubbo/provider
RUN useradd nginx
# 版本jar包
ADD dubbo-demo-provider-2.1.5/  /apps/dubbo/provider
ADD run_java.sh /apps/dubbo/provider/bin 
RUN chown nginx.nginx /apps -R
RUN chmod a+x /apps/dubbo/provider/bin/*.sh

CMD ["/apps/dubbo/provider/bin/run_java.sh"]


# 注册中心地址,配置文件:dubbo-demo-provider-2.1.5/conf/dubbo.properties
dubbo.registry.address=zookeeper://zookeeper1.magedu.svc.k8s.local:2181 | zookeeper://zookeeper2.magedu.svc.k8s.local:2181 | zookeeper://zookeeper3.magedu.svc.k8s.local:2181
[root@k8s-master1 consumer]# cat Dockerfile 
#Dubbo consumer
FROM k8s-harbor.com/public/jdk-base:v8.212

RUN useradd nginx
RUN yum install file -y
RUN mkdir -p /apps/dubbo/consumer 
ADD dubbo-demo-consumer-2.1.5  /apps/dubbo/consumer
ADD run_java.sh /apps/dubbo/consumer/bin 
RUN chown nginx.nginx /apps -R
RUN chmod a+x /apps/dubbo/consumer/bin/*.sh

CMD ["/apps/dubbo/consumer/bin/run_java.sh"]

创建provider

[root@k8s-master1 provider]# cat provider.yaml 
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: magedu-provider
  name: magedu-provider-deployment
  namespace: magedu
spec:
  replicas: 1
  selector:
    matchLabels:
      app: magedu-provider
  template:
    metadata:
      labels:
        app: magedu-provider
    spec:
      containers:
      - name: magedu-provider-container
        image: k8s-harbor.com/public/dubbo-demo-provider:v1
        #command: ["/apps/tomcat/bin/run_tomcat.sh"]
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 20880
          protocol: TCP
          name: http

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: magedu-provider
  name: magedu-provider-spec
  namespace: magedu
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 20880
    #nodePort: 30001
  selector:
    app: magedu-provider

验证
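示例(部署provider并确认其已在zookeeper注册,dubbo默认注册在/dubbo节点下):

kubectl apply -f provider.yaml
kubectl get pods -n magedu -l app=magedu-provider
kubectl exec -n magedu deploy/zookeeper1 -- zkCli.sh -server 127.0.0.1:2181 ls /dubbo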

 

创建consumer

 

[root@k8s-master1 consumer]# cat consumer.yaml 
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: magedu-consumer
  name: magedu-consumer-deployment
  namespace: magedu
spec:
  replicas: 1
  selector:
    matchLabels:
      app: magedu-consumer
  template:
    metadata:
      labels:
        app: magedu-consumer
    spec:
      containers:
      - name: magedu-consumer-container
        image: k8s-harbor.com/public/dubbo-demo-consumer:v1
        #command: ["/apps/tomcat/bin/run_tomcat.sh"]
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 80
          protocol: TCP
          name: http

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: magedu-consumer
  name: magedu-consumer-server
  namespace: magedu
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    #nodePort: 30001
  selector:
    app: magedu-consumer

验证
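示例(部署consumer并通过日志确认其能从注册中心发现并调用provider):

kubectl apply -f consumer.yaml
kubectl get pods -n magedu -l app=magedu-consumer
kubectl logs -n magedu deploy/magedu-consumer-deployment --tail=20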

 

 

 

HPA控制器(水平)

根据当前pod的负载,动态调整pod副本数量:业务高峰期自动扩容pod副本数以尽快响应业务请求,业务低峰期对pod进行缩容,实现降本增效的目的。公有云还支持node级别的弹性伸缩。

调整副本数方法
1、在dashboard调整
2、 kubectl scale deployment magedu-provider-deployment --replicas=2 -n magedu
水平pod自动缩放器(HPA):基于pod资源利用率横向调整pod副本数量。
垂直pod自动缩放器(VPA):基于pod资源利用率,调整对单个pod的最大资源限制,不能与HPA同时使用。
集群伸缩(Cluster Autoscaler,CA):基于集群中node资源分配情况,动态伸缩node节点,从而保证有CPU和内存资源用于创建pod.

HPA控制器简介

Horizontal Pod Autoscaling (HPA)控制器,根据预定义好的阈值及pod当前的资源利用率,自动控制在k8s集群中运行的pod数量(实现pod的自动水平弹性伸缩).

--horizontal-pod-autoscaler-sync-period #HPA控制器查询metrics资源使用情况并同步pod副本数的间隔周期,默认15s.

--horizontal-pod-autoscaler-downscale-stabilization #缩容间隔周期,默认5分钟.

--horizontal-pod-autoscaler-cpu-initialization-period #初始化延迟时间,在此时间内pod的CPU资源指标将不会生效,默认为5分钟.

--horizontal-pod-autoscaler-initial-readiness-delay #用于设置pod准备时间,在此时间内的pod被认为未就绪、不采集数据,默认为30秒.

--horizontal-pod-autoscaler-tolerance #HPA控制器能容忍的数据差异(浮点数,默认为0.1),即新的指标要与当前的阈值差异在0.1或以上,即要大于1+0.1=1.1,或小于1-0.1=0.9,比如阈值为CPU利用率50%,当前为80%,那么80/50=1.6>1.1则会触发扩容,反之会缩容.

即触发条件: avg(CurrentPodsConsumption)/Target > 1.1 或 < 0.9,即把N个pod的指标相加后除以pod数量得到平均值,再与阈值相除,结果大于1.1就扩容,小于0.9就缩容.

计算公式: TargetNumOfPods = ceil(sum(CurrentPodsCPUUtilization) / Target) #ceil表示向上取整,结果即目标pod副本数。

 实例

部署metrics-server

Metrics Server 是 Kubernetes 内置的容器资源指标来源。

Metrics Server 从node节点上的Kubelet收集资源指标,并通过Metrics API在Kubernetes apiserver中公开指标数据,以供Horizontal Pod Autoscaler和Vertical Pod Autoscaler使用,也可以通过kubectl top node/pod命令查看指标数据。
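示例部署命令(清单地址以metrics-server官方仓库为准;自签证书环境可能需要为metrics-server容器增加--kubelet-insecure-tls参数):

kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
kubectl get pods -n kube-system | grep metrics-server
kubectl top node
kubectl top pod -n magedu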

 

创建pod

[root@k8s-master1 hpa]# cat tomcat-app1.yaml 
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: magedu-tomcat-app1-deployment-label
  name: magedu-tomcat-app1-deployment
  namespace: magedu
spec:
  replicas: 2
  selector:
    matchLabels:
      app: magedu-tomcat-app1-selector
  template:
    metadata:
      labels:
        app: magedu-tomcat-app1-selector
    spec:
      containers:
      - name: magedu-tomcat-app1-container
        #image: harbor.magedu.local/magedu/tomcat-app1:v7
        image: tomcat:7.0.93-alpine 
        #image: k8s-harbor.com/public/docker-stress-ng:v1
        #args: ["--vm", "2", "--vm-bytes", "256M"]
        #command: ["/apps/tomcat/bin/run_tomcat.sh"]
        imagePullPolicy: IfNotPresent
        ##imagePullPolicy: Always
        ports:
        - containerPort: 8080
          protocol: TCP
          name: http
        env:
        - name: "password"
          value: "123456"
        - name: "age"
          value: "18"
        resources:
          limits:
            cpu: 1
            memory: "512Mi"
          requests:
            cpu: 500m
            memory: "500Mi"

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: magedu-tomcat-app1-service-label
  name: magedu-tomcat-app1-service
  namespace: magedu
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
    #nodePort: 40003
  selector:
    app: magedu-tomcat-app1-selector
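示例部署命令(文件名沿用上文tomcat-app1.yaml):

kubectl apply -f tomcat-app1.yaml
kubectl get pods -n magedu | grep tomcat-app1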

 创建hpa控制器

[root@k8s-master1 hpa]# cat hpa.yaml 
#apiVersion: autoscaling/v2beta1
apiVersion: autoscaling/v1 
kind: HorizontalPodAutoscaler
metadata:
  namespace: magedu
  name: magedu-tomcat-app1-podautoscaler
  labels:
    app: magedu-tomcat-app1
    version: v2beta1
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    #apiVersion: extensions/v1beta1 
    kind: Deployment
    name: magedu-tomcat-app1-deployment #目标名称
  minReplicas: 3    #最小pod数
  maxReplicas: 10    #最大pod数
  targetCPUUtilizationPercentage: 10    #定义cpu阈值,超过这个值就会扩容
  #metrics:
  #- type: Resource
  #  resource:
  #    name: cpu
  #    targetAverageUtilization: 60
  #- type: Resource
  #  resource:
  #    name: memory
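示例:创建HPA后观察副本数随负载变化(压测命令中的NODE-IP与NODEPORT为占位符,按kubectl get svc查询到的实际值替换;ab工具需自行安装):

kubectl apply -f hpa.yaml
kubectl get hpa -n magedu
ab -n 100000 -c 100 http://NODE-IP:NODEPORT/
kubectl get hpa magedu-tomcat-app1-podautoscaler -n magedu -w
kubectl get pods -n magedu | grep tomcat-app1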

 
