Machines: Ubuntu 20.04, Kubernetes 1.23
master | 192.168.111.141 |
node01 | 192.168.111.142 |
node02 | 192.168.111.143 |
nfs    | 192.168.111.144 |
1. Configure NFS
Set up NFS to provide the backing store for persistent volumes.
Install the NFS server on the nfs host (on Ubuntu the nfs-server package name resolves to nfs-kernel-server):
apt -y install nfs-server
Create a directory to serve as the backend storage:
mkdir /data-share
# adjust ownership and permissions
chown nobody:nogroup /data-share/
chmod 777 /data-share/
Edit /etc/exports, the file that controls which directories are exported:
root@nfs:~# cat /etc/exports
# /etc/exports: the access control list for filesystems which may be exported
# to NFS clients. See exports(5).
#
# Example for NFSv2 and NFSv3:
# /srv/homes hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
#
# Example for NFSv4:
# /srv/nfs4 gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
# /srv/nfs4/homes gss/krb5i(rw,sync,no_subtree_check)
#
/data-share 192.168.111.0/24(rw,sync,no_root_squash,no_subtree_check)
Run the following command to reload the NFS exports:
root@nfs:~# exportfs -arv
root@nfs:~# showmount -e 127.0.0.1
Export list for 127.0.0.1:
/data-share 192.168.111.0/24
On the clients, i.e. node01 and node02, install nfs-common:
apt -y install nfs-common
Mount the NFS share from node01 and node02 to verify that it works.
# mount
root@node01:~# mount -t nfs 192.168.111.144:/data-share /data
# check with df -h
root@node01:/data# df -h
Filesystem Size Used Avail Use% Mounted on
tmpfs 389M 2.4M 387M 1% /run
/dev/mapper/ubuntu--vg-ubuntu--lv 29G 13G 15G 46% /
tmpfs 1.9G 0 1.9G 0% /dev/shm
tmpfs 5.0M 0 5.0M 0% /run/lock
/dev/sda2 1.5G 246M 1.2G 18% /boot
tmpfs 389M 4.0K 389M 1% /run/user/0
nfs:/data-share 29G 7.8G 20G 29% /data
# test that the folder is actually shared
root@node01:/data# echo "Hello,I'm node01" >> hello
root@node01:/data# cat hello
Hello,I'm node01
# switch to the nfs server and check
root@nfs:~# cd /data-share/
root@nfs:/data-share# ll
total 12
drwxrwxrwx 2 nobody nogroup 4096 Oct 20 12:16 ./
drwxr-xr-x 20 root root 4096 Oct 20 03:47 ../
-rw-r--r-- 1 nobody nogroup 17 Oct 20 12:17 hello
root@nfs:/data-share# cat hello
Hello,I'm node01
The mount succeeds, and files written on the client show up on the server. Test node02 the same way.
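Once the test passes, the manual mount can be removed again on each node; it was only for verification, since the pods later mount the export themselves (this cleanup step is my addition, not part of the original walkthrough):
root@node01:~# umount /data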
2. Provision PVs for persistent storage
1. Install the NFS provisioner:
Download the provisioner's YAML file and adjust its configuration.
Download URL: https://raw.githubusercontent.com/kubernetes-sigs/nfs-subdir-external-provisioner/master/deploy/deployment.yaml
root@master01:/opt/k8s/controller/mysql# wget https://raw.githubusercontent.com/kubernetes-sigs/nfs-subdir-external-provisioner/master/deploy/deployment.yaml
# rename it so it is easier to recognize
root@master01:/opt/k8s/controller/mysql# mv deployment.yaml nfs-deploy.yaml
## nfs-deploy.yaml now looks like this
root@master01:/opt/k8s/controller/mysql# cat nfs-deploy.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: nfs-pro
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs-pro # changed to the namespace created above
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: registry.cn-hangzhou.aliyuncs.com/smxy-cc/nfs-subdir-external-provisioner:v4.0.2 # swapped image; pulling the original one may fail
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 192.168.111.144 # changed to our NFS server's IP address
            - name: NFS_PATH
              value: /data-share # changed to the directory we want to mount
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.111.144 # changed to our NFS server's IP address
            path: /data-share # changed to the directory we want to mount
## create it
root@master01:/opt/k8s/controller/mysql# kubectl apply -f nfs-deploy.yaml
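One ordering note: this Deployment references the ServiceAccount nfs-client-provisioner, which is only created by the RBAC manifest in the next step, so the provisioner pod cannot start until that is applied as well. Afterwards the rollout can be checked with:
root@master01:/opt/k8s/controller/mysql# kubectl get deploy,pods -n nfs-pro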
Create the rbac.yaml file (remember to set the namespace to the nfs-pro namespace created above):
[root@master01/opt/k8s/controller/mysql] # cat nfs-rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner # must match the name used in the deployment file
  namespace: nfs-pro
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: nfs-pro
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs-pro
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs-pro
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: nfs-pro
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
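Before the provisioner can actually run, the RBAC objects have to be applied as well:
[root@master01/opt/k8s/controller/mysql] # kubectl apply -f nfs-rbac.yaml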
Create the StorageClass (with a StorageClass in place we no longer need to create PVs one by one):
[root@master01/opt/k8s/controller/mysql] # cat nfs-sc.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storage # the StorageClass name; needed when creating PVCs
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner # the NFS provisioner name; must match PROVISIONER_NAME in the deployment
parameters:
  archiveOnDelete: "false"
[root@master01/opt/k8s/controller/mysql] # kubectl apply -f nfs-sc.yaml
Create the PVC:
[root@master01/opt/k8s/controller/mysql] # cat nfs-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-pvc
  namespace: nfs-pro
spec:
  accessModes:
    - ReadWriteMany
  volumeMode: Filesystem
  storageClassName: nfs-storage # must match the StorageClass created above
  resources:
    requests:
      storage: 8Gi
[root@master01/opt/k8s/controller/mysql] # kubectl apply -f nfs-pvc.yaml
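If dynamic provisioning is working, the claim binds within a few seconds; the PVC should report STATUS Bound, with a matching auto-created PV (a verification step added here for completeness):
[root@master01/opt/k8s/controller/mysql] # kubectl get pvc -n nfs-pro
[root@master01/opt/k8s/controller/mysql] # kubectl get pv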
3. Implement MySQL master-slave replication
To implement master-slave replication we need to solve three problems:
- The master and slave nodes must have different configuration files (my.cnf).
- The master and slave nodes must be able to transfer backup files between them.
- Before a slave node starts for the first time, some initialization SQL has to run.
Let's work through them.
Since this deployment is fairly complex, we first define a skeleton and then fill configuration into it; a sketch of the skeleton follows this overview.
A quick tour of the skeleton:
The controller is a StatefulSet. A replicated MySQL cluster consists of stateful pods, and StatefulSet is built precisely for stateful workloads, so it is the natural choice; more on this when we get to the actual deployment.
selector: the controller manages only the pods carrying the label app: mysql.
serviceName: the biggest difference from a Deployment; it tells the StatefulSet to use the mysql Headless Service during its control loop, which gives every pod a stable, resolvable identity.
replicas: controls the number of pods.
template: the pod template. Its initContainers run before the main containers; all three problems above hinge on deciding whether a pod is the master or a slave, and the init containers are where that decision is made.
volumeClaimTemplates: the cluster has storage state, so each pod gets its own PV to persist its data.
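A minimal sketch of that skeleton, showing only the fields just discussed (the complete, runnable manifest appears in section 5 below):
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql           # manage only pods labeled app: mysql
  serviceName: mysql       # the Headless Service that gives pods stable identities
  replicas: 3
  template:
    metadata:
      labels:
        app: mysql
    spec:
      initContainers: []   # master/slave detection and data cloning go here
      containers: []       # the mysql container and the xtrabackup sidecar go here
  volumeClaimTemplates: [] # one PVC per pod for persistent storage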
Next, create two Services, one for the master side and one for reads. The master side uses a Headless Service, which gives each pod a stable DNS name so writes can target the master directly; the read side (mysql-read) is a normal Service that load-balances across the pods.
[root@master01/opt/k8s/controller/mysql] # cat mysql-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: mysql
  labels:
    app: mysql
spec:
  ports:
    - name: mysql
      port: 3306
  clusterIP: None
  selector:
    app: mysql
---
apiVersion: v1
kind: Service
metadata:
  name: mysql-read
  labels:
    app: mysql
spec:
  ports:
    - name: mysql
      port: 3306
  selector:
    app: mysql
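Apply the Services as well:
[root@master01/opt/k8s/controller/mysql] # kubectl apply -f mysql-svc.yaml
Because mysql is headless (clusterIP: None), every pod becomes resolvable as <pod-name>.mysql, e.g. mysql-0.mysql; the replication scripts below depend on exactly these names.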
That is the whole skeleton. Next, let's solve the three problems raised above one by one.
1. The master and slave nodes must have different configuration files (my.cnf)
A ConfigMap is a convenient way to mount the files a pod needs, so we write one to hold the two configuration files.
ConfigMap.yaml
[root@master01/opt/k8s/controller/mysql] # cat ConfigMap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql
  labels:
    app: mysql
data:
  master.cnf: |
    # MySQL configuration for the master node
    [mysqld]
    log-bin
  slave.cnf: |
    # MySQL configuration for the slave nodes
    [mysqld]
    super-read-only
A ConfigMap is simply a set of key-value pairs: the name before the | is the key, and the block after it is the value. The master node gets log-bin, which enables binary logging; the slave nodes get super-read-only, which allows reads only.
Create the ConfigMap and inspect it.
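Apply it first so the describe below has something to show:
[root@master01/opt/k8s/controller/mysql] # kubectl apply -f ConfigMap.yaml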
[root@master01/opt/k8s/controller/mysql] # kubectl describe cm mysql
Name: mysql
Namespace: default
Labels: app=mysql
Annotations: <none>
Data
====
master.cnf:
----
# 主节点MySQL的配置文件
[mysqld]
log-bin
slave.cnf:
----
# 从节点MySQL的配置文件
[mysqld]
super-read-only
BinaryData
====
Events: <none>
Mount the ConfigMap into the pod as a volume named config-map:
- name: config-map
  configMap:
    name: mysql
Next we need to place master.cnf and slave.cnf on their corresponding nodes.
The code, annotated:
initContainers:
- name: init-mysql
  image: mysql:5.7
  command:
  - bash
  - "-c"
  - |
    # Stop immediately if any command below fails, and trace each command as it runs.
    set -ex
    # Step one: regex-match the hostname; if it does not end in -<digits>, exit.
    [[ $HOSTNAME =~ -([0-9]+)$ ]] || exit 1
    # ordinal = the number after the '-' in the hostname
    ordinal=${BASH_REMATCH[1]}
    # Write [mysqld] into the file, which creates server-id.cnf.
    echo [mysqld] > /mnt/conf.d/server-id.cnf
    # server-id=0 has a special meaning, so add 100 to stay clear of it.
    echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
    # Decide master vs. slave: ordinal 0 becomes the master, everything else a slave.
    # The volumeMounts below (config-map) place the two ConfigMap files under /mnt/config-map.
    # If the pod's ordinal is 0 (master), copy /mnt/config-map/master.cnf into /mnt/conf.d;
    # otherwise (slave), copy /mnt/config-map/slave.cnf into /mnt/conf.d.
    # /mnt/conf.d itself is the mounted volume named conf.
    if [[ $ordinal -eq 0 ]]; then
      cp /mnt/config-map/master.cnf /mnt/conf.d/
    else
      cp /mnt/config-map/slave.cnf /mnt/conf.d/
    fi
  # The mounts, as explained above.
  volumeMounts:
  - name: conf
    mountPath: /mnt/conf.d
  - name: config-map
    mountPath: /mnt/config-map
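The ordinal-extraction trick is easy to sanity-check on any machine with bash; this little demo is an illustration of mine, not part of the manifest:
HOSTNAME=mysql-2
[[ $HOSTNAME =~ -([0-9]+)$ ]] && echo "ordinal=${BASH_REMATCH[1]}"   # prints ordinal=2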
2. The master and slave nodes must be able to transfer backup files.
Here we use XtraBackup, the leading open-source MySQL backup and recovery tool, to copy the master node's data into the target directory. The backup automatically produces a file called xtrabackup_binlog_info containing two values that we need when initializing a slave.
The code, annotated:
- name: clone-mysql
  image: yizhiyong/xtrabackup
  command:
  - bash
  - "-c"
  - |
    set -ex
    # The clone runs only on first startup; if the data directory already exists,
    # this is not the first start, so exit.
    [[ -d /var/lib/mysql/mysql ]] && exit 0
    # Exit unless the hostname ends in -<digits>.
    [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
    # ordinal = the number after the '-'
    ordinal=${BASH_REMATCH[1]}
    # If the ordinal is 0, this pod is mysql-0 (the master), which has nothing to
    # clone, so exit. Every other pod is a slave and clones its data from a peer.
    [[ $ordinal -eq 0 ]] && exit 0
    # Connect with ncat to port 3307 on the previous peer and unpack the xbstream
    # it sends into /var/lib/mysql (MySQL's data directory).
    ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
    # Prepare the received data in /var/lib/mysql so it is consistent and usable.
    xtrabackup --prepare --target-dir=/var/lib/mysql
  # Mount the cloned data so the mysql container below can use it.
  volumeMounts:
  - name: data
    mountPath: /var/lib/mysql
  - name: conf
    mountPath: /etc/mysql/conf.d
3. Initialize the slave nodes
At this point we have the configuration files (master.cnf, slave.cnf) and the backup data, but a slave still needs some SQL run against it before it can replicate. We use one more container, a sidecar, to run that initialization SQL on the slaves.
The code, annotated:
- name: xtrabackup
  image: yizhiyong/xtrabackup
  ports:
  - name: xtrabackup
    containerPort: 3307
  command:
  - bash
  - "-c"
  - |
    set -ex
    # Through the volumeMounts below, /var/lib/mysql holds whatever clone-mysql
    # fetched; work inside that directory.
    cd /var/lib/mysql
    # Determine whether the cloned data came from another slave; if so, XtraBackup
    # left an xtrabackup_slave_info file whose contents we can use directly.
    if [[ -f xtrabackup_slave_info && "x$(<xtrabackup_slave_info)" != "x" ]]; then
      # Keep the useful part (minus the trailing semicolon) in change_master_to.sql.in.
      cat xtrabackup_slave_info | sed -E 's/;$//g' > change_master_to.sql.in
      # Delete the info files once used; initialization must run only once.
      rm -f xtrabackup_slave_info xtrabackup_binlog_info
    # Otherwise the data was cloned from the master, and the two useful values have
    # to be extracted from xtrabackup_binlog_info.
    elif [[ -f xtrabackup_binlog_info ]]; then
      # As mentioned above, XtraBackup creates xtrabackup_binlog_info, whose two
      # fields are exactly what slave initialization needs.
      [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
      # Delete it after parsing so we never initialize twice.
      rm -f xtrabackup_binlog_info xtrabackup_slave_info
      # Write the extracted values into change_master_to.sql.in.
      echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
            MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
    fi
    # If the file exists, initialization still needs to happen.
    if [[ -f change_master_to.sql.in ]]; then
      # The mysql container must be up before we can run SQL, so poll until it answers.
      echo "Waiting for mysqld to be ready (accepting connections)"
      until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done
      # Initialize: splice change_master_to.sql.in into a complete SQL statement.
      echo "Initializing replication from clone position"
      mysql -h 127.0.0.1 \
            -e "$(<change_master_to.sql.in), \
                    MASTER_HOST='mysql-0.mysql', \
                    MASTER_USER='root', \
                    MASTER_PASSWORD='', \
                    MASTER_CONNECT_RETRY=10; \
                  START SLAVE;" || exit 1
      # Rename the file so a container restart does not initialize again.
      mv change_master_to.sql.in change_master_to.sql.orig
    fi
    # Open port 3307 with ncat; this is the port the clone step above connects to.
    # On each incoming request it runs "xtrabackup --backup" and streams the MySQL
    # data back to the requester.
    exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
      "xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"
  volumeMounts:
  - name: data
    mountPath: /var/lib/mysql
  - name: conf
    mountPath: /etc/mysql/conf.d
  # Keep the resource requests small; this container is only a helper.
  resources:
    requests:
      cpu: 100m
      memory: 100Mi
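For reference, xtrabackup_binlog_info is a single line containing the binlog file name and position separated by whitespace; the values below are purely illustrative:
mysql-bin.000003    481
The regex above captures the file name into BASH_REMATCH[1] and the position into BASH_REMATCH[2], which become MASTER_LOG_FILE and MASTER_LOG_POS.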
4. The mysql container itself
With the three problems solved, we can look at the definition of the mysql container itself, which is very simple.
The code, annotated:
- name: mysql
  image: mysql:5.7
  env:
  - name: MYSQL_ALLOW_EMPTY_PASSWORD
    value: "1"
  ports:
  - name: mysql
    containerPort: 3306
  volumeMounts:
  - name: data
    mountPath: /var/lib/mysql
  - name: conf
    mountPath: /etc/mysql/conf.d
  resources:
    requests:
      cpu: 500m
      memory: 1Gi
  # The fields above are self-explanatory.
  # Below are health probes for the container; see my earlier article for details.
  livenessProbe:
    exec:
      # Ping the server itself; if the ping fails, the container is considered unhealthy.
      command: ["mysqladmin", "ping"]
    # Start probing 30s after the container starts.
    initialDelaySeconds: 30
    # Probe every 10s.
    periodSeconds: 10
    # A probe taking longer than 5s counts as a failure.
    timeoutSeconds: 5
  readinessProbe:
    exec:
      # Check we can execute queries over TCP (skip-networking is off).
      command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
    initialDelaySeconds: 5
    periodSeconds: 2
    timeoutSeconds: 1
At this point, all the preparation is done.
5. The complete YAML file
[root@master01/opt/k8s/controller/mysql] # cat mysql-statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
      app.kubernetes.io/name: mysql
  serviceName: mysql
  replicas: 3
  template:
    metadata:
      labels:
        app: mysql
        app.kubernetes.io/name: mysql
    spec:
      initContainers:
      - name: init-mysql
        image: mysql:5.7
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Generate mysql server-id from pod ordinal index.
          [[ $HOSTNAME =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          echo [mysqld] > /mnt/conf.d/server-id.cnf
          # Add an offset to avoid reserved server-id=0 value.
          echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
          # Copy appropriate conf.d files from config-map to emptyDir.
          if [[ $ordinal -eq 0 ]]; then
            cp /mnt/config-map/master.cnf /mnt/conf.d/
          else
            cp /mnt/config-map/slave.cnf /mnt/conf.d/
          fi
        volumeMounts:
        - name: conf
          mountPath: /mnt/conf.d
        - name: config-map
          mountPath: /mnt/config-map
      - name: clone-mysql
        image: gcr.io/google-samples/xtrabackup:1.0
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Skip the clone if data already exists.
          [[ -d /var/lib/mysql/mysql ]] && exit 0
          # Skip the clone on primary (ordinal index 0).
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          [[ $ordinal -eq 0 ]] && exit 0
          # Clone data from previous peer.
          ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
          # Prepare the backup.
          xtrabackup --prepare --target-dir=/var/lib/mysql
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
      containers:
      - name: mysql
        image: mysql:5.7
        env:
        - name: MYSQL_ALLOW_EMPTY_PASSWORD
          value: "1"
        ports:
        - name: mysql
          containerPort: 3306
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 500m
            memory: 1Gi
        livenessProbe:
          exec:
            command: ["mysqladmin", "ping"]
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
        readinessProbe:
          exec:
            # Check we can execute queries over TCP (skip-networking is off).
            command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
          initialDelaySeconds: 5
          periodSeconds: 2
          timeoutSeconds: 1
      - name: xtrabackup
        image: gcr.io/google-samples/xtrabackup:1.0
        ports:
        - name: xtrabackup
          containerPort: 3307
        command:
        - bash
        - "-c"
        - |
          set -ex
          cd /var/lib/mysql
          # Determine binlog position of cloned data, if any.
          if [[ -f xtrabackup_slave_info && "x$(<xtrabackup_slave_info)" != "x" ]]; then
            # XtraBackup already generated a partial "CHANGE MASTER TO" query
            # because we're cloning from an existing replica. (Need to remove the tailing semicolon!)
            cat xtrabackup_slave_info | sed -E 's/;$//g' > change_master_to.sql.in
            # Ignore xtrabackup_binlog_info in this case (it's useless).
            rm -f xtrabackup_slave_info xtrabackup_binlog_info
          elif [[ -f xtrabackup_binlog_info ]]; then
            # We're cloning directly from primary. Parse binlog position.
            [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
            rm -f xtrabackup_binlog_info xtrabackup_slave_info
            echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
                  MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
          fi
          # Check if we need to complete a clone by starting replication.
          if [[ -f change_master_to.sql.in ]]; then
            echo "Waiting for mysqld to be ready (accepting connections)"
            until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done
            echo "Initializing replication from clone position"
            mysql -h 127.0.0.1 \
                  -e "$(<change_master_to.sql.in), \
                          MASTER_HOST='mysql-0.mysql', \
                          MASTER_USER='root', \
                          MASTER_PASSWORD='', \
                          MASTER_CONNECT_RETRY=10; \
                        START SLAVE;" || exit 1
            # In case of container restart, attempt this at-most-once.
            mv change_master_to.sql.in change_master_to.sql.orig
          fi
          # Start a server to send backups when requested by peers.
          exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
            "xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
      volumes:
      - name: conf
        emptyDir: {}
      - name: config-map
        configMap:
          name: mysql
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      storageClassName: "nfs-storage"
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 5Gi
Create the StatefulSet:
kubectl apply -f mysql-statefulset.yaml
It is created successfully:
[root@master01/opt/k8s/controller/mysql] # kubectl get pods -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
mysql-0 2/2 Running 0 10m 10.220.140.73 node02 <none> <none>
mysql-1 2/2 Running 0 10m 10.220.196.133 node01 <none> <none>
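Since replicas is 3, mysql-2 should appear once mysql-1 is ready (StatefulSet pods start in order; the listing above was presumably taken before it came up). Each replica also gets its own PVC from the nfs-storage class, named data-mysql-<ordinal>, which can be confirmed with:
[root@master01/opt/k8s/controller/mysql] # kubectl get pvc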
6. Verification
Write data through the master node:
kubectl run mysql-client --image=mysql:5.7 -i --rm --restart=Never --\
mysql -h mysql-0.mysql <<EOF
CREATE DATABASE test;
CREATE TABLE test.messages (message VARCHAR(250));
INSERT INTO test.messages VALUES ('hello');
EOF
Read the data back through the mysql-read service:
kubectl run mysql-client --image=mysql:5.7 -i -t --rm --restart=Never --\
mysql -h mysql-read -e "SELECT * FROM test.messages"
Waiting for pod default/mysql-client to be running, status is Pending, pod ready: false
+---------+
| message |
+---------+
| hello |
+---------+
pod "mysql-client" deleted
Now connect to mysql-1 and query. The data written through mysql-0 is visible there, so master-slave replication works.
## these rows were added later, during my own testing, using the method above
[root@master01/opt/k8s/controller/mysql] # kubectl run -it mysql-client --image=mysql:5.7 --rm --restart=Never -- mysql -h mysql-1.mysql -e "select * from test.messages;"
+-------------------+
| message |
+-------------------+
| hello,kubernetes! |
| hello,mysql-read! |
+-------------------+
pod "mysql-client" deleted
Next, try to write data through mysql-1. It fails, and the error says the server is read-only and cannot execute the statement.
[root@master01/opt/k8s/controller/mysql] # kubectl run -it mysql-client --image=mysql:5.7 --rm --restart=Never -- mysql -h mysql-1.mysql -e "insert into test.messages values ('hello,I am mysql-1');"
ERROR 1290 (HY000) at line 1: The MySQL server is running with the --super-read-only option so it cannot execute this statement
pod "mysql-client" deleted
pod default/mysql-client terminated (Error)
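Replication state can also be inspected directly on a replica; a quick check along these lines (my addition; the exact output fields vary by version), where Slave_IO_Running and Slave_SQL_Running should both report Yes:
kubectl exec mysql-1 -c mysql -- mysql -h 127.0.0.1 -e "SHOW SLAVE STATUS\G"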
7. Troubleshooting
Finally, a record of an error I hit along the way.
root@master01:/opt/k8s/controller/mysql# kubectl get pods
NAME READY STATUS RESTARTS AGE
mysql-0 1/2 CrashLoopBackOff 3 (5s ago) 60s
root@master01:/opt/k8s/controller/mysql# kubectl logs mysql-0
error: a container name must be specified for pod mysql-0, choose one of: [mysql xtrabackup] or one of the init containers: [init-mysql clone-mysql]
root@master01:/opt/k8s/controller/mysql# kubectl logs mysql-0 -c clone-mysql
+ [[ -d /var/lib/mysql/mysql ]]
++ hostname
+ [[ mysql-0 =~ -([0-9]+)$ ]]
+ ordinal=0
+ [[ 0 -eq 0 ]]
+ exit 0
root@master01:/opt/k8s/controller/mysql# kubectl logs mysql-0 -c mysql
2022-10-21 17:09:31+00:00 [Note] [Entrypoint]: Entrypoint script for MySQL Server 5.7.36-1debian10 started.
chown: changing ownership of '/var/lib/mysql/': Operation not permitted
root@master01:/opt/k8s/controller/mysql# kubectl logs mysql-0 -c xtrabackup
+ cd /var/lib/mysql
+ [[ -f xtrabackup_slave_info ]]
+ [[ -f xtrabackup_binlog_info ]]
+ [[ -f change_master_to.sql.in ]]
+ exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c 'xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root'
So creating the mysql pod failed. Digging through the logs above, the culprit is in the mysql container:
chown: changing ownership of '/var/lib/mysql/': Operation not permitted
That is a permissions problem. Checking my NFS configuration showed it was indeed wrong: the export was missing the no_root_squash option, so root inside the container was squashed to an unprivileged user and could not chown the data directory.
Change /etc/exports to the following:
root@nfs:/data-share# cat /etc/exports
# /etc/exports: the access control list for filesystems which may be exported
# to NFS clients. See exports(5).
#
# Example for NFSv2 and NFSv3:
# /srv/homes hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
#
# Example for NFSv4:
# /srv/nfs4 gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
# /srv/nfs4/homes gss/krb5i(rw,sync,no_subtree_check)
#
/data-share 192.168.111.0/24(rw,sync,no_root_squash,no_subtree_check)
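After changing /etc/exports, reload the exports; the failed pod then has to be recreated so the StatefulSet retries with the corrected permissions (the pod deletion is my assumed recovery step, not shown in the original run):
root@nfs:/data-share# exportfs -arv
root@master01:/opt/k8s/controller/mysql# kubectl delete pod mysql-0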