MySQL 运维篇 -- 路由集群搭建

目标

在这里插入图片描述

功用                       IP         Host
MySQL Shell                13.13.3.3  mysh
MySQL Router Active        13.13.4.4  router01
MySQL Router Standby       13.13.5.5  router02
Group Replication Member   13.13.6.6  db01
Group Replication Member   13.13.7.7  db02
Group Replication Member   13.13.8.8  db03

环境设定

  1. 配置YUM源(all)

下载地址:https://dev.mysql.com/downloads/repo/yum/

[root@mysh ~]# rpm -Uvh mysql80-community-release-el8-1.noarch.rpm 
warning: mysql80-community-release-el8-1.noarch.rpm: Header V3 DSA/SHA1 Signature, key ID 5072e1f5: NOKEY
Verifying...                          ################################# [100%]
Preparing...                          ################################# [100%]
Updating / installing...
   1:mysql80-community-release-el8-1  ################################# [100%]
[root@mysh ~]# 
  2. 软件安装
[root@mysh ~]# dnf install mysql-shell
[root@router01 ~]# dnf install mysql-router
[root@router02 ~]# dnf install mysql-router
[root@db01 ~]# dnf install @mysql
[root@db02 ~]# dnf install @mysql
[root@db03 ~]# dnf install @mysql
  3. 配置/etc/hosts(all)
[root@mysh ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

13.13.2.2 keepalived
13.13.3.3 mysh
13.13.4.4 router01
13.13.5.5 router02
13.13.6.6 db01
13.13.7.7 db02
13.13.8.8 db03
[root@mysh ~]#

数据库实例准备

  1. MySQL环境(db01-3)
[root@db01 ~]# cat /etc/my.cnf.d/mysql-server.cnf
[mysqld]
datadir=/var/lib/mysql
socket=/var/lib/mysql/mysql.sock
log-error=/var/log/mysql/mysqld.log
pid-file=/run/mysqld/mysqld.pid

#
# Replication configuration parameters
#
server-id=6
gtid-mode=on
enforce-gtid-consistency
log-slave-updates

[mysql]
prompt=db01 [\\d]>
[root@db01 ~]# 
[root@db02 ~]# cat /etc/my.cnf.d/mysql-server.cnf
....
server-id=7
....
prompt=db02 [\\d]>
[root@db02 ~]# 
[root@db03 ~]# cat /etc/my.cnf.d/mysql-server.cnf
....
server-id=8
....
prompt=db03 [\\d]>
[root@db03 ~]# 
  2. 配置mysql安全(db01-3)

注意:需开放端口33061,该端口用于Group Replication成员之间交换数据。

[root@db01 ~]# firewall-cmd --permanent --add-service=mysql
success
[root@db01 ~]# firewall-cmd --permanent --add-port=33061/tcp
success
[root@db01 ~]# firewall-cmd --reload
success
[root@db01 ~]# setsebool -P nis_enabled 1
[root@db01 ~]# systemctl start mysqld
[root@db01 ~]# 
  3. 创建远程管理用户(db01-3)
db01 [(none)]>set sql_log_bin=0;
Query OK, 0 rows affected (0.00 sec)

db01 [(none)]>create user root@'13.13.%' identified by 'abcd1234..';
Query OK, 0 rows affected (0.00 sec)

db01 [(none)]>grant all on *.* to root@'13.13.%' with grant option;
Query OK, 0 rows affected (0.00 sec)

db01 [(none)]>flush privileges;
Query OK, 0 rows affected (0.00 sec)

db01 [(none)]>set sql_log_bin=1;
Query OK, 0 rows affected (0.00 sec)

db01 [(none)]>

集群的创建

  1. 创建Cluster(mysh)
[root@mysh ~]# mysqlsh 
 MySQL  JS > shell.connect('root@db01:3306')
 MySQL  db01:3306 ssl  JS > cluster = dba.createCluster('dbs',{'localAddress':'db01:33061', 'ipWhitelist':'13.13.0.0/16'})
 MySQL  db01:3306 ssl  JS > cluster.addInstance('root@db02:3306',{'localAddress':'db02:33061', 'ipWhitelist':'13.13.0.0/16'})
 
NOTE: The target instance 'db02:3306' has not been pre-provisioned (GTID set is empty). 
The Shell is unable to decide whether incremental state recovery can correctly 
provision it.

The safest and most convenient way to provision a new instance is through automatic 
clone provisioning, which will completely overwrite the state of 'db02:3306' with a 
physical snapshot from an existing cluster member. To use this method by default, set 
the 'recoveryMethod' option to 'clone'.

The incremental state recovery may be safely used if you are sure all updates ever 
executed in the cluster were done with GTIDs enabled, there are no purged transactions 
and the new instance contains the same GTID set as the cluster or a subset of it. To 
use this method by default, set the 'recoveryMethod' option to 'incremental'.


Please select a recovery method [C]lone/[I]ncremental recovery/[A]bort (default Clone): C

The instance 'db02:3306' was successfully added to the cluster.

 MySQL  db01:3306 ssl  JS > cluster.addInstance('root@db03:3306',{'localAddress':'db03:33061', 'ipWhitelist':'13.13.0.0/16'})
 MySQL  db01:3306 ssl  JS > 
 MySQL  db01:3306 ssl  JS > cluster.status()
{
    "clusterName": "dbs", 
    "defaultReplicaSet": {
        "name": "default", 
        "primary": "db01:3306", 
        "ssl": "REQUIRED", 
        "status": "OK", 
        "statusText": "Cluster is ONLINE and can tolerate up to ONE failure.", 
        "topology": {
            "db01:3306": {
                "address": "db01:3306", 
                "mode": "R/W", 
                "readReplicas": {}, 
                "replicationLag": null, 
                "role": "HA", 
                "status": "ONLINE", 
                "version": "8.0.21"
            }, 
            "db02:3306": {
                "address": "db02:3306", 
                "mode": "R/O", 
                "readReplicas": {}, 
                "replicationLag": null, 
                "role": "HA", 
                "status": "ONLINE", 
                "version": "8.0.21"
            }, 
            "db03:3306": {
                "address": "db03:3306", 
                "mode": "R/O", 
                "readReplicas": {}, 
                "replicationLag": null, 
                "role": "HA", 
                "status": "ONLINE", 
                "version": "8.0.21"
            }
        }, 
        "topologyMode": "Single-Primary"
    }, 
    "groupInformationSourceMember": "db01:3306"
}
 MySQL  db01:3306 ssl  JS > 

路由的配置

  1. 初始化Router(router01-2)
[root@router01 ~]# mysqlrouter --bootstrap root@db01:3306 --user=mysqlrouter
....
## MySQL Classic protocol

- Read/Write Connections: localhost:6446
- Read/Only Connections:  localhost:6447
....
[root@router01 ~]# 
[root@router01 ~]# ls /etc/mysqlrouter/
mysqlrouter.conf  mysqlrouter.conf.bak  mysqlrouter.key
[root@router01 ~]# cat /etc/mysqlrouter/mysqlrouter.conf
....
[routing:dbs_rw]
bind_address=0.0.0.0
bind_port=6446
destinations=metadata-cache://dbs/?role=PRIMARY
routing_strategy=first-available
protocol=classic

[routing:dbs_ro]
bind_address=0.0.0.0
bind_port=6447
destinations=metadata-cache://dbs/?role=SECONDARY
routing_strategy=round-robin-with-fallback
protocol=classic
....
[root@router01 ~]# 
  2. 设定router安全(router01-2)
[root@router01 ~]# firewall-cmd --permanent --add-port=6446/tcp
success
[root@router01 ~]# firewall-cmd --permanent --add-port=6447/tcp
success
[root@router01 ~]# firewall-cmd --reload
success
[root@router01 ~]# systemctl enable mysqlrouter.service 
Created symlink /etc/systemd/system/multi-user.target.wants/mysqlrouter.service → /usr/lib/systemd/system/mysqlrouter.service.
[root@router01 ~]# systemctl start mysqlrouter.service 
[root@router01 ~]#
  3. 测试router
[root@mysh ~]# mysqlsh --sql root@router01:6446 -p -e 'select @@hostname;'
db02
[root@mysh ~]# mysqlsh --sql root@router01:6447 -p -e 'select @@hostname;'
db03
[root@mysh ~]# mysqlsh --sql root@router02:6446 -p -e 'select @@hostname;'
db02
[root@mysh ~]# mysqlsh --sql root@router02:6447 -p -e 'select @@hostname;'
db03
[root@mysh ~]# 

设定VIP

  1. 配置keepalived(router01-2)
[root@router01 ~]# dnf install keepalived
[root@router01 ~]# cp /etc/keepalived/keepalived.conf{,.bak}
[root@router01 ~]# vi /etc/keepalived/keepalived.conf
[root@router01 ~]# cat /etc/keepalived/keepalived.conf
vrrp_script chk_mysqlrouter {
  script "/etc/keepalived/chk_mysqlrouter.sh"
  interval 2
  fall 2
}

vrrp_instance VI_1 {
  interface ens160
  state MASTER
  virtual_router_id 51
  priority 101
  advert_int 1
  nopreempt
  virtual_ipaddress {
    13.13.2.2/16
  }
  track_script {
    chk_mysqlrouter
  }
}
[root@router01 ~]# vi /etc/keepalived/chk_mysqlrouter.sh
[root@router01 ~]# chmod a+x /etc/keepalived/chk_mysqlrouter.sh
[root@router01 ~]# cat /etc/keepalived/chk_mysqlrouter.sh 
#!/bin/bash
/usr/bin/killall -0 /usr/bin/mysqlrouter
[root@router01 ~]# 
[root@router02 ~]# cat /etc/keepalived/keepalived.conf
vrrp_instance VI_1 {
  interface ens160
  state BACKUP
  virtual_router_id 51
  priority 100 
  advert_int 1
  nopreempt
  virtual_ipaddress {
    13.13.2.2/16
  }
}
[root@router02 ~]# 
  2. 配置keepalived安全(router01-2)
[root@router01 ~]# firewall-cmd --zone=public --permanent \
> --add-rich-rule='rule protocol value="vrrp" accept'
success
[root@router01 ~]# firewall-cmd --reload
success
[root@router01 ~]# systemctl enable keepalived.service 
Created symlink /etc/systemd/system/multi-user.target.wants/keepalived.service → /usr/lib/systemd/system/keepalived.service.
[root@router01 ~]# systemctl start keepalived.service 
[root@router01 ~]#
  3. 解决keepalived脚本不能运行问题(router01)
[root@router01 ~]# journalctl -xe
Oct 01 15:15:12 router01 platform-python[4649]: SELinux is preventing killall from getattr access on the file /usr/lib/systemd/systemd-logind.

                                                *****  Plugin catchall (100. confidence) suggests   **************************
                                                
                                                If you believe that killall should be allowed getattr access on the crond file by default.
                                                Then you should report this as a bug.
                                                You can generate a local policy module to allow this access.
                                                Do
                                                allow this access for now by executing:
                                                # ausearch -c 'killall' --raw | audit2allow -M my-killall
                                                # semodule -X 300 -i my-killall.pp
                                                
[root@router01 ~]# ausearch -c 'killall' --raw | audit2allow -M my-killall
[root@router01 ~]# semodule -i my-killall.pp
[root@router01 ~]# 
[root@router01 ~]# ip a
2: ens160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 00:0c:29:4b:fe:85 brd ff:ff:ff:ff:ff:ff
    inet 13.13.4.4/16 brd 13.13.255.255 scope global noprefixroute ens160
       valid_lft forever preferred_lft forever
    inet 13.13.2.2/16 scope global secondary ens160
       valid_lft forever preferred_lft forever
    inet6 fe80::96aa:5e43:bf68:cd74/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever
[root@router01 ~]#
  4. 尝试停止router01上的mysqlrouter.service
[root@router01 ~]# systemctl stop mysqlrouter.service 
[root@router01 ~]# ip a
2: ens160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 00:0c:29:4b:fe:85 brd ff:ff:ff:ff:ff:ff
    inet 13.13.4.4/16 brd 13.13.255.255 scope global noprefixroute ens160
       valid_lft forever preferred_lft forever
    inet6 fe80::96aa:5e43:bf68:cd74/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever
[root@router01 ~]# 
[root@router02 ~]# ip a
2: ens160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 00:0c:29:ed:a9:20 brd ff:ff:ff:ff:ff:ff
    inet 13.13.5.5/16 brd 13.13.255.255 scope global noprefixroute ens160
       valid_lft forever preferred_lft forever
    inet 13.13.2.2/16 scope global secondary ens160
       valid_lft forever preferred_lft forever
    inet6 fe80::edfd:c857:df55:602b/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever
[root@router02 ~]# 
  5. 转移mysql访问至13.13.2.2 keepalived
[root@mysh ~]# mysqlsh --sql root@keepalived:6446 -p -e 'select @@hostname;'
@@hostname
db02
[root@mysh ~]# mysqlsh --sql root@keepalived:6447 -p -e 'select @@hostname;'
@@hostname
db03
[root@mysh ~]# 
  • 0
    点赞
  • 4
    收藏
    觉得还不错? 一键收藏
  • 1
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值