一 MMM 高可用mysql简介
MMM(Master-Master Replication Manager for MySQL),由一个管理端(monitor)和多个代理端(agent)构成。通过MMM可以实现监控和管理Mysql主主复制和服务状态,同时也可监控多个Slave节点的复制以及运行状态,并且可以做到任何节点发生故障时实现自动化切换的功能。
MMM套件三个主要脚本:
mmm_mond:监控进程,运行在管理节点,主要负责对所有数据库的监控工作,同时决定和处理所有节点的角色切换。
mmm_agent:代理进程,运行在每台Mysql服务器,完成监控的测试工作和执行远程服务设置。
mmm_control:管理脚本,查看和管理集群运行状态,同时管理mmm_mond进程。
二 MMM典型应用架构
三 MMM双主多从Mysql架构配置
架构图如上图
双主双从应用架构读、写分离IP列表
0.配置前准备
校时操作
#安装ntpdate工具
yum install ntpdate -y
#使用ntpdate校时(后面的是ntp服务器)
ntpdate pool.ntp.org
关闭selinux
setenforce 0
sed -i 's/enforcing/disabled/g' /etc/selinux/config
1 MMM的安装配置
1.MMM套件安装
1.在Monitor端安装所有MMM组件
yum install epel-release.noarch -y
yum install mysql-mmm mysql-mmm-agent mysql-mmm-tools mysql-mmm-monitor -y
2.在其他所有节点安装mysql-mmm-agent
yum install epel-release.noarch -y
yum install mysql-mmm-agent -y
2.Master1和Master2的主主配置和Master1和Slave1和Slave2的主从配置
(安装配置参考《Mysql主从复制配置》《Mysql+Keepalived双主互备高可用》的配置)
3.在所有MySQL节点的/etc/my.cnf中增加参数(要重启)
read_only=1
#read_only是因为MMM对数据需严格的读写控制
#此参数不影响replication;root用户依然可写。
4.所有MySQL节点创建monitor user(健康检测)和monitor agent(切换只读模式和同步Master信息)帐号(仅在mysql写入主节点,其他节点会自动复制)
grant replication client on *.* to 'mmm_monitor'@'192.168.1.%' identified by 'monitorpasswd';
grant super, replication client, process on *.* to 'mmm_agent'@'192.168.1.%' identified by 'agentpasswd';
5.在所有MMM节点配置mmm_common.conf (注意:以下所有配置文件中不能包含注释,否则会报错;可使用 sed -i '/^#/d;s/#.*//g' file 清除注释)
vim /etc/mysql-mmm/mmm_common.conf
#当设置此参数,所有mysql节点都设置为"read_only=1",MMM会根据Mysql角色来决定是否执行"set global read_only=0".
active_master_role writer
<host default>
cluster_interface eno16777736 #设置网络接口
pid_path /run/mysql-mmm-agent.pid #设置PID文件位置
bin_path /usr/libexec/mysql-mmm/ #设置MMM可执行文件路径
replication_user slave_cp #设置复制的用户名
replication_password pass #设置复制用户密码
agent_user mmm_agent #设置更改只读操作用户
agent_password agentpasswd #设置更改只读操作用户密码
</host>
<host db1> #DB1配置信息
ip 192.168.1.166
mode master
peer db2 #与DB1对等主机
</host>
<host db2>
ip 192.168.1.168
mode master
peer db1
</host>
<host db3>
ip 192.168.1.186
mode slave
</host>
<host db4>
ip 192.168.1.188
mode slave
</host>
<role writer> #设置可执行写用户
hosts db1, db2 #DB1和DB2都可执行
ips 192.168.1.160 #设置可写的VIP
mode exclusive #设置角色互斥模式,互斥角色只有一个IP,同一时间只能分配给一个用户
</role>
<role reader> #设置可读角色模式
hosts db1, db2, db3, db4 #设置可执行主机
ips 192.168.1.151, 192.168.1.152, 192.168.1.153, 192.168.1.154
mode balanced #设置角色模式为负载均衡,这些IP动态分配多个MySQL主机
</role>
6.仅在MMM管理节点配置mmm_mon.conf
vim /etc/mysql-mmm/mmm_mon.conf
include mmm_common.conf
<monitor>
ip 127.0.0.1 #安全起见,只在本机监听,默认端口9988
pid_path /run/mysql-mmm-monitor.pid
bin_path /usr/libexec/mysql-mmm
status_path /var/lib/mysql-mmm/mmm_mond.status
#测试网络连通性,只要一个正常则网络正常
ping_ips 192.168.1.1, 192.168.1.166, 192.168.1.168, 192.168.1.186, 192.168.1.188
flap_duration 3600 #抖动时间范围
flap_count 3 #在抖动时间范围内最大抖动次数
auto_set_online 8
#是否自动上线,如果大于0,抖动的主机在抖动时间范围过后,则设置自动上线
# The kill_host_bin does not exist by default, though the monitor will
# throw a warning about it missing. See the section 5.10 "Kill Host
# Functionality" in the PDF documentation.
#
# kill_host_bin /usr/libexec/mysql-mmm/monitor/kill_host
#
</monitor>
<host default>
monitor_user mmm_monitor
monitor_password monitorpasswd
</host>
debug 0 #MMM管理端运行模式 0 正常模式 1 debug模式
#开启MMM管理端的9988端口
firewall-cmd --permanent --add-port=9988/tcp
firewall-cmd --reload
#开启所有mysql节点的9989端口
firewall-cmd --permanent --add-port=9989/tcp
firewall-cmd --reload
7.在所有Mysql节点设置mmm_agent.conf
vim /etc/mysql-mmm/mmm_agent.conf
include mmm_common.conf
this db1 #在四台mysql节点上设置对应的db,分别为db1、db2、db3、db4
8.启动MMM服务
#MMM管理端启动
systemctl restart mysql-mmm-monitor
systemctl enable mysql-mmm-monitor
#Mysql节点启动
systemctl restart mysql-mmm-agent
systemctl enable mysql-mmm-agent
#MMM管理端基本管理命令
mmm_control show
db1(192.168.1.166) master/AWAITING_RECOVERY. Roles:
db2(192.168.1.168) master/AWAITING_RECOVERY. Roles:
db3(192.168.1.186) slave/AWAITING_RECOVERY. Roles:
db4(192.168.1.188) slave/AWAITING_RECOVERY. Roles:
#如果一直显示等待,可手动设置
mmm_control set_online db1
mmm_control set_online db2
mmm_control set_online db3
mmm_control set_online db4
mmm_control show
db1(192.168.1.166) master/ONLINE. Roles: reader(192.168.1.154), writer(192.168.1.160)
db2(192.168.1.168) master/ONLINE. Roles: reader(192.168.1.151)
db3(192.168.1.186) slave/ONLINE. Roles: reader(192.168.1.153)
db4(192.168.1.188) slave/ONLINE. Roles: reader(192.168.1.152)
#查看各个节点运行状态
mmm_control checks all
db4 ping [last change: 2017/03/25 22:55:49] OK
db4 mysql [last change: 2017/03/25 22:55:49] OK
db4 rep_threads [last change: 2017/03/25 22:55:49] OK
db4 rep_backlog [last change: 2017/03/25 22:55:49] OK: Backlog is null
db2 ping [last change: 2017/03/25 22:55:49] OK
db2 mysql [last change: 2017/03/25 22:55:49] OK
db2 rep_threads [last change: 2017/03/25 22:55:49] OK
db2 rep_backlog [last change: 2017/03/25 22:55:49] OK: Backlog is null
db3 ping [last change: 2017/03/25 22:55:49] OK
db3 mysql [last change: 2017/03/25 22:55:49] OK
db3 rep_threads [last change: 2017/03/25 22:55:49] OK
db3 rep_backlog [last change: 2017/03/25 22:55:49] OK: Backlog is null
db1 ping [last change: 2017/03/25 22:55:49] OK
db1 mysql [last change: 2017/03/25 22:55:49] OK
db1 rep_threads [last change: 2017/03/25 22:55:49] OK
db1 rep_backlog [last change: 2017/03/25 22:55:49] OK: Backlog is null
#查看mysql各个节点VIP绑定状态
ip a
9.测试
1.读写分离测试
#创建测试用户
mysql -uroot -p
create database test;
create user test@"192.168.1.%" identified by '123';
grant all on test.* to test@"192.168.1.%";
exit
#写VIP登录(创建表单,插入数据测试略)
mysql -utest -p -h192.168.1.160
use test;
create table mmm_test(id varchar(60));
insert into mmm_test (id) values ("masetr");
exit
#读VIP登录
mysql -utest -p -h192.168.1.151
select * from test.mmm_test;
+--------+
| id     |
+--------+
| masetr |
+--------+
mysql -utest -p -h192.168.1.152
select * from test.mmm_test;
2.故障测试
[root@monitor ~]# mmm_control show
db1(192.168.1.166) master/ONLINE. Roles: reader(192.168.1.152), writer(192.168.1.160)
db2(192.168.1.168) master/ONLINE. Roles: reader(192.168.1.151)
db3(192.168.1.186) slave/ONLINE. Roles: reader(192.168.1.154)
db4(192.168.1.188) slave/ONLINE. Roles: reader(192.168.1.153)
[root@DB1 ~]# systemctl stop mariadb
[root@www ~]# mmm_control show
db1(192.168.1.166) master/HARD_OFFLINE. Roles:
db2(192.168.1.168) master/ONLINE. Roles: reader(192.168.1.151)
db3(192.168.1.186) slave/ONLINE. Roles: reader(192.168.1.154)
db4(192.168.1.188) slave/ONLINE. Roles: reader(192.168.1.153)
[root@monitor ~]# mmm_control show
db1(192.168.1.166) master/HARD_OFFLINE. Roles:
db2(192.168.1.168) master/ONLINE. Roles: reader(192.168.1.151), writer(192.168.1.160)
db3(192.168.1.186) slave/ONLINE. Roles: reader(192.168.1.152), reader(192.168.1.154)
db4(192.168.1.188) slave/ONLINE. Roles: reader(192.168.1.153)
[root@DB1 ~]# systemctl restart mariadb
#注意虽然DB1复活,但写VIP仍然在DB2不变
[root@monitor ~]# mmm_control show
db1(192.168.1.166) master/ONLINE. Roles: reader(192.168.1.152)
db2(192.168.1.168) master/ONLINE. Roles: reader(192.168.1.151), writer(192.168.1.160)
db3(192.168.1.186) slave/ONLINE. Roles: reader(192.168.1.154)
db4(192.168.1.188) slave/ONLINE. Roles: reader(192.168.1.153)
四 Amoeba优化MMM架构(服务器IP 192.168.1.199)
1.安装Amoeba开发环境Java
#建立安装目录
mkdir /usr/java
cd /usr/java
#官网下载地址http://download.oracle.com/otn-pub/java/jdk/8u92-b14/jdk-8u92-linux-x64.rpm
wget -c --no-check-certificate --no-cookie --header "Cookie: s_nr=1420682671945; s_cc=true; oraclelicense=accept-securebackup-cookie; gpw_e24=http%3A%2F%2Fwww.oracle.com%2Ftechnetwork%2Fjava%2Fjavase%2Fdownloads%2Fjdk7-downloads-1880260.html;s_sq=%5B%5BB%5D%5D" http://download.oracle.com/otn-pub/java/jdk/8u92-b14/jdk-8u92-linux-x64.rpm
chmod +x jdk-8u92-linux-x64.rpm
rpm -ivh jdk-8u92-linux-x64.rpm
vim /etc/profile
#追加以下信息
export JAVA_HOME=/usr/java/jdk1.8.0_92
export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin
#立即生效
source /etc/profile
#查看版本信息
java -version
java version "1.8.0_92"
Java(TM) SE Runtime Environment (build 1.8.0_92-b14)
Java HotSpot(TM) 64-Bit Server VM (build 25.92-b14, mixed mode)
2.安装Amoeba
wget https://sourceforge.net/projects/amoeba/files/Amoeba%20for%20mysql/3.x/amoeba-mysql-3.0.5-RC-distribution.zip
unzip amoeba-mysql-3.0.5-RC-distribution.zip
mv amoeba-mysql-3.0.5-RC /usr/local/amoeba/
3.配置Amoeba
vim /usr/local/amoeba/conf/dbServers.xml
修改以下黑体信息
<?xml version="1.0" encoding="gbk"?>
<!DOCTYPE amoeba:dbServers SYSTEM "dbserver.dtd">
<amoeba:dbServers xmlns:amoeba="http://amoeba.meidusa.com/">
<!--
Each dbServer needs to be configured into a Pool,
If you need to configure multiple dbServer with load balancing that can be simplified
by the following configuration:
add attribute with name virtual = "true" in dbServer, but the configuration does not
allow the element with name factoryConfig
such as 'multiPool' dbServer
-->
<dbServer name="abstractServer" abstractive="true">
<factoryConfig class="com.meidusa.amoeba.mysql.net.MysqlServerConnectionFactory">
<property name="connectionManager">${defaultManager}</property>
<property name="sendBufferSize">64</property>
<property name="receiveBufferSize">128</property>
<!-- mysql port -->
<property name="port">3306</property>
<!-- mysql schema -->
<property name="schema">amoeba</property>
<!-- mysql user -->
<property name="user">amoeba</property>
<property name="password">12345</property>
</factoryConfig>
<poolConfig class="com.meidusa.toolkit.common.poolable.PoolableObjectPool">
<property name="maxActive">500</property>
<property name="maxIdle">500</property>
<property name="minIdle">1</property>
<property name="minEvictableIdleTimeMillis">600000</property>
<property name="timeBetweenEvictionRunsMillis">600000</property>
<property name="testOnBorrow">true</property>
<property name="testOnReturn">true</property>
<property name="testWhileIdle">true</property>
</poolConfig>
</dbServer>
<dbServer name="writedb" parent="abstractServer">
<factoryConfig>
<!-- mysql ip -->
<property name="ipAddress">192.168.1.160</property>
</factoryConfig>
</dbServer>
<dbServer name="slave1" parent="abstractServer">
<factoryConfig>
<!-- mysql ip -->
<property name="ipAddress">192.168.1.151</property>
</factoryConfig>
</dbServer>
<dbServer name="slave2" parent="abstractServer">
<factoryConfig>
<!-- mysql ip -->
<property name="ipAddress">192.168.1.152</property>
</factoryConfig>
</dbServer>
<dbServer name="slave3" parent="abstractServer">
<factoryConfig>
<!-- mysql ip -->
<property name="ipAddress">192.168.1.153</property>
</factoryConfig>
</dbServer>
<dbServer name="slave4" parent="abstractServer">
<factoryConfig>
<!-- mysql ip -->
<property name="ipAddress">192.168.1.154</property>
</factoryConfig>
</dbServer>
<dbServer name="myslaves" virtual="true">
<poolConfig class="com.meidusa.amoeba.server.MultipleServerPool">
<!-- Load balancing strategy: 1=ROUNDROBIN , 2=WEIGHTBASED , 3=HA-->
<property name="loadbalance">1</property>
<!-- Separated by commas,such as: server1,server2,server1 -->
<property name="poolNames">slave1,slave2,slave3,slave4</property>
</poolConfig>
</dbServer>
</amoeba:dbServers>
vim /usr/local/amoeba/conf/amoeba.xml
修改以下黑体信息
<?xml version="1.0" encoding="gbk"?>
<!DOCTYPE amoeba:configuration SYSTEM "amoeba.dtd">
<amoeba:configuration xmlns:amoeba="http://amoeba.meidusa.com/">
<proxy>
<!-- service class must implements com.meidusa.amoeba.service.Service -->
<service name="Amoeba for Mysql" class="com.meidusa.amoeba.mysql.server.MySQLService">
<!-- port -->
<property name="port">8066</property>
<!-- bind ipAddress -->
<!--
<property name="ipAddress">127.0.0.1</property>
-->
<property name="connectionFactory">
<bean class="com.meidusa.amoeba.mysql.net.MysqlClientConnectionFactory">
<property name="sendBufferSize">128</property>
<property name="receiveBufferSize">64</property>
</bean>
</property>
<property name="authenticateProvider">
<bean class="com.meidusa.amoeba.mysql.server.MysqlClientAuthenticator">
<property name="user">root</property>
<property name="password">1234567890</property>
<property name="filter">
<bean class="com.meidusa.toolkit.net.authenticate.server.IPAcc
essController"><property name="ipFile">${amoeba.home}/conf/access_lis
t.conf</property></bean>
</property>
</bean>
</property>
</service>
<runtime class="com.meidusa.amoeba.mysql.context.MysqlRuntimeContext">
<!-- proxy server client process thread size -->
<property name="executeThreadSize">128</property>
<!-- per connection cache prepared statement size -->
<property name="statementCacheSize">500</property>
<!-- default charset -->
<property name="serverCharset">utf8</property>
<!-- query timeout( default: 60 second , TimeUnit:second) -->
<property name="queryTimeout">60</property>
</runtime>
</proxy>
<!--
Each ConnectionManager will start as thread
manager responsible for the Connection IO read , Death Detection
-->
<connectionManagerList>
<connectionManager name="defaultManager" class="com.meidusa.toolkit.net.MultiConnectionManager
Wrapper"><property name="subManagerClassName">com.meidusa.toolkit.net.AuthingableConnectionMana
ger</property></connectionManager>
</connectionManagerList>
<!-- default using file loader -->
<dbServerLoader class="com.meidusa.amoeba.context.DBServerConfigFileLoader">
<property name="configFile">${amoeba.home}/conf/dbServers.xml</property>
</dbServerLoader>
<queryRouter class="com.meidusa.amoeba.mysql.parser.MysqlQueryRouter">
<property name="ruleLoader">
<bean class="com.meidusa.amoeba.route.TableRuleFileLoader">
<property name="ruleFile">${amoeba.home}/conf/rule.xml</property>
<property name="functionFile">${amoeba.home}/conf/ruleFunctionMap.xml</propert
y></bean>
</property>
<property name="sqlFunctionFile">${amoeba.home}/conf/functionMap.xml</property>
<property name="LRUMapSize">1500</property>
<property name="defaultPool">writedb</property>
<property name="writePool">writedb</property>
<property name="readPool">myslaves</property>
<property name="needParse">true</property>
</queryRouter>
</amoeba:configuration>
4.设置Amoeba登录数据库权限(仅在mysql写入主节点,其他节点会自动复制)
mysql -uroot -p
create database amoeba;
create user amoeba@"192.168.1.199" identified by '12345';
grant all on amoeba.* to amoeba@"192.168.1.199";
#给所有权限(测试会用到)
grant all on *.* to amoeba@"192.168.1.199";
flush privileges;
exit
5.开启amoeba防火墙
firewall-cmd --permanent --add-port=8066/tcp
firewall-cmd --reload
6.启动Amoeba
/usr/local/amoeba/bin/launcher &
netstat -tlunp | grep java
tcp6 0 0 :::8066 :::* LISTEN 2666/java
报错解决方法
vim /usr/local/amoeba/jvm.properties
将下面内容修改成最下面
JVM_OPTIONS="-server -Xms256m -Xmx1024m -Xss196k -XX:PermSize=16m -XX:MaxPermSize=96m"
JVM_OPTIONS="-server -Xms1024m -Xmx1024m -Xss256k -XX:PermSize=16m -XX:MaxPermSize=96m"
7.验证
#在mysql进行如下操作
#在slave1,2从库执行
insert into test.mmm_test (id) values ("slave");
#开始验证
mysql -uroot -p1234567890 -h192.168.1.199 -P8066
select * from test.mmm_test;
MySQL [(none)]> select * from test.mmm_test;
+--------+
| id     |
+--------+
| masetr |
| slave  |
+--------+
2 rows in set (0.07 sec)
MySQL [(none)]> select * from test.mmm_test;
+--------+
| id     |
+--------+
| masetr |
| slave  |
+--------+
2 rows in set (0.03 sec)
MySQL [(none)]> select * from test.mmm_test;
+--------+
| id     |
+--------+
| masetr |
+--------+
1 row in set (0.04 sec)
MySQL [(none)]> select * from test.mmm_test;
+--------+
| id     |
+--------+
| masetr |
+--------+
1 row in set (0.04 sec)
五 MySQL读写分离完整高可用集群架构
(电脑最多支持6台虚拟机,再多就翘翘了,就不测试)