项目中数据量不断增加,导致单库压力越来越大。要解决单机压力问题,业界广泛采用分库分表、读写分离,或者切换到 TiDB 等 NewSQL 方案。生产环境读写分离已做;分库分表涉及一大堆外键,拆分耗费极大工程量;TiDB 标配 6 台服务器,以目前业务量来说太浪费,也没有具体测试过性能。最终决定在现有 MySQL 上做文章。
一,先安装mycat做个读写分离
wget http://dl.mycat.io/1.6.6.1/Mycat-server-1.6.6.1-release-20181031195535-unix.tar.gz
tar -zxvf Mycat-server-1.6.6.1-release-20181031195535-unix.tar.gz
cd /opt/mycat/bin
./mycat start
配置与解读
/opt/mycat/conf
-----------server.xml----------配置数据库相关账号
<?xml version="1.0" encoding="UTF-8"?>
<!-- - - Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License. - You
may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0
- - Unless required by applicable law or agreed to in writing, software -
distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the
License for the specific language governing permissions and - limitations
under the License. -->
<!DOCTYPE mycat:server SYSTEM "server.dtd">
<mycat:server xmlns:mycat="http://io.mycat/">
<system>
<property name="nonePasswordLogin">0</property> <!-- 0: password required to log in; 1: passwordless login (requires a default account). Default is 0 -->
<property name="useHandshakeV10">1</property>
<property name="useSqlStat">0</property> <!-- 1: enable real-time SQL statistics; 0: disable -->
<property name="useGlobleTableCheck">0</property> <!-- 1: enable global-table consistency check; 0: disable -->
<property name="sequnceHandlerType">2</property>
<property name="subqueryRelationshipCheck">false</property> <!-- when a subquery contains a correlated query, check whether the join column is a sharding column. Default: false -->
<!-- <property name="useCompression">1</property>--> <!-- 1: enable the MySQL compression protocol -->
<!-- <property name="fakeMySQLVersion">5.6.20</property>--> <!-- emulated MySQL version number -->
<!-- <property name="processorBufferChunk">40960</property> -->
<!--
<property name="processors">1</property>
<property name="processorExecutor">32</property>
-->
<!-- default type 0: DirectByteBufferPool | type 1 ByteBufferArena | type 2 NettyBufferPool -->
<property name="processorBufferPoolType">0</property>
<!-- default 65535 (64K): maximum text length for SQL parsing -->
<!--<property name="maxStringLiteralLength">65535</property>-->
<!--<property name="sequnceHandlerType">0</property>-->
<!--<property name="backSocketNoDelay">1</property>-->
<!--<property name="frontSocketNoDelay">1</property>-->
<!--<property name="processorExecutor">16</property>-->
<!--
<property name="serverPort">8066</property> <property name="managerPort">9066</property>
<property name="idleTimeout">300000</property> <property name="bindIp">0.0.0.0</property>
<property name="frontWriteQueueSize">4096</property> <property name="processors">32</property> -->
<!-- distributed-transaction switch: 0 = do not filter distributed transactions; 1 = filter them (but not when only global tables are involved); 2 = do not filter, but log distributed transactions -->
<property name="handleDistributedTransactions">0</property>
<!--
off heap for merge/order/group/limit: 1 = enabled, 0 = disabled
-->
<property name="useOffHeapForMerge">1</property>
<!--
unit: m (NOTE(review): original comment said "m" but the value below uses "k" — verify intended unit)
-->
<property name="memoryPageSize">64k</property>
<!--
unit: k
-->
<property name="spillsFileBufferSize">1k</property>
<property name="useStreamOutput">0</property>
<!--
unit: m
-->
<property name="systemReserveMemorySize">384m</property>
<!-- whether to use ZooKeeper to coordinate switchover -->
<!-- NOTE(review): the original file declared useZKSwitch twice with conflicting values
(false here and true near the end of <system>); the duplicate has been removed.
Confirm which value is actually intended for this deployment. -->
<property name="useZKSwitch">false</property>
<!-- XA recovery log path -->
<!--<property name="XARecoveryLogBaseDir">./</property>-->
<!-- XA recovery log file name -->
<!--<property name="XARecoveryLogBaseName">tmlog</property>-->
<!-- if true, strictly honor the isolation level: do not switch connections inside a transaction even when it only contains SELECT statements -->
<property name="strictTxIsolation">false</property>
</system>
<!-- global SQL firewall settings -->
<!-- the whitelist may use the wildcards % or * -->
<!-- e.g. <host host="127.0.0.*" user="root"/> -->
<!-- e.g. <host host="127.0.*" user="root"/> -->
<!-- e.g. <host host="127.*" user="root"/> -->
<!-- e.g. <host host="1*7.*" user="root"/> -->
<!-- with any of the patterns above, 127.0.0.1 can log in as root -->
<!--
<firewall>
<whitehost>
<host host="1*7.0.0.*" user="root"/>
</whitehost>
<blacklist check="false">
</blacklist>
</firewall>
-->
<!-- <user name="root" defaultAccount="true">
<property name="password">123456</property>
<property name="schemas">TESTDB</property>-->
<!-- table-level DML privilege settings -->
<!-- <privileges check="false">
<schema name="TESTDB" dml="0110" >
<table name="tb01" dml="0000"></table>
<table name="tb02" dml="1111"></table>
</schema>
</privileges>
</user>-->
<user name="root">
<property name="password">a12345</property>
<property name="schemas">alipos</property>
</user>
<!-- a read-only account could also be added here -->
</mycat:server>
---------------------schema.xml-----------------------配置读写分离,分表分库多主备负载等
balance属性负载均衡类型,目前的取值有3种:
1.balance="0",不开启读写分离机制,所有读操作都发送到当前可用的writeHost上。
2.balance="1",全部的readHost与stand by writeHost参与select语句的负载均衡。简单地说,在双主双从模式下(M1->S1,M2->S2,并且M1与M2互为主备),
正常情况下,M2,S1,S2都参与select语句的负载均衡。
3.balance="2",所有读操作都随机地在writeHost、readHost上分发。
4.balance="3",所有读请求随机地分发到writeHost对应的readHost执行,
writeHost不负担读压力。注意balance=3只在1.4及其以后版本有,1.3没有。
writeType属性负载均衡类型,目前的取值有3种:
1.writeType="0",所有写操作发送到配置的第一个writeHost;第一个挂了则切到还存活的第二个writeHost,重新启动后以已切换后的为准,切换记录保存在配置文件dnindex.properties中。
2.writeType="1",所有写操作都随机地发送到配置的writeHost,1.5以后废弃,不推荐。
3.writeType="2",不执行写操作。
switchType属性
-1 表示不自动切换
1 默认值,自动切换
2 基于MySQL主从同步的状态决定是否切换
3 基于MySQL Galera Cluster的切换机制(适合集群)(1.4.1)
<?xml version="1.0"?>
<!DOCTYPE mycat:schema SYSTEM "schema.dtd">
<mycat:schema xmlns:mycat="http://io.mycat/">
<schema name="alipos" checkSQLschema="false" sqlMaxLimit="100">
<!-- auto sharding by id (long) -->
<!-- NOTE(review): 13 subtables with a sharding-by-month rule — confirm whether $1-12 was intended -->
<table name="alp_merchant_order_test" primaryKey="amoid" subTables="alp_merchant_order$1-13" dataNode="dn1" rule="sharding-by-month">
<!--<childTable name="alp_merchant_order_activity" primaryKey="amoaid" joinKey="amoid" parentKey="amoid"/>
<childTable name="alp_merchant_order_item" primaryKey="moiid" joinKey="amoid" parentKey="amoid"/>-->
</table>
<!--<table name="alp_merchant_order_activity" primaryKey="amoaid" dataNode="dn1"/>
<table name="alp_merchant_order_item" primaryKey="moiid" dataNode="dn1"/>-->
</schema>
<!-- <dataNode name="dn1$0-743" dataHost="localhost1" database="db$0-743"
/> -->
<dataNode name="dn1" dataHost="localhost1" database="alipos" />
<!--<dataNode name="dn4" dataHost="sequoiadb1" database="SAMPLE" />
<dataNode name="jdbc_dn1" dataHost="jdbchost" database="db1" />
<dataNode name="jdbc_dn2" dataHost="jdbchost" database="db2" />
<dataNode name="jdbc_dn3" dataHost="jdbchost" database="db3" /> -->
<!-- switchType="0" is not a documented value (valid: -1 no auto switch, 1 auto switch [default],
2 switch based on MySQL replication status, 3 Galera cluster); changed to the default "1" -->
<dataHost name="localhost1" maxCon="1000" minCon="10" balance="3"
writeType="0" dbType="mysql" dbDriver="native" switchType="1" slaveThreshold="100">
<heartbeat>select user()</heartbeat>
<!-- can have multi write hosts -->
<!-- host names must be unique: MyCat keys the failover record in dnindex.properties by host name,
so duplicate host="node1" entries would make switchover state ambiguous -->
<writeHost host="hostM1" url="192.168.1.230:3306" user="root" password="a12345">
<!-- can have multi read hosts -->
<readHost host="hostM1S1" url="192.168.1.231:3306" user="root" password="a12345" />
<readHost host="hostM1S2" url="192.168.1.232:3306" user="root" password="a12345" />
<readHost host="hostM1S3" url="192.168.1.233:3306" user="root" password="a12345" />
</writeHost>
<writeHost host="hostM2" url="192.168.1.234:3306" user="root" password="a12345">
<!-- can have multi read hosts -->
<readHost host="hostM2S1" url="192.168.1.231:3306" user="root" password="a12345" />
<readHost host="hostM2S2" url="192.168.1.232:3306" user="root" password="a12345" />
<readHost host="hostM2S3" url="192.168.1.233:3306" user="root" password="a12345" />
</writeHost>
</dataHost>
<!--
<dataHost name="sequoiadb1" maxCon="1000" minCon="1" balance="0" dbType="sequoiadb" dbDriver="jdbc">
<heartbeat> </heartbeat>
<writeHost host="hostM1" url="sequoiadb://1426587161.dbaas.sequoialab.net:11920/SAMPLE" user="jifeng" password="jifeng"></writeHost>
</dataHost>
<dataHost name="oracle1" maxCon="1000" minCon="1" balance="0" writeType="0" dbType="oracle" dbDriver="jdbc"> <heartbeat>select 1 from dual</heartbeat>
<connectionInitSql>alter session set nls_date_format='yyyy-mm-dd hh24:mi:ss'</connectionInitSql>
<writeHost host="hostM1" url="jdbc:oracle:thin:@127.0.0.1:1521:nange" user="base" password="123456" > </writeHost> </dataHost>
<dataHost name="jdbchost" maxCon="1000" minCon="1" balance="0" writeType="0" dbType="mongodb" dbDriver="jdbc">
<heartbeat>select user()</heartbeat>
<writeHost host="hostM" url="mongodb://192.168.0.99/test" user="admin" password="123456" ></writeHost> </dataHost>
<dataHost name="sparksql" maxCon="1000" minCon="1" balance="0" dbType="spark" dbDriver="jdbc">
<heartbeat> </heartbeat>
<writeHost host="hostM1" url="jdbc:hive2://feng01:10000" user="jifeng" password="jifeng"></writeHost> </dataHost> -->
<!-- <dataHost name="jdbchost" maxCon="1000" minCon="10" balance="0" dbType="mysql"
dbDriver="jdbc"> <heartbeat>select user()</heartbeat> <writeHost host="hostM1"
url="jdbc:mysql://localhost:3306" user="root" password="123456"> </writeHost>
</dataHost> -->
</mycat:schema>
mycat单机有单点风险,可以再加一层haproxy做负载,具体不再赘述!
接下来遇到的多主备的同步,生产环境采用阿里云的mysql,同步限制,制定同步等等问题无法使用mysql内置主从同步功能!那就手动写个工具
之前有用go-mysql,那就拿来二次开发使用!
代码工具见git:
使用自己写的工具同步手脚释放了很多!
直接可以用从库做热库,只保留10天内数据,查询压力也随即释放!吞吐率成倍飙升。
目前没有遇到写入压力,如后期有写入压力可平滑移到mariadb多主模式