读写分离的理解与ssm程序中实现
1、什么是读写分离
读写分离即数据库分为读库与写库,也叫从库与主库。主库(写库)主要用于数据的插入、修改和删除,从库(读库)主要用于数据的查询。因为查询操作占用了程序资源的大部分,分离读库与写库有助于提升程序的执行效率。读库中的数据是写库发生修改后,通过热备份同步到读库的。
2、读写分离的代码实现
实现所需的技术栈:Spring+Mybatis+MySql
Druid连接池在Spring中配置
<bean name="abstractDataSource" abstract="true" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close">
<!-- Initial number of pooled connections created at startup -->
<property name="initialSize" value="${db.initialSize}" />
<!-- Maximum number of active connections in the pool -->
<property name="maxActive" value="${db.maxActive}" />
<!-- Maximum wait time (ms) when acquiring a connection -->
<property name="maxWait" value="${db.maxWait}" />
<!-- SQL-firewall (wall) filter. NOTE(review): the 'filters' property at the bottom of
     this bean also registers a wall filter by alias, which likely double-registers the
     firewall — confirm and keep only one of the two. -->
<property name="proxyFilters">
<list>
<ref bean="wall-filter"/>
</list>
</property>
<property name="validationQuery" value="SELECT 1" />
<property name="testOnBorrow" value="${db.testOnBorrow}" />
<property name="testOnReturn" value="${db.testOnReturn}" />
<property name="testWhileIdle" value="${db.testWhileIdle}" />
<!-- Interval (ms) between idle-connection eviction runs -->
<property name="timeBetweenEvictionRunsMillis" value="${db.timeBetweenEvictionRunsMillis}" />
<!-- Minimum time (ms) a connection must sit idle in the pool before it may be evicted -->
<property name="minEvictableIdleTimeMillis" value="${db.minEvictableIdleTimeMillis}" />
<!-- Enable reclaiming of abandoned (leaked) connections -->
<property name="removeAbandoned" value="${db.removeAbandoned}" />
<!-- Seconds before an abandoned connection is reclaimed.
     NOTE(review): jdbc.properties sets db.removeAbandonedTimeout=5000 (~83 min),
     not the 1800 s / 30 min the original comment claimed — confirm the intended value. -->
<property name="removeAbandonedTimeout" value="${db.removeAbandonedTimeout}" />
<!-- Log an error when an abandoned connection is closed -->
<property name="logAbandoned" value="${db.logAbandoned}" />
<property name="filters" value="wall" />
</bean>
<!-- Druid SQL-firewall filter configured for MySQL -->
<bean id="wall-filter" class="com.alibaba.druid.wall.WallFilter">
<property name="dbType" value="mysql"/>
<property name="config" ref="wall-config"/>
</bean>
<bean id="wall-config" class="com.alibaba.druid.wall.WallConfig">
<!-- Allow multi-statement (batch) SQL to pass the firewall -->
<property name="multiStatementAllow" value="true"/>
</bean>
<!-- Master (write) data source.
     NOTE(review): datasource.*.driverClassName is defined in jdbc.properties but never
     injected into these beans — Druid can infer the driver from the URL; confirm that
     is the intent. -->
<bean id="master" parent="abstractDataSource">
<property name="url" value="${datasource.master.url}"/>
<property name="username" value="${datasource.master.username}"/>
<property name="password" value="${datasource.master.password}"/>
</bean>
<!-- Slave (read) data source #1 -->
<bean id="slave1" parent="abstractDataSource">
<property name="url" value="${datasource.slave1.url}"/>
<property name="username" value="${datasource.slave1.username}"/>
<property name="password" value="${datasource.slave1.password}"/>
</bean>
<!-- Slave (read) data source #2 -->
<bean id="slave2" parent="abstractDataSource">
<property name="url" value="${datasource.slave2.url}"/>
<property name="username" value="${datasource.slave2.username}"/>
<property name="password" value="${datasource.slave2.password}"/>
</bean>
<!-- Dynamic routing data source: one master plus a list of slaves (DynamicDataSource) -->
<bean id="dynamicDataSource" class="com.aa.bb.datasoure.DynamicDataSource">
<property name="masterDataSource" ref="master"/>
<property name="slaveDataSources">
<list>
<ref bean="slave1"/>
<ref bean="slave2"/>
</list>
</property>
<!-- Slave selection strategy: 1 = round-robin, 0 = random -->
<property name="slaveDataSourcePollPattern" value="1"/>
</bean>
<!-- Lazy proxy: defers fetching a physical connection until it is actually used —
     presumably so the routing key can still be switched by the MyBatis interceptor
     before a connection is bound; confirm against transaction setup. -->
<bean id="dataSource" class="org.springframework.jdbc.datasource.LazyConnectionDataSourceProxy">
<property name="targetDataSource">
<ref bean="dynamicDataSource"/>
</property>
</bean>
<!-- MyBatis SqlSessionFactory wired to the lazy routing data source -->
<bean id="sqlSessionFactory" class="org.mybatis.spring.SqlSessionFactoryBean">
<property name="dataSource" ref="dataSource"></property>
<!-- NOTE(review): the path segment is spelled "mybaits" — confirm the actual
     classpath directory name before deploying -->
<property name="configLocation" value="classpath:mybaits/mybatis-config.xml"></property>
<property name="mapperLocations" value="classpath:com/aa/bb/mapper/*.xml"/>
</bean>
<!-- Scan mapper (DAO) interfaces under com.aa.bb.dao -->
<bean class="org.mybatis.spring.mapper.MapperScannerConfigurer">
<property name="basePackage" value="com.aa.bb.dao"/>
<property name="sqlSessionFactoryBeanName" value="sqlSessionFactory"></property>
</bean>
jdbc.properties配置数据库信息
#主库
datasource.master.driverClassName=com.mysql.jdbc.Driver
datasource.master.url=jdbc:mysql://主库ip:3306/库名?useUnicode=true&characterEncoding=utf8
datasource.master.username=root
datasource.master.password=root
#从库1
datasource.slave1.driverClassName=com.mysql.jdbc.Driver
datasource.slave1.url=jdbc:mysql://从库1ip:3306/库名?useUnicode=true&characterEncoding=utf8
datasource.slave1.username=root
datasource.slave1.password=123456
#从库2
datasource.slave2.driverClassName=com.mysql.jdbc.Driver
datasource.slave2.url=jdbc:mysql://从库2ip:3306/库名?useUnicode=true&characterEncoding=utf8
datasource.slave2.username=root
datasource.slave2.password=123456
db.initialSize=0
db.maxActive=20
db.maxWait=60000
db.testOnBorrow=false
db.testOnReturn=false
db.testWhileIdle=true
db.timeBetweenEvictionRunsMillis=60000
db.minEvictableIdleTimeMillis=25200000
db.removeAbandoned=true
db.removeAbandonedTimeout=5000
db.logAbandoned=true
创建DynamicDataSource数据源类
package com.aa.bb.datasoure;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.springframework.jdbc.datasource.lookup.AbstractRoutingDataSource;
/**
 * Routing data source with one master (write) and any number of slaves (read).
 * The per-call lookup key is taken from DynamicDataSourceHolder (set by the
 * MyBatis interceptor); slaves are chosen either round-robin or at random.
 */
public class DynamicDataSource extends AbstractRoutingDataSource {
    // Write (master) data source; required
    private Object masterDataSource;
    // Read (slave) data sources; optional
    private List<Object> slaveDataSources;
    // Number of configured slaves (0 means every call goes to the master)
    private int slaveDataSourceSize;
    // Slave selection strategy: 0 = random, 1 = round-robin
    private int slaveDataSourcePollPattern = 0;
    // Monotonic counter driving round-robin selection
    private final AtomicLong counter = new AtomicLong(0);

    /**
     * Registers the master under the key {@code DB_MASTET.name()} and each slave
     * under {@code DB_SLAVE.name() + index}, then lets the superclass resolve them.
     *
     * @throws IllegalArgumentException if no master data source was configured
     */
    @Override
    public void afterPropertiesSet() {
        if (this.masterDataSource == null) {
            throw new IllegalArgumentException("Property 'masterDataSource' is required");
        }
        setDefaultTargetDataSource(masterDataSource);
        Map<Object, Object> targetDataSources = new HashMap<>();
        targetDataSources.put(DynamicDataSourceGlobal.DB_MASTET.name(), masterDataSource);
        if (this.slaveDataSources == null) {
            slaveDataSourceSize = 0;
        } else {
            for (int i = 0; i < slaveDataSources.size(); i++) {
                targetDataSources.put(DynamicDataSourceGlobal.DB_SLAVE.name() + i, slaveDataSources.get(i));
            }
            slaveDataSourceSize = slaveDataSources.size();
        }
        setTargetDataSources(targetDataSources);
        super.afterPropertiesSet();
    }

    /**
     * Chooses the lookup key for the current call: the master key for writes /
     * no-slave setups, otherwise a slave key picked by the configured strategy.
     */
    @Override
    protected Object determineCurrentLookupKey() {
        DynamicDataSourceGlobal dynamicDataSourceGlobal = DynamicDataSourceHolder.getDbType();
        if (dynamicDataSourceGlobal == null || dynamicDataSourceGlobal == DynamicDataSourceGlobal.DB_MASTET
            || slaveDataSourceSize <= 0) {
            // BUGFIX: previously returned DynamicDataSourceHolder.DB_MASTET ("master"),
            // which is not a key in targetDataSources (the master is registered as
            // DB_MASTET.name() above); routing only worked via the lenient default
            // fallback of AbstractRoutingDataSource. Return the real key instead.
            return DynamicDataSourceGlobal.DB_MASTET.name();
        }
        final int index;
        if (slaveDataSourcePollPattern == 1) {
            // Round-robin. floorMod keeps the index non-negative even after the
            // counter wraps past Long.MAX_VALUE, so the original lock-and-reset
            // dance (which re-checked an unchanged local under the lock) is gone.
            index = (int) Math.floorMod(counter.getAndIncrement(), (long) slaveDataSourceSize);
        } else {
            // Random selection across all slaves
            index = ThreadLocalRandom.current().nextInt(0, slaveDataSourceSize);
        }
        return dynamicDataSourceGlobal.name() + index;
    }

    public void setMasterDataSource(Object masterDataSource) {
        this.masterDataSource = masterDataSource;
    }

    public void setSlaveDataSources(List<Object> slaveDataSources) {
        this.slaveDataSources = slaveDataSources;
    }

    public void setSlaveDataSourcePollPattern(int slaveDataSourcePollPattern) {
        this.slaveDataSourcePollPattern = slaveDataSourcePollPattern;
    }
}
创建DynamicDataSourceGlobal主从库枚举
package com.aa.bb.datasoure;
/**
 * Routing markers used as (prefixes of) lookup keys in DynamicDataSource.
 * NOTE(review): "MASTET" looks like a typo for "MASTER", but name() is used to
 * build routing-map keys elsewhere — rename only with a coordinated change.
 */
public enum DynamicDataSourceGlobal {
/** Slave (read) database marker */
DB_SLAVE,
/** Master (write) database marker */
DB_MASTET;
}
创建DynamicDataSourceHolder类
package com.aa.bb.datasoure;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Thread-local holder for the routing decision made per statement.
 * The MyBatis interceptor writes the flag; DynamicDataSource reads it
 * when resolving the lookup key.
 */
public class DynamicDataSourceHolder {

    // BUGFIX(idiom): logger and the thread-local are class-wide singletons — make them final
    private static final Logger logger = LoggerFactory.getLogger(DynamicDataSourceHolder.class);

    private static final ThreadLocal<DynamicDataSourceGlobal> contextHolder =
        new ThreadLocal<DynamicDataSourceGlobal>();

    // NOTE(review): these values ("master"/"slave") do not match the enum constant
    // names ("DB_MASTET"/"DB_SLAVE"), so they must NOT be used as routing-map keys.
    public static final String DB_MASTET = "master";
    public static final String DB_SLAVE = "slave";

    /**
     * Returns the data-source type bound to the current thread,
     * defaulting to the master when none has been set.
     */
    public static DynamicDataSourceGlobal getDbType() {
        DynamicDataSourceGlobal db = contextHolder.get();
        if (db == null) {
            db = DynamicDataSourceGlobal.DB_MASTET;
        }
        return db;
    }

    /**
     * Binds the given data-source type to the current thread.
     *
     * @param str the routing marker to bind
     */
    public static void setDbType(DynamicDataSourceGlobal str) {
        // parameterized logging avoids building the message when DEBUG is disabled
        logger.debug("所使用的数据源是:{}", str);
        contextHolder.set(str);
    }

    /**
     * Clears the thread-local binding; call after statement completion to avoid
     * leaking routing state across pooled threads.
     */
    public static void clearDBType() {
        contextHolder.remove();
    }
}
创建拦截器类DynamicDataSourceInterceptor
package com.aa.bb.datasoure;
import java.util.Locale;
import java.util.Properties;
import org.apache.ibatis.executor.Executor;
import org.apache.ibatis.executor.keygen.SelectKeyGenerator;
import org.apache.ibatis.mapping.BoundSql;
import org.apache.ibatis.mapping.MappedStatement;
import org.apache.ibatis.mapping.SqlCommandType;
import org.apache.ibatis.plugin.Interceptor;
import org.apache.ibatis.plugin.Intercepts;
import org.apache.ibatis.plugin.Invocation;
import org.apache.ibatis.plugin.Plugin;
import org.apache.ibatis.plugin.Signature;
import org.apache.ibatis.session.ResultHandler;
import org.apache.ibatis.session.RowBounds;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.transaction.support.TransactionSynchronizationManager;
/**
* @desc 设置MyBatis 的拦截器(需要使用DataSource 路由就需要通过MyBatis 拦截器,拦截SQL 请求,根据SQL 类型选择不同的数据源) DynamicDataSourceInterceptor
* 拦截器写好没用需要在 MyBatis 配置文件中配置方可使用 <plugins> <plugin interceptor="DynamicDataSourceInterceptor拦截器类路径" /> </plugins>
*/
/**
 * MyBatis Executor interceptor that routes each statement to the master or a
 * slave. It inspects the mapped statement before execution and records the
 * decision in DynamicDataSourceHolder. Register it in the MyBatis config:
 * <plugins><plugin interceptor="...DynamicDataSourceInterceptor"/></plugins>
 */
@Intercepts({@Signature(type = Executor.class, method = "update", args = {MappedStatement.class, Object.class}),
    @Signature(type = Executor.class, method = "query",
        args = {MappedStatement.class, Object.class, RowBounds.class, ResultHandler.class}),})
public class DynamicDataSourceInterceptor implements Interceptor {

    // BUGFIX(idiom): loggers are one static final instance per class, not a per-object field
    private static final Logger logger = LoggerFactory.getLogger(DynamicDataSourceInterceptor.class);

    // \u0020 is a space: any SQL containing "insert ", "delete " or "update " is treated as a write
    private static final String REGEX = ".*insert\\u0020.*|.*delete\\u0020.*|.*update\\u0020.*";

    /**
     * Decides which data source the current statement should use and stores the
     * decision before proceeding with the invocation.
     *
     * @param invocation the intercepted Executor call
     * @return the result of the underlying invocation
     * @throws Throwable propagated from the underlying invocation
     */
    @Override
    public Object intercept(Invocation invocation) throws Throwable {
        // true when running inside an active Spring-managed transaction
        boolean synchronizationActive = TransactionSynchronizationManager.isActualTransactionActive();
        // default to the master (write) data source
        DynamicDataSourceGlobal lookupkey = DynamicDataSourceGlobal.DB_MASTET;
        Object[] objects = invocation.getArgs();
        MappedStatement ms = (MappedStatement)objects[0];
        if (!synchronizationActive) {
            // enums compare by identity: == is null-safe and idiomatic (was .equals)
            if (ms.getSqlCommandType() == SqlCommandType.SELECT) {
                // selectKey statements (e.g. select LAST_INSERT_ID()) must read the
                // master, because the row they inspect was just written there
                if (ms.getId().contains(SelectKeyGenerator.SELECT_KEY_SUFFIX)) {
                    lookupkey = DynamicDataSourceGlobal.DB_MASTET;
                } else {
                    // objects[1] holds the statement parameters; resolve the bound SQL
                    BoundSql boundSql = ms.getSqlSource().getBoundSql(objects[1]);
                    // lower-case and collapse tabs/newlines to spaces before matching
                    String sql = boundSql.getSql().toLowerCase(Locale.CHINA).replaceAll("[\\t\\n\\r]", " ");
                    if (sql.matches(REGEX)) {
                        // embedded insert/delete/update: route to the master
                        lookupkey = DynamicDataSourceGlobal.DB_MASTET;
                    } else {
                        // plain read: route to a slave
                        lookupkey = DynamicDataSourceGlobal.DB_SLAVE;
                    }
                }
            }
        } else {
            // inside a transaction: always use the master
            lookupkey = DynamicDataSourceGlobal.DB_MASTET;
        }
        logger.debug("设置方法{} use{} Strategy,SqlCommanType{}..", ms.getId(), lookupkey, ms.getSqlCommandType().name());
        DynamicDataSourceHolder.setDbType(lookupkey);
        return invocation.proceed();
    }

    /**
     * Wraps Executor targets with this interceptor; any other target (e.g.
     * StatementHandler, ResultSetHandler) passes through untouched.
     */
    @Override
    public Object plugin(Object target) {
        if (target instanceof Executor) {
            return Plugin.wrap(target, this);
        } else {
            return target;
        }
    }

    @Override
    public void setProperties(Properties properties) {
        // no configurable properties
    }
}
配置mybatis拦截器
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE configuration
PUBLIC "-//mybatis.org//DTD Config 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-config.dtd">
<configuration>
<settings>
<!-- Use Log4j2 for MyBatis logging; map snake_case columns to camelCase properties -->
<setting name="logImpl" value="LOG4J2"/>
<setting name="mapUnderscoreToCamelCase" value="true"/>
</settings>
<plugins>
<!-- Register the read/write routing interceptor so every statement picks a data source -->
<plugin interceptor="com.aa.bb.datasoure.DynamicDataSourceInterceptor">
</plugin>
</plugins>
</configuration>
总结:
该方式实现的读写分离,可以配置一个写库、多个读库,读库的访问顺序可以随机,也可以轮询。数据源的选定是在 mybatis 拦截器中完成的:根据执行的 SQL 究竟是 select 还是 update/insert/delete 语句,来判断应当访问读库还是写库。但该方法有其局限性:由于是根据底层 SQL 来选定读库和写库的,如果在同一个 service 方法中先写入再读取同一张表,而读库尚未同步写库刚刚插入的数据,查询就会查不到该数据。因此该方案对主从同步的时效性要求很高。