上一篇写了分库分表以及不分库只分表的策略。这一篇我按照公司目前日志的分表结构做了一个 demo。以前数据库分表时我没有用中间件,自己硬生生做各种判断、加各种循环、做各种表名分析来查询,既容易出错也不够健壮;现在改用插件来做这个 demo,以后分表就可以直接用中间件操作了。
1.在MySQL中加入两个测试表当作日志表
2.三个测试字段,主键自增
3.然后老规矩还是pom
<!-- Spring Boot parent: supplies dependency management (BOM) for the dependencies below -->
<parent>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>2.1.4.RELEASE</version>
<relativePath/> <!-- lookup parent from repository -->
</parent>
<!-- Sharding-JDBC core (groupId io.shardingjdbc is the 2.x line) -->
<dependency>
<groupId>io.shardingjdbc</groupId>
<artifactId>sharding-jdbc-core</artifactId>
<version>2.0.3</version>
</dependency>
<!-- Druid connection pool -->
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>1.1.6</version>
</dependency>
<!-- MySQL JDBC driver; version omitted on purpose — presumably managed by the Spring Boot parent BOM (verify) -->
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<scope>runtime</scope>
</dependency>
4.配置文件配置数据源
server:
  port: 9999
spring:
  application:
    name: sharding-jdbc
  jackson:
    time-zone: GMT+8
    date-format: yyyy-MM-dd HH:mm:ss
    default-property-inclusion: non_null
  datasource:
    type: com.alibaba.druid.pool.DruidDataSource
    # Physical data source 0 (schema mall_0) — read by ShardingConfig via @Value
    ds0:
      url: jdbc:mysql://localhost:3306/mall_0?serverTimezone=UTC&useUnicode=true&characterEncoding=utf8
      driverClassName: com.mysql.cj.jdbc.Driver
      username: root
      password: root
    # Physical data source 1 (schema mall_1)
    ds1:
      url: jdbc:mysql://xxxx:3307/mall_1?serverTimezone=UTC&useUnicode=true&characterEncoding=utf8
      driverClassName: com.mysql.cj.jdbc.Driver
      username: root
      password: root
5.开始启动注入分表规则
package com.itdf.config;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import javax.sql.DataSource;
import com.alibaba.druid.support.http.StatViewServlet;
import io.shardingjdbc.core.api.ShardingDataSourceFactory;
import io.shardingjdbc.core.api.config.ShardingRuleConfiguration;
import io.shardingjdbc.core.api.config.TableRuleConfiguration;
import io.shardingjdbc.core.api.config.strategy.InlineShardingStrategyConfiguration;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.web.servlet.ServletRegistrationBean;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.alibaba.druid.filter.Filter;
import com.alibaba.druid.filter.stat.StatFilter;
import com.alibaba.druid.pool.DruidDataSource;
import com.google.common.collect.Lists;
import org.springframework.context.annotation.Primary;
/**
* 配置分库分表操作
* 首先第一步,先到配置文件去获取两个数据源的连接信息
* 第二步设置分库分表的规则
* 第三步将规则放入ShardingJDBC配置里,然后可以设置分库分表的sql显示等等
* 之后将数据源信息都分库分表信息全部传入ShardingJDBC配置,返回给我们DataSource资源
*
* @Author df
* @Date 2019/8/28 14:17
* @Version 1.0
*/
@Configuration
public class ShardingConfig {

    @Value("${spring.datasource.ds0.url}")
    private String ds0_url;
    @Value("${spring.datasource.ds0.driverClassName}")
    private String ds0_driverClassName;
    @Value("${spring.datasource.ds0.username}")
    private String ds0_username;
    @Value("${spring.datasource.ds0.password}")
    private String ds0_password;
    @Value("${spring.datasource.ds1.url}")
    private String ds1_url;
    @Value("${spring.datasource.ds1.driverClassName}")
    private String ds1_driverClassName;
    @Value("${spring.datasource.ds1.username}")
    private String ds1_username;
    @Value("${spring.datasource.ds1.password}")
    private String ds1_password;

    /**
     * Builds the Sharding-JDBC {@link DataSource} that all JDBC access goes through.
     *
     * @return the sharding-aware data source
     * @throws SQLException if the sharding data source cannot be created
     */
    @Bean
    @Primary
    public DataSource dataSource() throws SQLException {
        // Register both physical data sources under the names the rules refer to.
        Map<String, DataSource> dataSourceMap = new HashMap<>();
        DruidDataSource dataSource0 = createDb0();
        dataSourceMap.put("ds0", dataSource0);
        dataSourceMap.put("ds1", createDb1());
        // Build the table rule from the monthly log tables that actually exist
        // in ds0, so queries can be routed to every existing test_log_YYYYMM table.
        TableRuleConfiguration tableRuleConf =
                getUserTableRuleConfiguration(getTimeTable.getTimeStr(dataSource0));
        ShardingRuleConfiguration shardingRuleConf = new ShardingRuleConfiguration();
        shardingRuleConf.getTableRuleConfigs().add(tableRuleConf);
        Properties p = new Properties();
        // Log every routed SQL statement to the console.
        p.setProperty("sql.show", Boolean.TRUE.toString());
        // Let SQLException propagate (the method already declares it) instead of
        // swallowing it and returning a null DataSource bean, which would only
        // fail later with an obscure NPE.
        return ShardingDataSourceFactory.createDataSource(
                dataSourceMap, shardingRuleConf, new ConcurrentHashMap<>(), p);
    }

    /**
     * Builds the table-sharding rule for the monthly log tables.
     *
     * @param timeStr inline-expression range such as "201908..201909" covering
     *                all existing table suffixes (produced by getTimeTable)
     * @return the rule configuration for logic table "test_log_"
     */
    private TableRuleConfiguration getUserTableRuleConfiguration(String timeStr) {
        TableRuleConfiguration tableRuleConfiguration = new TableRuleConfiguration();
        // Logic table name used in application SQL.
        tableRuleConfiguration.setLogicTable("test_log_");
        // Enumerate every physical table (e.g. ds0.test_log_${201908..201909})
        // so SELECTs can locate all monthly tables.
        tableRuleConfiguration.setActualDataNodes("ds0.test_log_${" + timeStr + "}");
        // Generated-key column.
        tableRuleConfiguration.setKeyGeneratorColumnName("test_id");
        // Tables are named test_log_YYYYMM and rows are stored by current month,
        // so INSERTs are routed to the table whose suffix is the current yyyyMM.
        tableRuleConfiguration.setTableShardingStrategyConfig(
                new InlineShardingStrategyConfiguration(
                        "test_id", "test_log_${" + getTimeTable.getCurrTime() + "}"));
        return tableRuleConfiguration;
    }

    /**
     * Creates the Druid pool for the first physical data source (ds0).
     */
    private DruidDataSource createDb0() {
        DruidDataSource dataSource = new DruidDataSource();
        dataSource.setDriverClassName(ds0_driverClassName);
        dataSource.setUrl(ds0_url);
        dataSource.setUsername(ds0_username);
        dataSource.setPassword(ds0_password);
        dataSource.setProxyFilters(Lists.newArrayList(statFilter()));
        // Maximum pooled connections.
        dataSource.setMaxActive(20);
        // Minimum idle connections kept in the pool.
        dataSource.setMinIdle(5);
        return dataSource;
    }

    /**
     * Creates the Druid pool for the second physical data source (ds1).
     */
    private DruidDataSource createDb1() {
        DruidDataSource dataSource = new DruidDataSource();
        dataSource.setDriverClassName(ds1_driverClassName);
        dataSource.setUrl(ds1_url);
        dataSource.setUsername(ds1_username);
        dataSource.setPassword(ds1_password);
        dataSource.setProxyFilters(Lists.newArrayList(statFilter()));
        // Maximum pooled connections.
        dataSource.setMaxActive(20);
        // Minimum idle connections kept in the pool.
        dataSource.setMinIdle(5);
        return dataSource;
    }

    /**
     * Druid statistics filter: merges identical SQL and logs statements slower
     * than 5 seconds.
     */
    @Bean
    public Filter statFilter() {
        StatFilter filter = new StatFilter();
        filter.setSlowSqlMillis(5000);
        filter.setLogSlowSql(true);
        filter.setMergeSql(true);
        return filter;
    }

    /**
     * Registers the Druid monitoring console at /druid/*.
     */
    @Bean
    public ServletRegistrationBean statViewServlet() {
        ServletRegistrationBean servletRegistrationBean =
                new ServletRegistrationBean(new StatViewServlet(), "/druid/*");
        // IP allow list.
        servletRegistrationBean.addInitParameter("allow", "127.0.0.1");
        // IP deny list; deny wins over allow when both are present.
        servletRegistrationBean.addInitParameter("deny", "192.168.0.19");
        // Console login credentials.
        servletRegistrationBean.addInitParameter("loginUsername", "admin");
        servletRegistrationBean.addInitParameter("loginPassword", "123456");
        // Disallow resetting the collected statistics.
        servletRegistrationBean.addInitParameter("resetEnable", "false");
        return servletRegistrationBean;
    }
}
6.用java8获取当前时间,再获取数据库里所有关于日志表的表名进行操作
package com.itdf.config;
import javax.sql.DataSource;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.time.LocalDate;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* 获取需要的表名
*
* @Author df
* @Date 2019/8/30 15:44
* @Version 1.0
*/
public class getTimeTable {

    /**
     * Builds the inline-expression range covering the first and last monthly
     * log tables found in schema mall_0, e.g. "201908..201909".
     *
     * @param dataSource physical data source to query information_schema on
     * @return "firstSuffix..lastSuffix", or "" when no table exists or the
     *         query fails (best effort, matching the original behavior)
     */
    public static String getTimeStr(DataSource dataSource) {
        String str = "";
        int rowNum = 0;
        // try-with-resources: the original leaked the Connection entirely and
        // only closed the statement on the exception path.
        try (Connection conn = dataSource.getConnection();
             PreparedStatement pstmt = conn.prepareStatement(
                     "select table_name from information_schema.tables where table_schema='mall_0' and table_name like 'test_log_%'");
             ResultSet resultSet = pstmt.executeQuery()) {
            while (resultSet.next()) {
                rowNum++;
                if (rowNum == 1) {
                    // "test_log_YYYYMM" -> suffix of the earliest table.
                    str = resultSet.getString("table_name").substring(9, 15);
                }
                if (resultSet.isLast()) {
                    // Append the latest suffix: "201908..201909". With a single
                    // table this yields "201908..201908", which is still a
                    // valid inline expression.
                    str += ".." + resultSet.getString("table_name").substring(9, 15);
                }
            }
        } catch (SQLException e) {
            // Best effort: report and fall through with whatever was collected.
            e.printStackTrace();
        }
        System.out.println(str);
        return str;
    }

    /**
     * Current month in the table-suffix format yyyyMM, e.g. "201908".
     */
    public static String getCurrTime() {
        return LocalDate.now().format(DateTimeFormatter.ofPattern("yyyyMM"));
    }
}
7.然后贴出service添加和查询测试
@Resource
private ShardingConfig shardingConfig;
@Override
public void testInsert2(String test_name) throws SQLException, ParseException {
    // "HH" (24-hour clock): the original "hh" pattern is 12-hour, so every
    // afternoon insert was recorded with an hour of 01-11.
    SimpleDateFormat sf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    String format = sf.format(new Date());
    // Parameterized statement instead of string concatenation (SQL injection),
    // and try-with-resources so the connection is released even on failure.
    try (Connection connection = shardingConfig.dataSource().getConnection();
         PreparedStatement preparedStatement = connection.prepareStatement(
                 "insert into test_log_(test_name,test_time) values(?,?)")) {
        preparedStatement.setString(1, test_name);
        preparedStatement.setString(2, format);
        preparedStatement.executeUpdate();
    }
}
// Query log rows on/after the given day (time is "yyyy-MM-dd").
@Override
public List getBytime(String time) throws SQLException {
    List<Map<String, Object>> list = new ArrayList<>();
    // Parameterized query (the original concatenated user input into the SQL)
    // and try-with-resources: the original never closed the statement and then
    // closed a brand-new connection instead of the one it actually used.
    try (Connection connection = shardingConfig.dataSource().getConnection();
         PreparedStatement pstmt = connection.prepareStatement(
                 "select * from test_log_ where test_time >= ?")) {
        pstmt.setString(1, time + " 00:00:00");
        try (ResultSet resultSet = pstmt.executeQuery()) {
            while (resultSet.next()) {
                Map<String, Object> map = new HashMap<>();
                map.put("test_id", resultSet.getLong("test_id"));
                map.put("test_name", resultSet.getString("test_name"));
                map.put("test_time", resultSet.getTimestamp("test_time"));
                list.add(map);
            }
        }
    }
    return list;
}
8.贴出controller
@Autowired
private userService userService;
@RequestMapping("/add2")
@ResponseBody
public String add2(String test_name) {
    try {
        userService.testInsert2(test_name);
    } catch (SQLException | ParseException e) {
        // Multi-catch collapses the two identical catch blocks; report the
        // failure instead of answering "success" when the insert threw.
        e.printStackTrace();
        return "fail";
    }
    return "success";
}
@RequestMapping("/getBytime")
@ResponseBody
public List getBytime(String time) {
    // Delegate to the service; on failure log the error and fall through
    // to the null result, exactly as before.
    List result = null;
    try {
        result = userService.getBytime(time);
    } catch (SQLException e) {
        e.printStackTrace();
    }
    return result;
}
9,访问添加接口
10.查看数据库,那么插入成功,因为当前是2019年8月,所以它存储的是8月份的表哦
11.查看查询接口,查询成功,接口查询是大于8月28号的,2019日志表也有数据,也可以查询出来
12.查看控制台shardingjdbc的sql日志也可发现。
好了,就到这里了,很简单吧,shardingjdbc还是很方便的