ShardingSphere 官方声明支持任意实现 JDBC 规范的数据库,但目前官方适配的数据库只有 MySQL、Oracle、SQLServer 和 PostgreSQL,并不适配达梦(DM)数据库。如果要使用 ShardingSphere 操作达梦数据库,需要对 ShardingSphere 进行扩展,具体操作步骤如下:
1.注意:一定要使用最新版本的达梦驱动依赖!
<dependency>
<groupId>com.dameng</groupId>
<artifactId>DmJdbcDriver18</artifactId>
<version>8.1.3.62</version>
</dependency>
可到 Maven 中央仓库查看最新版本的驱动,或者到达梦官网下载 jar 包到本地后再手动引入:
https://mvnrepository.com/artifact/com.dameng/DmJdbcDriver18
2.在自己的项目中新建两个类:DMDatabaseType(实现 BranchDatabaseType 接口)和 DMDataSourceMetaData(实现 DataSourceMetaData 接口)
package cn.fateverse.log.shardingadapter;
import java.util.Collection;
import org.apache.shardingsphere.spi.database.type.BranchDatabaseType;
import org.apache.shardingsphere.spi.database.type.DatabaseType;
import org.apache.shardingsphere.underlying.common.database.type.DatabaseTypes;
import java.util.Collections;
/**
* @author lan
* @date 2024/1/28
*/
/**
 * Registers the DM (Dameng) database as a branch database type for ShardingSphere.
 *
 * <p>DM SQL is largely MySQL-compatible, so SQL parsing and related behavior are
 * delegated to ShardingSphere's MySQL implementation via {@link #getTrunkDatabaseType()}.
 * Discovered through SPI (META-INF/services/org.apache.shardingsphere.spi.database.type.DatabaseType).
 */
public class DMDatabaseType implements BranchDatabaseType {

    public DMDatabaseType() {
    }

    // Must match the "dm" token of the JDBC URL prefix "jdbc:dm:"; matching is case-insensitive.
    @Override
    public String getName() {
        return "DM";
    }

    // No alternative JDBC URL prefixes for DM.
    @Override
    public Collection<String> getJdbcUrlPrefixAlias() {
        return Collections.emptyList();
    }

    /**
     * Parses host/port/catalog metadata from a DM JDBC URL.
     *
     * @param url      DM JDBC url, e.g. {@code jdbc:dm://host:5236?schema=LOG}
     * @param username database user name (unused by DM metadata parsing)
     * @return metadata describing the DM data source
     */
    @Override
    public DMDataSourceMetaData getDataSourceMetaData(final String url, final String username) {
        return new DMDataSourceMetaData(url);
    }

    // Treat DM as a branch of MySQL: reuse MySQL's SQL parsing and routing behavior.
    @Override
    public DatabaseType getTrunkDatabaseType() {
        return DatabaseTypes.getActualDatabaseType("MySQL");
    }
}
package cn.fateverse.log.shardingadapter;
import com.google.common.base.Strings;
import lombok.Getter;
import org.apache.shardingsphere.spi.database.metadata.DataSourceMetaData;
import org.apache.shardingsphere.underlying.common.database.metadata.UnrecognizedDatabaseURLException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
 * Data source meta data for the DM (Dameng) database.
 *
 * <p>Parses host, port and catalog from a DM JDBC URL such as
 * {@code jdbc:dm://host:5236?schema=LOG&amp;useUnicode=true}. When the URL
 * omits the port, the DM default port 5236 is used.
 *
 * @author lan
 * @date 2024/1/28
 */
public final class DMDataSourceMetaData implements DataSourceMetaData {

    private static final int DEFAULT_PORT = 5236;

    // Accepts "jdbc:dm://host", "jdbc:dm://host:port" and an optional trailing
    // "?key=value&..." query section. Group 3 is a repeated group, so it holds
    // the LAST repetition that matched — exposed below as the catalog.
    // NOTE(review): compiled once as a class constant instead of per instance.
    private static final Pattern PATTERN =
            Pattern.compile("jdbc:dm://([\\w\\-\\.]+):?([0-9]*)/?(\\w+=\\w+&?)*", Pattern.CASE_INSENSITIVE);

    private final String hostName;

    private final int port;

    private final String catalog;

    // DM URLs carry no schema segment here; kept null for interface compatibility.
    private final String schema;

    /**
     * @param url DM JDBC url to parse
     * @throws UnrecognizedDatabaseURLException if the url does not match the DM url pattern
     */
    public DMDataSourceMetaData(final String url) {
        Matcher matcher = PATTERN.matcher(url);
        if (!matcher.find()) {
            throw new UnrecognizedDatabaseURLException(url, PATTERN.pattern());
        }
        hostName = matcher.group(1);
        String portText = matcher.group(2);
        port = (portText == null || portText.isEmpty()) ? DEFAULT_PORT : Integer.parseInt(portText);
        catalog = matcher.group(3);
        schema = null;
    }

    public String getHostName() {
        return this.hostName;
    }

    public int getPort() {
        return this.port;
    }

    public String getCatalog() {
        return this.catalog;
    }

    public String getSchema() {
        return this.schema;
    }

    public Pattern getPattern() {
        return PATTERN;
    }
}
利用spi机制,Resource下面创建META-INF/services 目录里创建一个以服务接口命名的文件,shardingSphere的文件名称为:org.apache.shardingsphere.spi.database.type.DatabaseType,在文件中加上DMDatabaseType类的路径。
效果图为:
以上步骤就是给shardingSphere进行了外部拓展,使其适配了达梦数据库。
3.配置文件
# 数据源
spring:
# 配置Sharding-JDBC的分片策略
  # 配置数据源,给数据源起名 log1,log2...此处可配置多数据源
shardingsphere:
datasource:
names: log1
# names: log1,log2
# 配置数据源具体内容————————包含 连接池, 驱动, 地址, 用户名, 密码
      # 由于上面 names 只启用了 log1,因此下面只需配置 log1 的 type、driver-class-name、jdbc-url、username、password(log2 为备用的第二数据源示例)
log1:
type: com.zaxxer.hikari.HikariDataSource
driver-class-name: dm.jdbc.driver.DmDriver
jdbc-url: jdbc:dm://1.19.196.93:36236?schema=LOG&zeroDateTimeBehavior=convertToNull&useUnicode=true&characterEncoding=utf-8
username: SYS11DBA
password: SYSD11BA
hikari:
minimum-idle: 5
maximum-pool-size: 20
log2:
type: com.zaxxer.hikari.HikariDataSource
driver-class-name: dm.jdbc.driver.DmDriver
username: SYSDqweBA
password: SYSqwDBA
jdbc-url: jdbc:dm://11.29.196.93:36236?schema=SLOG&zeroDateTimeBehavior=convertToNull&useUnicode=true&characterEncoding=utf-8
hikari:
minimum-idle: 5
maximum-pool-size: 20
sharding:
tables:
sys_login_infor:
# key-generator:
# column: info_id
# type: SNOWFLAKE
actual-data-nodes: log1.sys_login_infor_$->{1..2}
# actual-data-nodes: log$->{1..2}.sys_login_infor_$->{1..2}
# database-strategy:
# inline:
# sharding-column: login_time
# algorithm-expression: log$->{login_time % 2 +1}
table-strategy:
standard:
sharding-column: info_id
precise-algorithm-class-name: cn.fateverse.log.configuration.TablePreciseShardingAlgorithm
sys_operation_log:
# key-generator:
# column: user_id
# type: SNOWFLAKE
actual-data-nodes: log1.sys_operation_log_$->{1..2}
# actual-data-nodes: log$->{1..2}.sys_operation_log_$->{1..2}
# database-strategy:
# inline:
# sharding-column: oper_time
# algorithm-expression: log$->{oper_time % 2 +1}
table-strategy:
standard:
sharding-column: oper_id
precise-algorithm-class-name: cn.fateverse.log.configuration.TablePreciseShardingAlgorithm