Sharding-Sphere Initialization (Part 1)

1. The database middleware is wired up through a Spring configuration file; the XML configuration and the backing properties file are shown below.

    <!-- Data source 1 -->
    <bean id="separate_entity_0" class="org.apache.commons.dbcp.BasicDataSource" destroy-method="close">
        <property name="driverClassName" value="com.mysql.jdbc.Driver"/>
        <property name="url" value="${sharding.jdbc.datasource.separate_entity_0.url}"/>
        <property name="username" value="${sharding.jdbc.datasource.separate_entity_0.username}"/>
        <property name="password" value="${sharding.jdbc.datasource.separate_entity_0.password}"/>
    </bean>
    <!-- Data source 2 -->
    <bean id="separate_entity_1" class="org.apache.commons.dbcp.BasicDataSource" destroy-method="close">
        <property name="driverClassName" value="com.mysql.jdbc.Driver"/>
        <property name="url" value="${sharding.jdbc.datasource.separate_entity_1.url}"/>
        <property name="username" value="${sharding.jdbc.datasource.separate_entity_1.username}"/>
        <property name="password" value="${sharding.jdbc.datasource.separate_entity_1.password}"/>
    </bean>
	
    <bean id="preciseModuloDatabaseShardingAlgorithm" class="com.sharding.demo.algorithm.DatabaseShardingAlgorithm" />
    <bean id="preciseModuloTableShardingAlgorithm" class="com.sharding.demo.algorithm.TableShardingAlgorithm" />
    
    <sharding:standard-strategy id="databaseShardingStrategy" sharding-column="user_id" precise-algorithm-ref="preciseModuloDatabaseShardingAlgorithm" />
    <sharding:standard-strategy id="tableShardingStrategy" sharding-column="order_id" precise-algorithm-ref="preciseModuloTableShardingAlgorithm" />
    
    <sharding:data-source id="shardingDataSource">
        <sharding:sharding-rule data-source-names="separate_entity_0,separate_entity_1">
            <sharding:table-rules>
                <sharding:table-rule logic-table="t_order"
                    actual-data-nodes="${sharding.jdbc.datasource.actual.data.nodes.order}"
                    database-strategy-ref="databaseShardingStrategy" table-strategy-ref="tableShardingStrategy"
                    generate-key-column-name="order_id" />
            </sharding:table-rules>
        </sharding:sharding-rule>
    </sharding:data-source>




sharding.jdbc.datasource.names=separate_entity_0,separate_entity_1

sharding.jdbc.datasource.separate_entity_0.url=jdbc:mysql://127.0.0.1:3306/separate_entity_0
sharding.jdbc.datasource.separate_entity_0.username=root
sharding.jdbc.datasource.separate_entity_0.password=

sharding.jdbc.datasource.separate_entity_1.url=jdbc:mysql://127.0.0.1:3306/separate_entity_1
sharding.jdbc.datasource.separate_entity_1.username=root
sharding.jdbc.datasource.separate_entity_1.password=

sharding.jdbc.datasource.actual.data.nodes.order=separate_entity_$->{0..1}.t_order_$->{0..1}
sharding.jdbc.datasource.actual.data.nodes.orderitem=separate_entity_$->{0..1}.t_order_item_$->{0..1}
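
The two algorithm beans referenced in the XML, DatabaseShardingAlgorithm and TableShardingAlgorithm, are user-supplied precise sharding algorithms and are not shown in this post. The snippet below is a minimal, modulo-based sketch of what DatabaseShardingAlgorithm could look like; the imports assume the Sharding-Sphere 3.x package layout and may need adjusting for other versions. TableShardingAlgorithm would follow the same pattern, keyed on order_id and choosing between t_order_0 and t_order_1.

package com.sharding.demo.algorithm;

import io.shardingsphere.api.algorithm.sharding.PreciseShardingValue;
import io.shardingsphere.api.algorithm.sharding.standard.PreciseShardingAlgorithm;

import java.util.Collection;

// Sketch only: routes by user_id % 2 to separate_entity_0 / separate_entity_1.
public class DatabaseShardingAlgorithm implements PreciseShardingAlgorithm<Long> {

    @Override
    public String doSharding(final Collection<String> availableTargetNames, final PreciseShardingValue<Long> shardingValue) {
        for (String each : availableTargetNames) {
            // availableTargetNames holds the data source names from data-source-names.
            if (each.endsWith(String.valueOf(shardingValue.getValue() % 2))) {
                return each;
            }
        }
        throw new UnsupportedOperationException("No data source matches sharding value " + shardingValue.getValue());
    }
}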

2. When the service starts, the SpringShardingDataSource is initialized first. This example is plain database and table sharding, with no master-slave configuration.

public SpringShardingDataSource(final Map<String, DataSource> dataSourceMap, 
                                    final ShardingRuleConfiguration shardingRuleConfig, final Map<String, Object> configMap, final Properties props) throws SQLException {
        super(getRawDataSourceMap(dataSourceMap), new ShardingRule(addMasterSlaveRuleConfigurations(dataSourceMap, shardingRuleConfig), dataSourceMap.keySet()), configMap, props);
    }

The ShardingRule is then built from the parsed configuration. Because the XML only configures table rules (no default sharding strategies, binding tables, or master-slave rules), shardingRuleConfig contains nothing but the table-rule configuration:

public ShardingRule(final ShardingRuleConfiguration shardingRuleConfig, final Collection<String> dataSourceNames) {
        Preconditions.checkNotNull(dataSourceNames, "Data sources cannot be null.");
        Preconditions.checkArgument(!dataSourceNames.isEmpty(), "Data sources cannot be empty.");
        this.shardingRuleConfig = shardingRuleConfig;
        shardingDataSourceNames = new ShardingDataSourceNames(shardingRuleConfig, dataSourceNames);
        for (TableRuleConfiguration each : shardingRuleConfig.getTableRuleConfigs()) {
            tableRules.add(new TableRule(each, shardingDataSourceNames));
        }
        for (String group : shardingRuleConfig.getBindingTableGroups()) {
            List<TableRule> tableRulesForBinding = new LinkedList<>();
            for (String logicTableNameForBindingTable : StringUtil.splitWithComma(group)) {
                tableRulesForBinding.add(getTableRule(logicTableNameForBindingTable));
            }
            bindingTableRules.add(new BindingTableRule(tableRulesForBinding));
        }
        defaultDatabaseShardingStrategy = null == shardingRuleConfig.getDefaultDatabaseShardingStrategyConfig()
                ? new NoneShardingStrategy() : ShardingStrategyFactory.newInstance(shardingRuleConfig.getDefaultDatabaseShardingStrategyConfig());
        defaultTableShardingStrategy = null == shardingRuleConfig.getDefaultTableShardingStrategyConfig()
                ? new NoneShardingStrategy() : ShardingStrategyFactory.newInstance(shardingRuleConfig.getDefaultTableShardingStrategyConfig());
        defaultKeyGenerator = null == shardingRuleConfig.getDefaultKeyGenerator() ? new DefaultKeyGenerator() : shardingRuleConfig.getDefaultKeyGenerator();
        for (MasterSlaveRuleConfiguration each : shardingRuleConfig.getMasterSlaveRuleConfigs()) {
            masterSlaveRules.add(new MasterSlaveRule(each));
        }
    }

Next the table rule is parsed. The logic table is the table name as it was before sharding; after sharding it maps to the actual physical tables. The actual-data-nodes expression is evaluated into data nodes, each composed of a data source name and a table name (see the expansion sketch after the constructor below). The database and table sharding strategy implementations are resolved, and the generated-key column (generateKeyColumn) is recorded, which in this example is order_id.

public TableRule(final TableRuleConfiguration tableRuleConfig, final ShardingDataSourceNames shardingDataSourceNames) {
        Preconditions.checkNotNull(tableRuleConfig.getLogicTable(), "Logic table cannot be null.");
        logicTable = tableRuleConfig.getLogicTable().toLowerCase();
        List<String> dataNodes = new InlineExpressionParser(tableRuleConfig.getActualDataNodes()).evaluate();
        actualDataNodes = isEmptyDataNodes(dataNodes)
                ? generateDataNodes(tableRuleConfig.getLogicTable(), shardingDataSourceNames.getDataSourceNames()) : generateDataNodes(dataNodes, shardingDataSourceNames.getDataSourceNames());
        databaseShardingStrategy = null == tableRuleConfig.getDatabaseShardingStrategyConfig() ? null : ShardingStrategyFactory.newInstance(tableRuleConfig.getDatabaseShardingStrategyConfig());
        tableShardingStrategy = null == tableRuleConfig.getTableShardingStrategyConfig() ? null : ShardingStrategyFactory.newInstance(tableRuleConfig.getTableShardingStrategyConfig());
        generateKeyColumn = tableRuleConfig.getKeyGeneratorColumnName();
        keyGenerator = tableRuleConfig.getKeyGenerator();
        logicIndex = null == tableRuleConfig.getLogicIndex() ? null : tableRuleConfig.getLogicIndex().toLowerCase();
    }
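
For reference, here is a standalone illustration (plain Java, not the library's InlineExpressionParser; the class name is only for the demo) of the data nodes that the actual-data-nodes expression in this example evaluates to:

import java.util.ArrayList;
import java.util.List;

public class DataNodeExpansionDemo {

    public static void main(final String[] args) {
        // Mirrors what separate_entity_$->{0..1}.t_order_$->{0..1} expands to.
        List<String> dataNodes = new ArrayList<>();
        for (int ds = 0; ds <= 1; ds++) {
            for (int table = 0; table <= 1; table++) {
                dataNodes.add("separate_entity_" + ds + ".t_order_" + table);
            }
        }
        // Prints: [separate_entity_0.t_order_0, separate_entity_0.t_order_1,
        //          separate_entity_1.t_order_0, separate_entity_1.t_order_1]
        System.out.println(dataNodes);
    }
}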

The data source type (i.e. the database type) is recorded, and an ExecutorEngine thread pool is created to execute the SQL requests against each data source.

 public ShardingDataSource(final Map<String, DataSource> dataSourceMap, final ShardingRule shardingRule, final Map<String, Object> configMap, final Properties props) throws SQLException {
        super(dataSourceMap.values());
        if (!configMap.isEmpty()) {
            ConfigMapContext.getInstance().getShardingConfig().putAll(configMap);
        }
        shardingProperties = new ShardingProperties(null == props ? new Properties() : props);
        int executorSize = shardingProperties.getValue(ShardingPropertiesConstant.EXECUTOR_SIZE);
        executorEngine = new ExecutorEngine(executorSize);
        ShardingMetaData shardingMetaData = new JDBCShardingMetaData(dataSourceMap, shardingRule, getDatabaseType());
        shardingMetaData.init(shardingRule);
        boolean showSQL = shardingProperties.getValue(ShardingPropertiesConstant.SQL_SHOW);
        shardingContext = new ShardingContext(dataSourceMap, shardingRule, getDatabaseType(), executorEngine, shardingMetaData, showSQL);
    }

shardingMetaData.init(shardingRule) verifies that the table structures of the four data nodes are consistent, then puts each table's column metadata into a map keyed by logic table name. The main code paths involved are JDBCShardingMetaData#getColumnMetaDataList, ShardingMetaDataHandlerFactory#newInstance, MySQLShardingMetaDataHandler#getExistColumnMeta and ShardingMetaDataHandler#getColumnMetaDataList. Finally, everything that is needed later is stored in the ShardingContext.

 public void refresh(final TableRule each, final ShardingRule shardingRule, final Map<String, Connection> connectionMap) throws SQLException {
        tableMetaDataMap.put(each.getLogicTable(), getTableMetaData(each.getLogicTable(), each.getActualDataNodes(), shardingRule.getShardingDataSourceNames(), connectionMap));
    }

    private TableMetaData getTableMetaData(final String logicTableName, final List<DataNode> actualDataNodes,
                                           final ShardingDataSourceNames shardingDataSourceNames, final Map<String, Connection> connectionMap) throws SQLException {
        Collection<ColumnMetaData> result = null;
        for (DataNode each : actualDataNodes) {
            Collection<ColumnMetaData> columnMetaDataList = getColumnMetaDataList(each, shardingDataSourceNames, connectionMap);
            if (null == result) {
                result = columnMetaDataList;
            }
            if (!result.equals(columnMetaDataList)) {
                throw new ShardingException(getErrorMsgOfTableMetaData(logicTableName, result, columnMetaDataList));
            }
        }
        return new TableMetaData(result);
    }

3. When a SQL operation is executed, a database connection, the ShardingConnection, is first obtained from the ShardingDataSource:

public ShardingConnection getConnection() {
        return new ShardingConnection(shardingContext);
    }

The following walks through INSERT INTO t_order (user_id, status, order_id) VALUES (?, ?, ?) as the example statement.

 public PreparedStatement prepareStatement(final String sql) {
        return new ShardingPreparedStatement(this, sql);
    }

A PreparedStatementRoutingEngine is created to parse the statement and route it to the concrete SQL targets:

public ShardingPreparedStatement(final ShardingConnection connection, final String sql, final int resultSetType, final int resultSetConcurrency, final int resultSetHoldability) {
        this.connection = connection;
        this.resultSetType = resultSetType;
        this.resultSetConcurrency = resultSetConcurrency;
        this.resultSetHoldability = resultSetHoldability;
        ShardingContext shardingContext = connection.getShardingContext();
        routingEngine = new PreparedStatementRoutingEngine(
                sql, shardingContext.getShardingRule(), shardingContext.getShardingMetaData(), shardingContext.getDatabaseType(), shardingContext.isShowSQL());
    }

An appropriate SQL router is selected; in this case it is ParsingSQLRouter:

 public PreparedStatementRoutingEngine(final String logicSQL, final ShardingRule shardingRule, final ShardingMetaData shardingMetaData, final DatabaseType databaseType, final boolean showSQL) {
        this.logicSQL = logicSQL;
        shardingRouter = ShardingRouterFactory.createSQLRouter(shardingRule, shardingMetaData, databaseType, showSQL);
        masterSlaveRouter = new ShardingMasterSlaveRouter(shardingRule.getMasterSlaveRules());
    }

 public static ShardingRouter createSQLRouter(final ShardingRule shardingRule, final ShardingMetaData shardingMetaData, final DatabaseType databaseType, final boolean showSQL) {
        return HintManagerHolder.isDatabaseShardingOnly() ? new DatabaseHintSQLRouter(shardingRule, showSQL) : new ParsingSQLRouter(shardingRule, shardingMetaData, databaseType, showSQL);
    }

Parameter values are bound in ShardingPreparedStatement's parent class AbstractShardingPreparedStatementAdapter; the setters for all data types eventually call the method below.

private void setParameter(final int parameterIndex, final Object value) {
        if (parameters.size() == parameterIndex - 1) {
            parameters.add(value);
            return;
        }
        for (int i = parameters.size(); i <= parameterIndex - 1; i++) {
            parameters.add(null);
        }
        parameters.set(parameterIndex - 1, value);
    }
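
Putting the pieces together, application code only ever touches the standard JDBC API. Below is a minimal usage sketch for the example INSERT, assuming shardingDataSource is the bean defined in the XML above; the bound values are illustrative only.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import javax.sql.DataSource;

public class OrderInsertExample {

    public void insertOrder(final DataSource shardingDataSource) throws SQLException {
        String sql = "INSERT INTO t_order (user_id, status, order_id) VALUES (?, ?, ?)";
        try (Connection connection = shardingDataSource.getConnection();       // ShardingConnection
             PreparedStatement statement = connection.prepareStatement(sql)) { // ShardingPreparedStatement
            statement.setLong(1, 10L);      // user_id drives database routing
            statement.setString(2, "INIT"); // status
            statement.setLong(3, 1001L);    // order_id drives table routing
            statement.execute();            // parse, route and execute on the target data node
        }
    }
}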

4. The actual execution is performed in ShardingPreparedStatement#execute:

public boolean execute() throws SQLException {
        try {
            Collection<PreparedStatementUnit> preparedStatementUnits = route();
            return new PreparedStatementExecutor(
                    getConnection().getShardingContext().getExecutorEngine(), routeResult.getSqlStatement().getType(), preparedStatementUnits).execute();
        } finally {
            JDBCShardingRefreshHandler.build(routeResult, connection).execute();
            clearBatch();
        }
    }

The routing result, a SQLRouteResult, is obtained from the PreparedStatementRoutingEngine:

public SQLRouteResult route(final List<Object> parameters) {
        if (null == sqlStatement) {
            sqlStatement = shardingRouter.parse(logicSQL, true);
        }
        return masterSlaveRouter.route(shardingRouter.route(logicSQL, parameters, sqlStatement));
    }

ParsingSQLRouter#parse first parses the SQL statement:

 public SQLStatement parse(final String logicSQL, final boolean useCache) {
        return new SQLParsingEngine(databaseType, logicSQL, shardingRule, shardingMetaData).parse(useCache);
    }

The parsing cache is checked first for an already parsed statement. On a miss, MySQL lexing is performed via new LexerEngine(new MySQLLexer(sql)), and the statement type is obtained through currentToken = new Tokenizer(input, dictionary, offset).scanIdentifier():

public SQLStatement parse(final boolean useCache) {
        Optional<SQLStatement> cachedSQLStatement = getSQLStatementFromCache(useCache);
        if (cachedSQLStatement.isPresent()) {
            return cachedSQLStatement.get();
        }
        LexerEngine lexerEngine = LexerEngineFactory.newInstance(dbType, sql);
        lexerEngine.nextToken();
        SQLStatement result = SQLParserFactory.newInstance(dbType, lexerEngine.getCurrentToken().getType(), shardingRule, lexerEngine, shardingMetaData).parse();
        if (useCache) {
            ParsingResultCache.getInstance().put(sql, result);
        }
        return result;
    }

Finally, the first literal segment of the string is scanned, matched against the keyword dictionary, and wrapped into the corresponding Token:

public Token scanIdentifier() {
        if ('`' == charAt(offset)) {
            int length = getLengthUntilTerminatedChar('`');
            return new Token(Literals.IDENTIFIER, input.substring(offset, offset + length), offset + length);
        }
        int length = 0;
        while (isIdentifierChar(charAt(offset + length))) {
            length++;
        }
        String literals = input.substring(offset, offset + length);
        if (isAmbiguousIdentifier(literals)) {
            return new Token(processAmbiguousIdentifier(offset + length, literals), literals, offset + length);
        }
        return new Token(dictionary.findTokenType(literals, Literals.IDENTIFIER), literals, offset + length);
    }
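
As a simplified illustration of the loop above (plain Java, not the library's Tokenizer, and assuming isIdentifierChar accepts letters, digits, '_' and '$'), scanning the example statement extracts INSERT as the first identifier; the dictionary lookup then turns it into the INSERT keyword token.

public class ScanIdentifierDemo {

    public static void main(final String[] args) {
        String input = "INSERT INTO t_order (user_id, status, order_id) VALUES (?, ?, ?)";
        int offset = 0;
        int length = 0;
        while (offset + length < input.length() && isIdentifierChar(input.charAt(offset + length))) {
            length++;
        }
        // Prints "INSERT"; dictionary.findTokenType() would map it to the INSERT keyword,
        // which SQLParserFactory uses to choose the insert parser.
        System.out.println(input.substring(offset, offset + length));
    }

    private static boolean isIdentifierChar(final char ch) {
        return Character.isLetterOrDigit(ch) || '_' == ch || '$' == ch;
    }
}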

The statement is matched to the DML type via SQLParserFactory#newInstance and InsertParserFactory#newInstance, which create the parser classes responsible for each clause of the INSERT statement:

public MySQLInsertClauseParserFacade(final ShardingRule shardingRule, final LexerEngine lexerEngine) {
        super(new MySQLInsertIntoClauseParser(shardingRule, lexerEngine), new InsertColumnsClauseParser(shardingRule, lexerEngine), 
                new MySQLInsertValuesClauseParser(shardingRule, lexerEngine), new MySQLInsertSetClauseParser(shardingRule, lexerEngine));
    }

public MySQLInsertParser(final ShardingRule shardingRule, final LexerEngine lexerEngine, final ShardingMetaData shardingMetaData) {
        super(shardingRule, shardingMetaData, lexerEngine, new MySQLInsertClauseParserFacade(shardingRule, lexerEngine));
    }

 
