oceanus-58总体框架理解

1.总体思路:
通过约定的XML规则(分表分库规则)和 封装jdbc的Connection和PreparedStatement来实现SQL解析,sql路由和sql重写。

2. 3个核心类:ConnectionWrapper(JDBC Connection包装),PreparedStatementWrapper( JDBC PreparedStatement包装),SimpleExecutor(sql执行器,类似mybatis的SimpleExecutor)

3. 3个上下文传参数:ConnectionContext,StatementContext,TransactionContext。

4. 真正干活的类:

(1)DefaultStatementContextBuilder类:解析SQL并保存BatchItem到StatementContext,--作者貌似直接用的mycat里面的sql解析的代码,直接拿来主义实现价值啊。

/**
 * Parses the given SQL, dispatches it to the handler matching its statement
 * type, and stores the resulting batch information into the StatementContext.
 *
 * @param sql     the raw SQL text to parse
 * @param context the current statement context, or {@code null} to create and
 *                register a fresh one
 * @return the context populated by the matched handler; on a parse error (or
 *         an unsupported statement type) the original, unhandled context is
 *         returned unchanged — the legacy swallow-and-continue policy is kept
 *         so callers' behavior does not change
 * @throws SQLException declared for handler failures surfaced to callers
 */
public StatementContext build(String sql, StatementContext context)
		throws SQLException {
	if (context == null) {
		context = new StatementContext();
		StatementContext.setContext(context);
		if (logger.isDebugEnabled()) {
			logger.debug("create context!sql=" + sql);
		}
	}
	if (context.getCurrentBatch().getSql() == null) {
		context.getCurrentBatch().setSql(sql);
	}

	// Batch statements bypass the SQL parser entirely and go straight to
	// the BATCH handler.
	if (context.isBatch()) {
		StatementContextHandler handler = HandlerFactory
				.create(StatementType.BATCH);
		StatementContext resultContext = handler.handle(sql, context);
		processPreparedValues(resultContext);
		return resultContext;
	}

	TrackerExecutor.trackBegin(TrackPoint.PARSE_SQL, sql);
	SQLParser parser = StatementHelper.createSQLParser();
	try {
		DMLStatementNode statementNode = (DMLStatementNode) parser
				.parseStatement(sql);
		StatementContextHandler handler = createHandler(statementNode
				.getNodeType());
		if (handler == null) {
			// Previously an unmatched node type left handler null and the
			// resulting NullPointerException was silently swallowed below;
			// log it explicitly instead.
			logger.error("unsupported statement node type: {}, sql={}",
					statementNode.getNodeType(), sql);
		} else {
			StatementContext resultContext = handler.handle(statementNode,
					context);
			TrackerExecutor.trackEnd(TrackPoint.PARSE_SQL);
			processPreparedValues(resultContext);
			return resultContext;
		}
	} catch (StandardException se) {
		// Parse failure: log with cause and the failing SQL (was
		// System.out.println + printStackTrace), then fall through to the
		// unprocessed context for backward compatibility.
		logger.error("sql parse error, sql:" + sql, se);
	} catch (Exception e) {
		logger.error("error building statement context, sql:" + sql, e);
	}
	processPreparedValues(context);
	return context;
}

/**
 * Maps a parser node type to the statement handler responsible for it.
 *
 * @param nodeType one of the {@code NodeTypes} constants
 * @return the matching handler, or {@code null} for unsupported node types
 */
private StatementContextHandler createHandler(int nodeType) {
	switch (nodeType) {
	case NodeTypes.CURSOR_NODE:
		return HandlerFactory.create(StatementType.SELECT);
	case NodeTypes.DELETE_NODE:
		return HandlerFactory.create(StatementType.DELETE);
	case NodeTypes.UPDATE_NODE:
		return HandlerFactory.create(StatementType.UPDATE);
	case NodeTypes.INSERT_NODE:
		return HandlerFactory.create(StatementType.INSERT);
	case NodeTypes.CALL_STATEMENT_NODE:
		return HandlerFactory.create(StatementType.CALLABLE);
	default:
		return null;
	}
}

(2)DefaultTargetDispatcher:根据batchItem和batchItem中的tableInfo获取路由信息和重写SQL,达到路由到指定的分库分表的目的


/**
 * Resolves the concrete route targets (shards) for one batch item: evaluates
 * the sharding function over the column values extracted from the WHERE
 * clause, rewrites limit/offset parameters when the query fans out to more
 * than one shard, then builds one RouteTarget — with its final execute SQL —
 * per matched name node.
 */
Set<RouteTarget> getSpecifyTargets(TableInfo tableInfo, BatchItem batchItem) {
Set<RouteTarget> targetSet = new LinkedHashSet<RouteTarget>();
Configurations configurations = Configurations.getInstance();

/**
 * Extract from the WHERE clause the values of the columns configured as
 * sharding keys for this table.
 */
Map<String, List<TableColumn>> resolveColumns = RouteHelper
.getResolveColumns(tableInfo.getOrgName(),
batchItem.getAnalyzeResult());
List<Map<String, Object>> parameters = RouteHelper
.getParameterValues(resolveColumns);
Set<Integer> indexs = new HashSet<Integer>();
TableDescription desc = configurations.getTableDescription(tableInfo
.getOrgName());
List<NameNodeHolder> nameNodes = desc.getNameNodes();
Function func = desc.getFunction();
// Sharding function: maps each parameter-value combination to the index of
// a name node declared under the table's configuration.
for (Map<String, Object> item : parameters) {
checkParameters(item, batchItem);
int i = func.execute(nameNodes.size(), item);
indexs.add(i);
}

// At least one shard matched: fire the HAVING-clause callback if present.
if (indexs.size() > 0) {
AnalyzeResult analyzeResult = batchItem.getAnalyzeResult();
HavingInfo havingInfo = analyzeResult.getHavingInfo();
if (havingInfo != null) {
AnalyzerCallback callback = havingInfo.getCallback();
if (callback != null) {
callback.call();
}
}
}
if ((!batchItem.getAnalyzeResult().getAppendResultColumns().isEmpty() || batchItem
.getAnalyzeResult().getLimit() != null) && indexs.size() > 1) {// LIMIT or an aggregate (e.g. AVG) is present and more than one shard is hit: the SQL must be rewritten
Collection<AnalyzerCallback> analyzerCallbacks = batchItem
.getAnalyzeResult().getAnalyzerCallbacks();
if (batchItem.getAnalyzeResult().getLimit() != null) {
SqlValueItem limitItem = batchItem.getAnalyzeResult()
.getLimit();
SqlValueItem offsetItem = batchItem.getAnalyzeResult()
.getOffset();
// A missing OFFSET is treated as 0 so the arithmetic below works.
if(offsetItem==null){
offsetItem=new SqlValueItem();
offsetItem.setValue(0);
}
// Multi-shard fan-out: each shard is asked for offset+limit rows
// starting at 0 — presumably a later merge step applies the real
// offset/limit; verify against the result-merging code.
if (limitItem.getParameterIndex() > 0
&& offsetItem.getParameterIndex() > 0) {// limit ?,?
Integer limitSize = limitItem.getValue()
+ offsetItem.getValue();
batchItem.getCallback(limitItem.getParameterIndex())
.setParameter(limitSize);
batchItem.getCallback(offsetItem.getParameterIndex())
.setParameter(0);

// NOTE(review): when the limit placeholder comes after the offset
// placeholder, the two parameter indices are swapped — looks like it
// compensates for the rewritten SQL's placeholder order; confirm.
if(limitItem.getParameterIndex() > offsetItem.getParameterIndex()){
batchItem.getCallback(offsetItem.getParameterIndex())
.setParameterIndex(limitItem.getParameterIndex());

batchItem.getCallback(limitItem.getParameterIndex())
.setParameterIndex(offsetItem.getParameterIndex());
}
} else if (limitItem.getParameterIndex() > 0) {// limit 1,?
Integer limitSize = limitItem.getValue()
+ offsetItem.getValue();
batchItem.getCallback(limitItem.getParameterIndex())
.setParameter(limitSize);
} else if (offsetItem.getParameterIndex() > 0) {// limit ?,10
batchItem.getCallback(offsetItem.getParameterIndex())
.setParameter(0);
}
}
// Run all pending analyzer callbacks (aggregate rewrites, etc.).
for (AnalyzerCallback callback : analyzerCallbacks) {
callback.call();
}
} else {// single-shard route: if "limit ?,?" is detected, just swap the parameter order
SqlValueItem limitItem = batchItem.getAnalyzeResult()
.getLimit();
SqlValueItem offsetItem = batchItem.getAnalyzeResult()
.getOffset();

if(limitItem !=null && offsetItem !=null &&
limitItem.getParameterIndex() > offsetItem.getParameterIndex()){

batchItem.getCallback(offsetItem.getParameterIndex())
.setParameterIndex(limitItem.getParameterIndex());

batchItem.getCallback(limitItem.getParameterIndex())
.setParameterIndex(offsetItem.getParameterIndex());
}
}
for (Integer i : indexs) {// build one RouteTarget per matched name-node index
NameNode nameNode = configurations.getNameNode(
tableInfo.getOrgName(), i);
DefaultRouteTarget target = this.createTarget(batchItem, nameNode, tableInfo);
targetSet.add(target);
}


// Attach the execute info (final SQL + parameter callbacks) to each target.
for (RouteTarget item : targetSet) {
DefaultRouteTarget target = (DefaultRouteTarget) item;
SqlExecuteInfo info = new SqlExecuteInfo();
info.setCallbacks(new LinkedHashSet<ParameterCallback<?>>(batchItem
.getCallbacks()));

if (desc.isDifferentName()) {
// Physical table names differ per shard: regenerate the SQL with
// the shard-specific table name.
info.setExecuteSql(configurations.getGenerator().generate(
(NameNodeHolder) target.getNameNode(),
batchItem.getAnalyzeResult()));
} else if ((!batchItem.getAnalyzeResult().getAppendResultColumns().isEmpty() || batchItem
.getAnalyzeResult().getLimit() != null) && nameNodes.size() > 1) {// LIMIT or an aggregate (e.g. AVG) is present: regenerate the SQL, but only when more than one name node exists
info.setExecuteSql(configurations.getLimitAvgGenerator()
.generate((NameNodeHolder) target.getNameNode(),
batchItem.getAnalyzeResult()));
} else {
// No rewrite needed: ship the original SQL as-is.
info.setExecuteSql(batchItem.getSql());
}
target.setExecuteInfo(info);
}
return targetSet;
}


(3)SimpleExecutor 和 HandlerFactory:

根据StatementContext的RouteTarget(路由数据),

新建事务并获取数据库连接, 实际执行JDBC CRUD操作的类.

兴趣点:发现doUpdate的时候有用同步工具类:

CyclicBarrier barrier = new CyclicBarrier(n);




@SuppressWarnings({ "rawtypes", "unchecked" })
public class SimpleExecutor implements Executor {
static Logger logger = LoggerFactory.getLogger(SimpleExecutor.class);
static final ExecuteHandler<Integer> deleteHandler = new DeleteExecuteHandler();
static final ExecuteHandler<Integer> insertHandler = new InsertExecuteHandler();
static final ExecuteHandler<Integer> updateHandler = new UpdateExecuteHandler();
static final ExecuteHandler<ResultSet> queryHandler = new QueryExecuteHandler();

/**
 * Dispatches the statement held by the context to the matching execution
 * path: SELECT goes to {@code doQuery}, while INSERT/UPDATE/DELETE all go
 * to {@code doUpdate}. Any other statement type yields {@code null}.
 */
@Override
public Object execute(StatementContext context, ExecuteCallback callback)
		throws SQLException {
	StatementType statementType = context.getCurrentBatch()
			.getAnalyzeResult().getStatementType();
	if (statementType == StatementType.SELECT) {
		return this.doQuery(context, callback);
	}
	if (statementType == StatementType.INSERT
			|| statementType == StatementType.UPDATE
			|| statementType == StatementType.DELETE) {
		return doUpdate(context, callback);
	}
	return null;
}

/**
 * Returns the shared, stateless execute handler for the given statement
 * type, or {@code null} when no dedicated handler exists for it.
 */
ExecuteHandler<?> getHandler(StatementType statementType) {
	if (statementType == StatementType.SELECT) {
		return queryHandler;
	}
	if (statementType == StatementType.INSERT) {
		return insertHandler;
	}
	if (statementType == StatementType.UPDATE) {
		return updateHandler;
	}
	if (statementType == StatementType.DELETE) {
		return deleteHandler;
	}
	return null;
}
...


5.集成Mybatis:

因为Mybatis获取连接是通过PooledDataSource或UnpooledDataSource获取的,所以写个插件:包装下DataSource,把oceanus的ConnectionWrapper包装进去即可实现整合。
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值