多表并发:
(1)使用list拆分;
(2)拆分后并发执行;
/**
 * Multi-table concurrent migration: splits {@code this.list} into sub-lists
 * and hands each sub-list to a dedicated {@link MigraDataThread} worker in a
 * fixed-size pool, then blocks until every worker has finished.
 *
 * <p>Completion is signalled through {@code wThread}: it is incremented once
 * per submitted worker here, and (presumably) decremented by each
 * {@code MigraDataThread} when it finishes — TODO confirm against
 * {@code MigraDataThread}.
 */
private void migraByList() {
    final int readNum = 8;      // fixed worker-pool size and max partition count
    final int minThreadNum = 1; // lists of this size or smaller are not split
    final AtomicLong wThread = new AtomicLong(0L); // count of still-running workers
    ExecutorService executorServiceRead = new ThreadPoolExecutor(
            readNum, readNum, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>());

    // Partition the table list: one slice per worker when the list is big
    // enough, otherwise keep everything in a single slice.
    List<List<ObjInfoDTO>> averageAssign;
    if (this.list.size() > minThreadNum) {
        averageAssign = ResotreScriptUtils.averageAssign(this.list, readNum);
    } else {
        averageAssign = ResotreScriptUtils.averageAssign(this.list, 1);
    }

    for (List<ObjInfoDTO> alist : averageAssign) {
        wThread.incrementAndGet();
        executorServiceRead.execute(new MigraDataThread(this, alist, wThread));
    }

    // Poll until every worker has signalled completion (wThread back to 0).
    while (wThread.get() != 0L) {
        try {
            Thread.sleep(2000L);
        } catch (InterruptedException e) {
            // Preserve the interrupt status and stop waiting instead of
            // swallowing the interruption with printStackTrace().
            Thread.currentThread().interrupt();
            break;
        }
    }

    try {
        this.logger.info("total migrate rows :" + this.totalRow.get());
        executorServiceRead.shutdown();
        // awaitTermination already blocks up to 1s per call, so no extra
        // sleep is needed inside the loop.
        while (!executorServiceRead.awaitTermination(1L, TimeUnit.SECONDS)) {
            // keep waiting for in-flight tasks to drain
        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
}
大表并发:
(1)基本对象为sqlmodel(也即一条查询sql)
(2)对大表进行查询分片处理,生成多条查询sql;
(3)使用固定线程池,如果有空闲线程,填充一条sql进行查询;
/**
 * Big-table concurrent migration. For each table in {@code this.list}:
 * tables whose size exceeds a threshold are sliced into {@code sqlSlicesNum}
 * SELECT statements ({@link SqlModel}s), small tables get a single SELECT.
 * Each SqlModel is fed to a fixed thread pool of {@link ReadThread}s; the
 * method throttles submission so at most {@code readNum} reads run at once,
 * and aborts early when {@code this.progress.getState() == -1} (cancelled —
 * TODO confirm the -1 convention).
 *
 * <p>NOTE(review): this body was reconstructed from decompiler output — the
 * original contained artifacts (`label403` loop nest, `int minThreadNum =
 * true;`, a boolean assigned to an int, which does not compile). Verify the
 * reconstructed loop structure against the original source if available.
 */
private void migraBySi() {
    final AtomicInteger count = new AtomicInteger(0);
    final int readNum = 8;      // pool size == max concurrent ReadThreads
    final int sqlSlicesNum = 4; // number of SELECT slices per big table
    final AtomicLong wThread = new AtomicLong(0L); // live ReadThread count
    ExecutorService executorServiceRead = new ThreadPoolExecutor(
            readNum, readNum, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>());
    try {
        int si = 0;
        while (si < this.list.size() && this.progress.getState() != -1) {
            // Do not start analysing the next table until a worker slot is
            // free; sleeping avoids the busy-spin of the original loop.
            if (wThread.get() >= (long) readNum) {
                Thread.sleep(200L);
                continue;
            }
            ObjInfoDTO obj = (ObjInfoDTO) this.list.get(si);
            List<SqlModel> sqlList = new ArrayList<>();
            DruidPooledConnection connSource = null;
            try {
                // Work on a clone so shared table metadata is not mutated.
                ObjInfoDTO<Table> table = new ObjInfoDTO();
                table.setData(((Table) obj.getData()).getCloneTable());
                table.setName(obj.getName());
                table.setSchema(obj.getSchema());
                table.setTargetSchema(obj.getTargetSchema());
                connSource = DruidHelper.getInstance().getSourceConnection();
                List<Column> columns = SourceDBUtil.getbase().getPrepareController()
                        .getColumns(connSource, (Table) table.getData());
                ((Table) table.getData()).setColumns(columns);
                if (columns == null || columns.isEmpty()) {
                    // Nothing to migrate for this table: count it as done and
                    // move on. (The original also built a log StringBuilder
                    // here but never used it — dropped as dead code.)
                    if (this.progress != null) {
                        this.progress.getValue().incrementAndGet();
                    }
                    ++si;
                    continue;
                }
                Table primKeyColumn = SourceDBUtil.getbase().getPrepareController()
                        .getPrimKeyColumn(connSource, (Table) table.getData());
                table.setData(primKeyColumn);
                int size = SourceDBUtil.getbase().getPrepareController()
                        .getTableSize(connSource, (Table) table.getData());
                // Threshold for slicing; unit of getTableSize is not visible
                // here (rows? MB?) — TODO confirm.
                boolean isBigTable = size > 1000;
                boolean includeBlob = SourceDBUtil.getbase().getPrepareController()
                        .isIncludeBlob(columns);
                String columnBulider = this.getCopyColumnBulider(columns);
                if (isBigTable) {
                    // Big table: one SqlModel per pre-sliced SELECT.
                    List<String> bsqlList = SourceDBUtil.getbase().getPrepareController()
                            .getSlicesSelectSQL(table, sqlSlicesNum);
                    for (String sql : bsqlList) {
                        sqlList.add(buildSqlModel(table, sql, includeBlob, columns, columnBulider));
                        this.logger.info("SqlList :" + sql);
                    }
                } else {
                    // Small table: a single full-table SELECT.
                    String schemaName = SourceDBUtil.getbase().getKeyWordService()
                            .convertorNameToSelf(table.getSchema());
                    String tableName = SourceDBUtil.getbase().getKeyWordService()
                            .convertorNameToSelf(table.getName());
                    String sql = "SELECT " + this.getSeleteColumnBuilder(columns)
                            + " FROM " + schemaName + "." + tableName;
                    sqlList.add(buildSqlModel(table, sql, includeBlob, columns, columnBulider));
                    this.logger.info("SqlList :" + sql);
                }
                // Submit each slice, waiting when the pool is saturated
                // (the original spun here without sleeping).
                int sqlmodei = 0;
                while (sqlmodei < sqlList.size() && this.progress.getState() != -1) {
                    if (wThread.get() < (long) readNum) {
                        wThread.incrementAndGet();
                        SqlModel sqlModel = sqlList.get(sqlmodei);
                        this.logger.info("executorServiceRead-----start :" + sqlModel.getTableName());
                        executorServiceRead.execute(new ReadThread(this, sqlModel, wThread, count));
                        ++sqlmodei;
                    } else {
                        Thread.sleep(200L);
                    }
                }
                this.progress.getValue().incrementAndGet();
            } catch (SQLException sqlEx) {
                // Record the failed table and keep migrating the rest.
                ProcedureInfoTO info = MigrateResultUtils.createProcedureInfoTO(
                        obj.getSchema() + "." + obj.getName(), 1, sqlEx.getMessage());
                this.tableCode.addFailTableList(info);
                this.logger.info("Error loadDataSlit:" + obj.getSchema() + "." + obj.getName()
                        + sqlEx.getMessage());
            } finally {
                JdbcUtils.close(connSource);
            }
            ++si;
        }
        // Drain: wait until every submitted ReadThread has finished (or the
        // task was cancelled).
        while (wThread.get() != 0L && this.progress.getState() != -1) {
            Thread.sleep(200L);
        }
    } catch (InterruptedException e) {
        // Preserve interrupt status instead of printStackTrace().
        Thread.currentThread().interrupt();
    } finally {
        try {
            this.logger.info("total migrate rows :" + this.totalRow.get());
            executorServiceRead.shutdown();
            while (!executorServiceRead.awaitTermination(1L, TimeUnit.SECONDS)) {
                // keep waiting for in-flight tasks to drain
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
}

/** Populates one SqlModel slice for the given table; shared by both branches. */
private SqlModel buildSqlModel(ObjInfoDTO<Table> table, String sql, boolean includeBlob,
        List<Column> columns, String columnBulider) {
    SqlModel sqlModel = new SqlModel();
    sqlModel.setSql(sql);
    sqlModel.setSchemaName(table.getSchema());
    sqlModel.setTargetSchemaName(table.getTargetSchema());
    sqlModel.setTableName(table.getName());
    sqlModel.setIsInLob(includeBlob);
    sqlModel.setColumns(columns);
    sqlModel.setColumnBulider(columnBulider);
    return sqlModel;
}
注:
核心理念在于:预先对迁移任务进行分析(模糊分析,不要求精确完整,尽可能提高分析速度);通过多线程并发等技术对迁移对象进行处理,把对象拆解为一个个迁移单位;执行迁移任务时尽可能充分利用系统资源(在执行部分迁移对象的同时,分析处理其他迁移对象)。
系统性能分析:系统性能分析从入门到进阶