shardingsphere基于时间动态分片
前言
之前我们介绍了 shardingsphere 基于时间动态分片的功能,但是该功能在动态加载分片规则时非常缓慢。本文分析并解决这个性能问题。
一、 性能分析
拿出性能分析神器:Arthas
接着对 contextManager 的 alterRuleConfiguration 方法进行耗时分析:
trace org.apache.shardingsphere.mode.manager.ContextManager alterRuleConfiguration -n 5 --skipJDKMethod false
对org.apache.shardingsphere.mode.manager.ContextManager的alterRuleConfiguration方法 进行耗时监控
二、原因
监控到
org.apache.shardingsphere.mode.metadata.persist.service.DatabaseMetaDataPersistService的loadSchemas方法中的循环耗时20000ms
public Map<String, ShardingSphereSchema> loadSchemas(String databaseName) {
Collection<String> schemaNames = this.loadAllSchemaNames(databaseName);
Set<String> set = new HashSet<>(schemaNames);
Map<String, ShardingSphereSchema> result = new LinkedHashMap(set.size(), 1.0F);
//就是这个循环
set.forEach((each) -> {
ShardingSphereSchema var10000 = (ShardingSphereSchema)result.put(each.toLowerCase(), new ShardingSphereSchema(this.tableMetaDataPersistService.load(databaseName, each), this.viewMetaDataPersistService.load(databaseName, each)));
});
return result;
}
继续分析
该循环遍历的 schemaNames 集合中全部是重复的 logic_db,相当于对同一个 schema 反复加载元数据。
三、解决方案
分析到此已无需再深入,直接对 schemaNames 这个集合去重即可。
对源项目修改并重新打包是个痛苦的过程,这里我们直接在自己项目中按相同的包路径重新创建该类,确保全限定类名一致
(编译输出的时候会优先使用项目src下面的类,而不是优先使用Jar包里面的类)
//
// Source code recreated from a .class file by IntelliJ IDEA
// (powered by FernFlower decompiler)
//
package org.apache.shardingsphere.mode.metadata.persist.service;
import java.util.*;
import lombok.Generated;
import org.apache.shardingsphere.infra.metadata.database.schema.SchemaManager;
import org.apache.shardingsphere.infra.metadata.database.schema.decorator.model.ShardingSphereSchema;
import org.apache.shardingsphere.infra.metadata.database.schema.decorator.model.ShardingSphereTable;
import org.apache.shardingsphere.mode.metadata.persist.node.DatabaseMetaDataNode;
import org.apache.shardingsphere.mode.metadata.persist.service.schema.TableMetaDataPersistService;
import org.apache.shardingsphere.mode.metadata.persist.service.schema.ViewMetaDataPersistService;
import org.apache.shardingsphere.mode.persist.PersistRepository;
public final class DatabaseMetaDataPersistService {
private final PersistRepository repository;
private final TableMetaDataPersistService tableMetaDataPersistService;
private final ViewMetaDataPersistService viewMetaDataPersistService;
public DatabaseMetaDataPersistService(PersistRepository repository) {
this.repository = repository;
this.tableMetaDataPersistService = new TableMetaDataPersistService(repository);
this.viewMetaDataPersistService = new ViewMetaDataPersistService(repository);
}
public void addDatabase(String databaseName) {
this.repository.persist(DatabaseMetaDataNode.getDatabaseNamePath(databaseName), "");
}
public void dropDatabase(String databaseName) {
this.repository.delete(DatabaseMetaDataNode.getDatabaseNamePath(databaseName));
}
public Collection<String> loadAllDatabaseNames() {
return this.repository.getChildrenKeys(DatabaseMetaDataNode.getMetaDataNodePath());
}
public void addSchema(String databaseName, String schemaName) {
this.repository.persist(DatabaseMetaDataNode.getMetaDataTablesPath(databaseName, schemaName), "");
}
public void dropSchema(String databaseName, String schemaName) {
this.repository.delete(DatabaseMetaDataNode.getMetaDataSchemaPath(databaseName, schemaName));
}
public void compareAndPersist(String databaseName, String schemaName, ShardingSphereSchema schema) {
if (schema.getTables().isEmpty() && schema.getViews().isEmpty()) {
this.addSchema(databaseName, schemaName);
}
Map<String, ShardingSphereTable> currentTables = this.tableMetaDataPersistService.load(databaseName, schemaName);
this.tableMetaDataPersistService.persist(databaseName, schemaName, SchemaManager.getToBeAddedTables(schema.getTables(), currentTables));
SchemaManager.getToBeDeletedTables(schema.getTables(), currentTables).forEach((key, value) -> {
this.tableMetaDataPersistService.delete(databaseName, schemaName, key);
});
}
public void persist(String databaseName, String schemaName, ShardingSphereSchema schema) {
if (schema.getTables().isEmpty() && schema.getViews().isEmpty()) {
this.addSchema(databaseName, schemaName);
}
this.tableMetaDataPersistService.persist(databaseName, schemaName, schema.getTables());
}
public void delete(String databaseName, String schemaName, ShardingSphereSchema schema) {
schema.getTables().forEach((key, value) -> {
this.tableMetaDataPersistService.delete(databaseName, schemaName, key);
});
}
public Map<String, ShardingSphereSchema> loadSchemas(String databaseName) {
Collection<String> schemaNames = this.loadAllSchemaNames(databaseName);
//对schemaNames去重
Set<String> set = new HashSet<>(schemaNames);
Map<String, ShardingSphereSchema> result = new LinkedHashMap(set.size(), 1.0F);
set.forEach((each) -> {
ShardingSphereSchema var10000 = (ShardingSphereSchema)result.put(each.toLowerCase(), new ShardingSphereSchema(this.tableMetaDataPersistService.load(databaseName, each), this.viewMetaDataPersistService.load(databaseName, each)));
});
return result;
}
private Collection<String> loadAllSchemaNames(String databaseName) {
return this.repository.getChildrenKeys(DatabaseMetaDataNode.getMetaDataSchemasPath(databaseName));
}
@Generated
public PersistRepository getRepository() {
return this.repository;
}
@Generated
public TableMetaDataPersistService getTableMetaDataPersistService() {
return this.tableMetaDataPersistService;
}
@Generated
public ViewMetaDataPersistService getViewMetaDataPersistService() {
return this.viewMetaDataPersistService;
}
}
看看效果
立竿见影!剩下的3s后面有空再优化。
总结
使用arthas
+全限定类名相同时包内类先加载
的套路解决jar包的性能问题确实香!