边缘代码略过,核心代码笔记
客户端
1.table_jsp._jspService(HttpServletRequest request, HttpServletResponse response)
调用客户端HBaseAdmin的compact方法来压缩region
//调用HBaseAdmin的功能来完成Region的压缩
HBaseAdmin hbadmin = new HBaseAdmin(conf);
hbadmin.compact(tableName);
2.HBaseAdmin客户端
//判断是对某个Region进行压缩还是对全表进行压缩
isRegionName(tableNameOrRegionName)
//从.META.表里读出HRegionInfo和HServerAddress
Pair pair = MetaReader.getRegion(ct, tableNameOrRegionName);
compact((HServerAddress)pair.getSecond(), (HRegionInfo)pair.getFirst(), major);
//用HServerAddress连上远程HRegionServer,HRegionServer的代理接口就是HRegionInterface
HRegionInterface rs = connection.getHRegionConnection(hsa);
//远程调用压缩Region
rs.compactRegion(hri, major);
RegionServer服务器端
1.HRegionServer
//从HRegionServer的protected final Map onlineRegions = new HashMap();里拿出在线的HRegion
HRegion region = getRegion(regionInfo.getRegionName());
//用compactSplitThread后台线程来处理压缩Region的操作 compactSplitThread.requestCompaction(region, major, (new StringBuilder()).append("User-triggered ").append(major ? "major " : "").append("compaction").toString(), 1);
2.CompactSplitThread
//用一个PriorityCompactionQueue来接收请求
compactionQueue.add(r, priority)
//在CompactSplitThread的run方法里监听PriorityCompactionQueue有没有新的要压缩的Region
r = compactionQueue.poll(this.frequency, TimeUnit.MILLISECONDS);
//调用HRegion的compactStores来压缩Region
byte [] midKey = r.compactStores();
3.HRegion
//循环调用Store的compact进行压缩,并且找出一个size最大的Store作为要分拆的splitRow
for (Store store: stores.values()) {
final Store.StoreSize ss = store.compact(majorCompaction);
lastCompactSize += store.getLastCompactSize();
if (ss != null && ss.getSize() > maxSize) {
maxSize = ss.getSize();
splitRow = ss.getSplitRow();
}
}
4.Store
StoreSize compact(final boolean forceMajor) throws IOException
第一步判断是部分合并还是完全合并,首先把Store下面所有的storefiles都赋给filesToCompact,这种就是完全合并;再通过一大段计算和判断,计算出部分合并
List<StoreFile> filesToCompact = this.storefiles;
filesToCompact = new ArrayList<StoreFile>(filesToCompact.subList(start, end));
第二步进行合并,就是把filesToCompact合并成一个临时文件,放到.tmp目录
StoreFile.Writer writer = compact(filesToCompact, majorcompaction, maxId);
第三步提交合并,就是把临时文件writer移动到store目录下,并且将filesToCompact删除,再合并完的sf加到this.storefiles里,并且把filesToCompact从this.storefiles里删除
StoreFile sf = completeCompaction(filesToCompact, writer);
5.StoreFile
Store调用StoreFile的接口来完成文件的读写等操作
6.StoreScanner
Store调用StoreScanner来完成对原有StoreFile的扫描,排序,合并成一个新的文件,这是compact算法的核心
StoreScanner的region compact算法分析可以参考我另一篇博客
[url]http://uestzengting.iteye.com/admin/blogs/1297738[/url]
客户端
1.table_jsp._jspService(HttpServletRequest request, HttpServletResponse response)
调用客户端HBaseAdmin的compact方法来压缩region
//调用HBaseAdmin的功能来完成Region的压缩
HBaseAdmin hbadmin = new HBaseAdmin(conf);
hbadmin.compact(tableName);
2.HBaseAdmin客户端
//判断是对某个Region进行压缩还是对全表进行压缩
isRegionName(tableNameOrRegionName)
//从.META.表里读出HRegionInfo和HServerAddress
Pair pair = MetaReader.getRegion(ct, tableNameOrRegionName);
compact((HServerAddress)pair.getSecond(), (HRegionInfo)pair.getFirst(), major);
//用HServerAddress连上远程HRegionServer,HRegionServer的代理接口就是HRegionInterface
HRegionInterface rs = connection.getHRegionConnection(hsa);
//远程调用压缩Region
rs.compactRegion(hri, major);
RegionServer服务器端
1.HRegionServer
//从HRegionServer的protected final Map onlineRegions = new HashMap();里拿出在线的HRegion
HRegion region = getRegion(regionInfo.getRegionName());
//用compactSplitThread后台线程来处理压缩Region的操作 compactSplitThread.requestCompaction(region, major, (new StringBuilder()).append("User-triggered ").append(major ? "major " : "").append("compaction").toString(), 1);
2.CompactSplitThread
//用一个PriorityCompactionQueue来接收请求
compactionQueue.add(r, priority)
//在CompactSplitThread的run方法里监听PriorityCompactionQueue有没有新的要压缩的Region
r = compactionQueue.poll(this.frequency, TimeUnit.MILLISECONDS);
//调用HRegion的compactStores来压缩Region
byte [] midKey = r.compactStores();
3.HRegion
//循环调用Store的compact进行压缩,并且找出一个size最大的Store作为要分拆的splitRow
for (Store store: stores.values()) {
final Store.StoreSize ss = store.compact(majorCompaction);
lastCompactSize += store.getLastCompactSize();
if (ss != null && ss.getSize() > maxSize) {
maxSize = ss.getSize();
splitRow = ss.getSplitRow();
}
}
4.Store
StoreSize compact(final boolean forceMajor) throws IOException
第一步判断是部分合并还是完全合并,首先把Store下面所有的storefiles都赋给filesToCompact,这种就是完全合并;再通过一大段计算和判断,计算出部分合并
List<StoreFile> filesToCompact = this.storefiles;
filesToCompact = new ArrayList<StoreFile>(filesToCompact.subList(start, end));
第二步进行合并,就是把filesToCompact合并成一个临时文件,放到.tmp目录
StoreFile.Writer writer = compact(filesToCompact, majorcompaction, maxId);
第三步提交合并,就是把临时文件writer移动到store目录下,并且将filesToCompact删除,再合并完的sf加到this.storefiles里,并且把filesToCompact从this.storefiles里删除
StoreFile sf = completeCompaction(filesToCompact, writer);
5.StoreFile
Store调用StoreFile的接口来完成文件的读写等操作
6.StoreScanner
Store调用StoreScanner来完成对原有StoreFile的扫描,排序,合并成一个新的文件,这是compact算法的核心
StoreScanner的region compact算法分析可以参考我另一篇博客
[url]http://uestzengting.iteye.com/admin/blogs/1297738[/url]