Hive CombineHiveInputFormat Split Computation Logic [Source Code]

How CombineHiveInputFormat computes its input splits:
The actual work is done by:
    org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat.createSplits()
1. Collect all FileStatus objects for the table's input.
2. Read mapreduce.input.fileinputformat.split.maxsize as maxSize (see the configuration sketch after this outline).
3. Iterate over the files:
    for (FileStatus stat : stats) {
        // If the file is splittable, walk its block locations; whenever the
        // remaining length satisfies maxSize < remaining < 2 * maxSize, split it
        // evenly into two OneBlockInfo objects to avoid producing an overly small piece.
        // Every resulting OneBlockInfo is registered in
        // rackToBlocks / nodeToBlocks / blockToNodes.
        files[i] = new OneFileInfo(stat, conf, isSplitable(job, stat.getPath()),
                                 rackToBlocks, blockToNodes, nodeToBlocks,
                                 rackToNodes, maxSize);
        totLength += files[i].getLength();
    }
4. Turn nodeToBlocks / rackToBlocks / blockToNodes into splits:
    // First allocate node-local splits, to exploit data locality.
    while (true) {
        for (Node node : nodeToBlocks.keySet()) {
            curSplitSize = 0;
            for (Block block : nodeToBlocks.get(node)) {
                // Only blocks still present in blockToNodes (i.e. not yet assigned) are considered.
                curSplitSize += block.size;
                // Once curSplitSize >= maxSize:
                //   create a split from the accumulated blocks, reset curSplitSize to 0,
                //   remove those blocks from blockToNodes,
                //   and break to the next node so splits are spread evenly across nodes.
            }
            If minSizeNode != 0 && curSplitSize >= minSizeNode && no split has been created on this node yet,
              turn the remaining accumulated blocks into one split and mark the node as completed;
            otherwise put those blocks back into blockToNodes.
        }
        Exit the loop once every node is completed || the remaining total length == 0.
    }
    // Then build rack-local splits from the remaining blocks.
    while (blockToNodes.size > 0) {
        for (Rack rack : rackToBlocks.keySet()) {
            curSplitSize = 0;
            for (Block block : rackToBlocks.get(rack)) {
                // Only blocks still present in blockToNodes are considered.
                curSplitSize += block.size;
                // Once curSplitSize >= maxSize:
                //   create a split, reset curSplitSize to 0,
                //   remove those blocks from blockToNodes,
                //   and break to the next rack.
            }
            If minSizeRack != 0 && curSplitSize >= minSizeRack,
              turn the accumulated blocks into one split;
            otherwise append them to an overflow block list.
        }
    }
    // Finally handle the OneBlockInfo entries left in the overflow list:
    // iterate and accumulate curSplitSize += block.size;
    // whenever the accumulated size >= maxSize, create a split and reset curSplitSize to 0;
    // whatever remains at the end (still < maxSize) becomes one final split.
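The three thresholds the outline refers to are plain Hadoop configuration values. Below is a minimal sketch (the 256 MB / 128 MB values are arbitrary examples, not recommendations) of how they map to configuration keys; with Hive's CombineHiveInputFormat the same keys are typically applied per session via set key=value;.

import org.apache.hadoop.conf.Configuration;

public class SplitThresholds {
    public static Configuration build() {
        Configuration conf = new Configuration();
        // maxSize: upper bound for one combined split (step 2 of the outline).
        conf.setLong("mapreduce.input.fileinputformat.split.maxsize", 256L * 1024 * 1024);
        // minSizeNode: a node's leftover blocks become a node-local split only if they reach this size.
        conf.setLong("mapreduce.input.fileinputformat.split.minsize.per.node", 128L * 1024 * 1024);
        // minSizeRack: same idea for the rack-local pass.
        conf.setLong("mapreduce.input.fileinputformat.split.minsize.per.rack", 128L * 1024 * 1024);
        return conf;
    }
}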

private void getMoreSplits(JobContext job, List<FileStatus> stats,
                           long maxSize, long minSizeNode, long minSizeRack,
                           List<InputSplit> splits) throws IOException {
        //.....   
        for (FileStatus stat : stats) {
              files[i] = new OneFileInfo(stat, conf, isSplitable(job, stat.getPath()),
                                         rackToBlocks, blockToNodes, nodeToBlocks,
                                         rackToNodes, maxSize);
              totLength += files[i].getLength();
        }
        createSplits(nodeToBlocks, blockToNodes, rackToBlocks, totLength, 
                     maxSize, minSizeNode, minSizeRack, splits);                            
}
OneFileInfo(FileStatus stat, Configuration conf,
            boolean isSplitable,
            HashMap<String, List<OneBlockInfo>> rackToBlocks,
            HashMap<OneBlockInfo, String[]> blockToNodes,
            HashMap<String, Set<OneBlockInfo>> nodeToBlocks,
            HashMap<String, Set<String>> rackToNodes,
            long maxSize)
        throws IOException {
    this.fileSize = 0;

    // get block locations from file system
    BlockLocation[] locations;
    if (stat instanceof LocatedFileStatus) {
        locations = ((LocatedFileStatus) stat).getBlockLocations();
    } else {
        FileSystem fs = stat.getPath().getFileSystem(conf);
        locations = fs.getFileBlockLocations(stat, 0, stat.getLen());
    }
    // create a list of all block and their locations
    if (locations == null) {
        blocks = new OneBlockInfo[0];
    } else {
        if (locations.length == 0 && !stat.isDirectory()) {
            locations = new BlockLocation[]{new BlockLocation()};
        }
        if (!isSplitable) {
            // If the file is not splittable, create a single OneBlockInfo covering the whole file.
            blocks = new OneBlockInfo[1];
            fileSize = stat.getLen();
            blocks[0] = new OneBlockInfo(stat.getPath(), 0, fileSize,
                    locations[0].getHosts(), locations[0].getTopologyPaths());
        } else {
            ArrayList<OneBlockInfo> blocksList = new ArrayList<OneBlockInfo>(
                    locations.length);
            for (int i = 0; i < locations.length; i++) {
                fileSize += locations[i].getLength();
                // each split can be a maximum of maxSize
                long left = locations[i].getLength();
                long myOffset = locations[i].getOffset();
                long myLength = 0;
                do {
                    if (maxSize == 0) {
                        myLength = left;
                    } else {
                        if (left > maxSize && left < 2 * maxSize) {
                            // The remainder is between maxSize and 2 * maxSize: split it
                            // into two equal halves to avoid an overly small last chunk.
                            myLength = left / 2;
                        } else {
                            myLength = Math.min(maxSize, left);
                        }
                    }
                    OneBlockInfo oneblock = new OneBlockInfo(stat.getPath(),
                            myOffset, myLength, locations[i].getHosts(),
                            locations[i].getTopologyPaths());
                    left -= myLength;
                    myOffset += myLength;
                    blocksList.add(oneblock);
                } while (left > 0);
            }
            blocks = blocksList.toArray(new OneBlockInfo[blocksList.size()]);
        }
        // Register every OneBlockInfo in the rack/node/block lookup maps.
        populateBlockInfo(blocks, rackToBlocks, blockToNodes,
                nodeToBlocks, rackToNodes);
    }
}
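To make the carving rule in the do/while loop above concrete, here is a small self-contained sketch (the class and method names and the sample sizes are illustrative, not part of the Hadoop source) that reproduces only the length arithmetic: with maxSize = 128 MB, a 300 MB location is carved into 128 MB + 86 MB + 86 MB rather than 128 MB + 128 MB + 44 MB, so no tiny trailing chunk is produced.

import java.util.ArrayList;
import java.util.List;

public class ChunkCarvingDemo {

    // Reproduces only the length arithmetic of the do/while loop in OneFileInfo.
    static List<Long> carve(long blockLength, long maxSize) {
        List<Long> chunks = new ArrayList<>();
        long left = blockLength;
        do {
            long myLength;
            if (maxSize == 0) {
                myLength = left;                     // no limit: take everything at once
            } else if (left > maxSize && left < 2 * maxSize) {
                myLength = left / 2;                 // split evenly to avoid a tiny remainder
            } else {
                myLength = Math.min(maxSize, left);  // normal case: cap at maxSize
            }
            chunks.add(myLength);
            left -= myLength;
        } while (left > 0);
        return chunks;
    }

    public static void main(String[] args) {
        long mb = 1024L * 1024;
        // Prints [134217728, 90177536, 90177536], i.e. 128 MB + 86 MB + 86 MB.
        System.out.println(carve(300 * mb, 128 * mb));
    }
}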

void createSplits(Map<String, Set<OneBlockInfo>> nodeToBlocks,
                  Map<OneBlockInfo, String[]> blockToNodes,
                  Map<String, List<OneBlockInfo>> rackToBlocks,
                  long totLength,
                  long maxSize,
                  long minSizeNode,
                  long minSizeRack,
                  List<InputSplit> splits
) {
    ArrayList<OneBlockInfo> validBlocks = new ArrayList<OneBlockInfo>();
    long curSplitSize = 0;

    int totalNodes = nodeToBlocks.size();
    long totalLength = totLength;
    Multiset<String> splitsPerNode = HashMultiset.create();
    Set<String> completedNodes = new HashSet<String>();
    // First pass: build node-local splits by walking each node's blocks.
    while (true) {
        for (Iterator<Map.Entry<String, Set<OneBlockInfo>>> iter = nodeToBlocks
                .entrySet().iterator(); iter.hasNext(); ) {
            Map.Entry<String, Set<OneBlockInfo>> one = iter.next();
            // The node (host) name.
            String node = one.getKey();
            // Skip nodes that have already been marked as completed.
            if (completedNodes.contains(node)) {
                continue;
            }
            // The OneBlockInfo set for this node.
            Set<OneBlockInfo> blocksInCurrentNode = one.getValue();
            Iterator<OneBlockInfo> oneBlockIter = blocksInCurrentNode.iterator();
            while (oneBlockIter.hasNext()) {
                OneBlockInfo oneblock = oneBlockIter.next();
                // Drop blocks that have already been assigned to another split.
                if (!blockToNodes.containsKey(oneblock)) {
                    oneBlockIter.remove();
                    continue;
                }
                validBlocks.add(oneblock);       // tentatively add to the current split
                blockToNodes.remove(oneblock);   // mark the block as taken
                curSplitSize += oneblock.length; // grow the current split size
                // Once the current split reaches maxSize, turn it into a real split.
                if (maxSize != 0 && curSplitSize >= maxSize) {
                    // create an input split and add it to the splits array
                    addCreatedSplit(splits, Collections.singleton(node), validBlocks);
                    totalLength -= curSplitSize; // subtract the size just assigned
                    curSplitSize = 0;            // reset for the next split
                    splitsPerNode.add(node);     // count splits created on this node
                    // Drop the blocks that were just assigned from this node's block set.
                    blocksInCurrentNode.removeAll(validBlocks);
                    validBlocks.clear();         // reset the in-flight block list
                    // Break to the next node so splits are spread evenly across
                    // the cluster and stay node-local.
                    break;
                }

            }
            if (validBlocks.size() != 0) {
                // Blocks left on this node whose accumulated size never reached maxSize.
                if (minSizeNode != 0 && curSplitSize >= minSizeNode
                        && splitsPerNode.count(node) == 0) {
                    // They total at least minSizeNode and no split has been created
                    // on this node yet, so make one node-local split from them.
                    addCreatedSplit(splits, Collections.singleton(node), validBlocks);
                    totalLength -= curSplitSize; // subtract the size just assigned
                    splitsPerNode.add(node);
                    blocksInCurrentNode.removeAll(validBlocks);
                } else {
                    // Put the unplaced blocks back into the pool for later rack-allocation.
                    for (OneBlockInfo oneblock : validBlocks) {
                        blockToNodes.put(oneblock, oneblock.hosts);
                    }
                }
                validBlocks.clear();
                curSplitSize = 0;
                completedNodes.add(node);
            } else { // No in-flight blocks.
                if (blocksInCurrentNode.size() == 0) {
                    // Node is done. All blocks were fit into node-local splits.
                    completedNodes.add(node); // mark the node as completed
                } // else Run through the node again.
            }
        }
        // Exit the loop once every node is completed or all data has been assigned.
        if (completedNodes.size() == totalNodes || totalLength == 0) {
            LOG.info("DEBUG: Terminated node allocation with : CompletedNodes: "
                    + completedNodes.size() + ", size left: " + totalLength);
            break;
        }
    }

    // Second pass: build rack-local splits from the remaining blocks.
    ArrayList<OneBlockInfo> overflowBlocks = new ArrayList<OneBlockInfo>();
    Set<String> racks = new HashSet<String>();
    while (blockToNodes.size() > 0) {
        for (Iterator<Map.Entry<String, List<OneBlockInfo>>> iter =
             rackToBlocks.entrySet().iterator(); iter.hasNext(); ) {

            Map.Entry<String, List<OneBlockInfo>> one = iter.next();
            racks.add(one.getKey());
            List<OneBlockInfo> blocks = one.getValue();
            boolean createdSplit = false;
            // Walk the blocks on this rack.
            for (OneBlockInfo oneblock : blocks) {
                if (blockToNodes.containsKey(oneblock)) {
                    validBlocks.add(oneblock);
                    blockToNodes.remove(oneblock);
                    curSplitSize += oneblock.length;
                    // Create a split once the accumulated size reaches maxSize.
                    if (maxSize != 0 && curSplitSize >= maxSize) {
                        // create an input split and add it to the splits array
                        addCreatedSplit(splits, getHosts(racks), validBlocks);
                        createdSplit = true;
                        break;
                    }
                }
            }
            // If a split was created, reset and move on to the next rack.
            if (createdSplit) {
                curSplitSize = 0;
                validBlocks.clear();
                racks.clear();
                continue;
            }

            if (!validBlocks.isEmpty()) {
                if (minSizeRack != 0 && curSplitSize >= minSizeRack) {
                    // The accumulated size is at least minSizeRack: create a split.
                    addCreatedSplit(splits, getHosts(racks), validBlocks);
                } else {
                    // Otherwise defer these blocks to the overflow list.
                    overflowBlocks.addAll(validBlocks);
                }
            }
            curSplitSize = 0;    // reset
            validBlocks.clear(); // reset
            racks.clear();       // reset
        }
    }

    assert blockToNodes.isEmpty();
    assert curSplitSize == 0;
    assert validBlocks.isEmpty();
    assert racks.isEmpty();

    // Final pass: combine whatever is left in the overflow list.
    for (OneBlockInfo oneblock : overflowBlocks) {
        validBlocks.add(oneblock);
        curSplitSize += oneblock.length;
        for (int i = 0; i < oneblock.racks.length; i++) {
            racks.add(oneblock.racks[i]);
        }
        if (maxSize != 0 && curSplitSize >= maxSize) {
            // The accumulated size reached maxSize: create a split.
            addCreatedSplit(splits, getHosts(racks), validBlocks);
            curSplitSize = 0;
            validBlocks.clear();
            racks.clear();
        }
    }
    // Anything still left over becomes one final split.
    if (!validBlocks.isEmpty()) {
        addCreatedSplit(splits, getHosts(racks), validBlocks);
    }
}
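To see the result of this allocation, a standalone driver can ask a concrete subclass for its splits. The sketch below is an assumption-laden example (it uses CombineTextInputFormat, a concrete CombineFileInputFormat from Hadoop, with arbitrary threshold values); it prints the size and locations of each combined split that createSplits() produced.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.CombineTextInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

public class PrintCombineSplits {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setLong("mapreduce.input.fileinputformat.split.maxsize", 256L * 1024 * 1024);
        conf.setLong("mapreduce.input.fileinputformat.split.minsize.per.node", 128L * 1024 * 1024);
        conf.setLong("mapreduce.input.fileinputformat.split.minsize.per.rack", 128L * 1024 * 1024);

        Job job = Job.getInstance(conf, "print-combine-splits");
        FileInputFormat.addInputPath(job, new Path(args[0]));

        // getSplits() runs the getMoreSplits()/createSplits() logic shown above.
        List<InputSplit> splits = new CombineTextInputFormat().getSplits(job);
        for (InputSplit split : splits) {
            System.out.println(split.getLength() + " bytes on "
                    + String.join(",", split.getLocations()));
        }
    }
}

Per the code above, node-local splits report a single host, while rack-level and overflow splits report the hosts returned by getHosts(racks), matching the three passes.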

 
