DataNode Startup Flow: Source Code Analysis

  public static void secureMain(String args[], SecureResources resources) {
    int errorCode = 0;
    try {
      StringUtils.startupShutdownMessage(DataNode.class, args, LOG);
      // Create the DataNode instance
      DataNode datanode = createDataNode(args, null, resources);
      if (datanode != null) {
        // Call join() on the DataNode instance and block until the DataNode stops running
        datanode.join();
      } else {
        errorCode = 1;
      }
    } catch (Throwable e) {
      terminate(1, e);
    } finally {
      terminate(errorCode);
    }
  }
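
Before secureMain runs, the process enters through DataNode.main. Below is a brief sketch, based on the Hadoop 2.x sources, of how main delegates to secureMain; treat the exact help-argument handling as approximate. In a secure deployment, SecureDataNodeStarter calls secureMain with the privileged resources instead of null.

  public static void main(String args[]) {
    // Print usage and exit if a help argument was passed on the command line
    if (DFSUtil.parseHelpArgument(args, DataNode.USAGE, System.out, true)) {
      System.exit(0);
    }
    // Non-secure startup passes null resources; SecureDataNodeStarter supplies them otherwise
    secureMain(args, null);
  }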

 

  public static DataNode createDataNode(String args[], Configuration conf,
      SecureResources resources) throws IOException {
    // Instantiate the DataNode
    DataNode dn = instantiateDataNode(args, conf, resources);
    if (dn != null) {
      // Run the DataNode as a daemon and start the services it hosts
      dn.runDatanodeDaemon();
    }
    return dn;
  }
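
For context, here is an abridged sketch of what runDatanodeDaemon does in the 2.x codebase: it starts the per-NameNode block pool services, the streaming DataXceiver server threads and the RPC server. The member names come from the real class, but the body is simplified:

  public void runDatanodeDaemon() throws IOException {
    // Start a BPServiceActor thread per NameNode so the DataNode can register and heartbeat
    blockPoolManager.startAll();
    // Start the streaming data-transfer (DataXceiver) server threads
    dataXceiverServer.start();
    if (localDataXceiverServer != null) {
      localDataXceiverServer.start();
    }
    // Start the RPC server and any configured plugins
    ipcServer.start();
    startPlugins(conf);
  }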

 

  public static DataNode instantiateDataNode(String args [], Configuration conf,
      SecureResources resources) throws IOException {
    if (conf == null)
      conf = new HdfsConfiguration();

    if (args != null) {
      // parse generic hadoop options
      GenericOptionsParser hParser = new GenericOptionsParser(conf, args);
      args = hParser.getRemainingArgs();
    }

    if (!parseArguments(args, conf)) {
      printUsage(System.err);
      return null;
    }
    // Build a StorageLocation for each DataNode data directory listed in the configuration
    Collection<StorageLocation> dataLocations = getStorageLocations(conf);
    UserGroupInformation.setConfiguration(conf);
    SecurityUtil.login(conf, DFS_DATANODE_KEYTAB_FILE_KEY,
        DFS_DATANODE_KERBEROS_PRINCIPAL_KEY);
    // Return the DataNode instance
    return makeInstance(dataLocations, conf, resources);
  }
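
getStorageLocations turns each path listed under dfs.datanode.data.dir into a StorageLocation. The following is a condensed sketch with error handling trimmed, assuming the 2.x StorageLocation.parse helper:

  public static List<StorageLocation> getStorageLocations(Configuration conf) {
    // dfs.datanode.data.dir may list several directories, optionally tagged with a
    // storage type such as [SSD] or [DISK]
    Collection<String> rawLocations =
        conf.getTrimmedStringCollection(DFS_DATANODE_DATA_DIR_KEY);
    List<StorageLocation> locations =
        new ArrayList<StorageLocation>(rawLocations.size());
    for (String locationString : rawLocations) {
      try {
        locations.add(StorageLocation.parse(locationString));
      } catch (IOException | SecurityException e) {
        throw new IllegalArgumentException("Failed to parse "
            + DFS_DATANODE_DATA_DIR_KEY + " value " + locationString, e);
      }
    }
    return locations;
  }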

  static DataNode makeInstance(Collection<StorageLocation> dataDirs,
      Configuration conf, SecureResources resources) throws IOException {
    LocalFileSystem localFS = FileSystem.getLocal(conf);
    // Permission expected on the data directories; the default is 700
    FsPermission permission = new FsPermission(
        conf.get(DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
                 DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
    // Create the DataNode disk checker
    DataNodeDiskChecker dataNodeDiskChecker =
        new DataNodeDiskChecker(permission);
    // Run the disk check on every StorageLocation
    List<StorageLocation> locations =
        checkStorageLocations(dataDirs, localFS, dataNodeDiskChecker);
    DefaultMetricsSystem.initialize("DataNode");

    assert locations.size() > 0 : "number of data directories should be > 0";
    // Create the DataNode
    return new DataNode(conf, locations, resources);
  }
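
checkStorageLocations is what weeds out unusable directories: every StorageLocation is run through the disk checker, invalid ones are logged and dropped, and an IOException is thrown only when no directory survives. A condensed sketch, with the log and error messages shortened:

  static List<StorageLocation> checkStorageLocations(
      Collection<StorageLocation> dataDirs,
      LocalFileSystem localFS, DataNodeDiskChecker dataNodeDiskChecker)
          throws IOException {
    ArrayList<StorageLocation> locations = new ArrayList<StorageLocation>();
    StringBuilder invalidDirs = new StringBuilder();
    for (StorageLocation location : dataDirs) {
      final URI uri = location.getUri();
      try {
        // Verifies the directory exists (creating it if needed), is a directory,
        // and is readable/writable with the expected permissions
        dataNodeDiskChecker.checkDir(localFS, new Path(uri));
        locations.add(location);
      } catch (IOException ioe) {
        LOG.warn("Invalid " + DFS_DATANODE_DATA_DIR_KEY + " " + location, ioe);
        invalidDirs.append("\"").append(uri.getPath()).append("\" ");
      }
    }
    if (locations.size() == 0) {
      throw new IOException("All directories in " + DFS_DATANODE_DATA_DIR_KEY
          + " are invalid: " + invalidDirs);
    }
    return locations;
  }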

 

  DataNode(final Configuration conf,
           final List<StorageLocation> dataDirs,
           final SecureResources resources) throws IOException {
    super(conf);
    this.tracer = createTracer(conf);
    this.tracerConfigurationManager =
        new TracerConfigurationManager(DATANODE_HTRACE_PREFIX, conf);
    /*
     * Create the block scanner. It periodically verifies the integrity of every
     * block stored on this DataNode and reports corrupt blocks to the NameNode.
     */
    this.blockScanner = new BlockScanner(this, conf);
    this.lastDiskErrorCheck = 0;
    this.maxNumberOfBlocksToLog = conf.getLong(DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
        DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT);

    this.usersWithLocalPathAccess = Arrays.asList(
        conf.getTrimmedStrings(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY));
    this.connectToDnViaHostname = conf.getBoolean(
        DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME,
        DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
    this.getHdfsBlockLocationsEnabled = conf.getBoolean(
        DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,
        DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT);
    this.supergroup = conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
        DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
    this.isPermissionEnabled = conf.getBoolean(
        DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,
        DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT);
    this.pipelineSupportECN = conf.getBoolean(
        DFSConfigKeys.DFS_PIPELINE_ECN_ENABLED,
        DFSConfigKeys.DFS_PIPELINE_ECN_ENABLED_DEFAULT);

    confVersion = "core-" +
        conf.get("hadoop.common.configuration.version", "UNSPECIFIED") +
        ",hdfs-" +
        conf.get("hadoop.hdfs.configuration.version", "UNSPECIFIED");

    // Read dfs.client.read.shortcircuit; if it is true, enable short-circuit reads
    if (conf.getBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY,
                        DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT)) {
      String reason = DomainSocket.getLoadingFailureReason();
      if (reason != null) {
        LOG.warn("File descriptor passing is disabled because " + reason);
        this.fileDescriptorPassingDisabledReason = reason;
      } else {
        LOG.info("File descriptor passing is enabled.");
        this.fileDescriptorPassingDisabledReason = null;
      }
    } else {
      this.fileDescriptorPassingDisabledReason =
          "File descriptor passing was not configured.";
      LOG.debug(this.fileDescriptorPassingDisabledReason);
    }

    try {
      // Resolve the DataNode's hostname
      hostName = getHostName(conf);
      LOG.info("Configured hostname is " + hostName);
      // Start the DataNode
      startDataNode(conf, dataDirs, resources);
    } catch (IOException ie) {
      shutdown();
      throw ie;
    }
    final int dncCacheMaxSize =
        conf.getInt(DFS_DATANODE_NETWORK_COUNTS_CACHE_MAX_SIZE_KEY,
            DFS_DATANODE_NETWORK_COUNTS_CACHE_MAX_SIZE_DEFAULT);
    datanodeNetworkCounts =
        CacheBuilder.newBuilder()
            .maximumSize(dncCacheMaxSize)
            .build(new CacheLoader<String, Map<String, Long>>() {
              @Override
              public Map<String, Long> load(String key) throws Exception {
                final Map<String, Long> ret = new HashMap<String, Long>();
                ret.put("networkErrors", 0L);
                return ret;
              }
            });
  }
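
The Guava LoadingCache built at the end of the constructor keeps one small counter map per remote host, and its CacheLoader pre-populates networkErrors = 0 on first access. Below is a rough sketch of how such a cache is consumed when a transfer against a peer fails; the real helper in DataNode is similar but not necessarily identical:

  void incrementDatanodeNetworkErrors(String host) {
    try {
      // The first get() for this host runs the CacheLoader above, so the map always
      // contains a "networkErrors" entry that can be incremented in place.
      Map<String, Long> counts = datanodeNetworkCounts.get(host);
      counts.put("networkErrors", counts.get("networkErrors") + 1L);
    } catch (ExecutionException e) {
      LOG.warn("Failed to increment network error count for " + host, e);
    }
  }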

  void startDataNode(Configuration conf,
                     List<StorageLocation> dataDirs,
                     SecureResources resources
                     ) throws IOException {

    // settings global for all BPs in the DataNode
    this.secureResources = resources;
    synchronized (this) {
      this.dataDirs = dataDirs;
    }
    this.conf = conf;
    this.dnConf = new DNConf(conf);
    checkSecureConfig(dnConf, conf, resources);
    /*
     * dfs.datanode.max.locked.memory: the maximum amount of memory the DataNode may
     * use to cache block replicas in memory. The default is 0, which means replicas
     * are not cached. Native library support is also required; without it the feature
     * cannot be used (a short configuration sketch follows this method).
     */
    if (dnConf.maxLockedMemory > 0) {
      // If the native library is not available but this parameter is set to a value
      // greater than zero, an exception is thrown and the DataNode will not start.
      if (!NativeIO.POSIX.getCacheManipulator().verifyCanMlock()) {
        throw new RuntimeException(String.format(
            "Cannot start datanode because the configured max locked memory" +
            " size (%s) is greater than zero and native code is not available.",
            DFS_DATANODE_MAX_LOCKED_MEMORY_KEY));
      }
      if (Path.WINDOWS) {
        NativeIO.Windows.extendWorkingSetSize(dnConf.maxLockedMemory);
      } else {
        long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
        if (dnConf.maxLockedMemory > ulimit) {
          throw new RuntimeException(String.format(
            "Cannot start datanode because the configured max locked memory" +
            " size (%s) of %d bytes is more than the datanode's available" +
            " RLIMIT_MEMLOCK ulimit of %d bytes.",
            DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
            dnConf.maxLockedMemory,
            ulimit));
        }
      }
    }
    LOG.info("Starting DataNode with maxLockedMemory = " +
        dnConf.maxLockedMemory);
    /*
     * The DataNode's most important job is managing the blocks stored on disk, and it
     * splits this job into two parts:
     * 1. DataStorage: manages and organizes the on-disk storage directories such as
     *    current, previous, detach and tmp; inside a DataNode data directory you can
     *    see folders such as current, tmp, rbw and finalized.
     * 2. FsDatasetImpl: manages and organizes the block data and metadata files.
     */
    storage = new DataStorage();

    // Register with JMX
    registerMXBean();
    // Initialize the DataXceiver (the streaming data-transfer server)
    initDataXceiver(conf);
    startInfoServer(conf);
    pauseMonitor = new JvmPauseMonitor(conf);
    pauseMonitor.start();

    // BlockPoolTokenSecretManager is required to create ipc server.
    this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();

    // Login is done by now. Set the DN user name.
    dnUserName = UserGroupInformation.getCurrentUser().getShortUserName();
    LOG.info("dnUserName = " + dnUserName);
    LOG.info("supergroup = " + supergroup);
    // Initialize the RPC server
    initIpcServer(conf);

    metrics = DataNodeMetrics.create(conf, getDisplayName());
    metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
    // Initialize the BlockPoolManager
    blockPoolManager = new BlockPoolManager(this);
    // Refresh the NameNodes, creating the block pool services for each nameservice
    blockPoolManager.refreshNamenodes(conf);

    // Create the ReadaheadPool from the DataNode context so we can
    // exit without having to explicitly shutdown its thread pool.
    readaheadPool = ReadaheadPool.getInstance();
    saslClient = new SaslDataTransferClient(dnConf.conf,
        dnConf.saslPropsResolver, dnConf.trustedChannelResolver);
    saslServer = new SaslDataTransferServer(dnConf, blockPoolTokenSecretManager);
  }
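
As noted in the comment above the maxLockedMemory check, in-memory replica caching is disabled by default. A minimal, hypothetical sketch of enabling it programmatically, assuming native mlock support and a sufficient RLIMIT_MEMLOCK ulimit on the host (in practice the key is normally set in hdfs-site.xml):

  // Hypothetical setup code, not part of DataNode: cap in-memory replica caching at 128 MB
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, 128L * 1024 * 1024);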
