start-all.sh
libexec/hadoop-config.sh -- set variables
sbin/start-dfs.sh --config $HADOOP_CONF_DIR -- start HDFS
sbin/start-yarn.sh --config $HADOOP_CONF_DIR -- start YARN
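For reference, the dispatch logic in Hadoop 2.x's sbin/start-all.sh is roughly the following (a condensed sketch; the real script also prints a deprecation notice steering users toward running start-dfs.sh and start-yarn.sh directly):

bin=$(cd "$(dirname "${BASH_SOURCE-$0}")"; pwd)
DEFAULT_LIBEXEC_DIR="$bin"/../libexec
HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
. $HADOOP_LIBEXEC_DIR/hadoop-config.sh    # set variables

# start the HDFS daemons if HDFS is installed
if [ -f "${HADOOP_HDFS_HOME}"/sbin/start-dfs.sh ]; then
  "${HADOOP_HDFS_HOME}"/sbin/start-dfs.sh --config $HADOOP_CONF_DIR
fi
# start the YARN daemons if YARN is installed
if [ -f "${HADOOP_YARN_HOME}"/sbin/start-yarn.sh ]; then
  "${HADOOP_YARN_HOME}"/sbin/start-yarn.sh --config $HADOOP_CONF_DIR
fi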
libexec/hadoop-config.sh -- set variables
COMMON_DIR
...
HADOOP_CONF_DIR=...
JAVA_HEAP_MAX=-Xmx1000m (overridable via HADOOP_HEAPSIZE)
CLASSPATH=...
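A condensed sketch of how libexec/hadoop-config.sh derives these values in Hadoop 2.x (paths simplified; the real script handles many more overrides and optional components):

this="${BASH_SOURCE-$0}"
common_bin=$(cd -P -- "$(dirname -- "$this")" && pwd -P)

# everything is resolved relative to the install root
export HADOOP_PREFIX=$(cd "$common_bin/.."; pwd)

# config dir: honor an existing $HADOOP_CONF_DIR, else use the default
export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_PREFIX/etc/hadoop}"

# JVM heap: 1000 MB unless HADOOP_HEAPSIZE overrides it
JAVA_HEAP_MAX=-Xmx1000m
if [ "$HADOOP_HEAPSIZE" != "" ]; then
  JAVA_HEAP_MAX="-Xmx${HADOOP_HEAPSIZE}m"
fi

# classpath starts at the conf dir, then accumulates the common jars
CLASSPATH="${HADOOP_CONF_DIR}"
CLASSPATH="${CLASSPATH}:${HADOOP_PREFIX}/share/hadoop/common/*"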
sbin/start-dfs.sh --config $HADOOP_CONF_DIR -- start HDFS
1. libexec/hdfs-config.sh -- set variables
2. # get the namenode hostnames (usage example after this list)
NAMENODES=$("$HADOOP_PREFIX/bin/hdfs" getconf -namenodes)
3. Start the namenodes
"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
    --config "$HADOOP_CONF_DIR" \
    --hostnames "$NAMENODES" \
    --script "$bin/hdfs" start namenode $nameStartOpt
4. Start the datanodes
"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
    --config "$HADOOP_CONF_DIR" \
    --script "$bin/hdfs" start datanode $dataStartOpt
5. Start the secondary namenode (2NN)
"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
    --config "$HADOOP_CONF_DIR" \
    --hostnames "$SECONDARY_NAMENODES" \
    --script "$bin/hdfs" start secondarynamenode
sbin/hadoop-daemons.sh -- runs a daemon command on every slave host
1. libexec/hdfs-config.sh -- set variables
2. exec "$bin/slaves.sh" --config $HADOOP_CONF_DIR cd "$HADOOP_PREFIX" \; "$bin/hadoop-daemon.sh" --config $HADOOP_CONF_DIR "$@"
# loop over the slaves file, log in to each host via ssh, and run the given command
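A simplified sketch of the loop inside sbin/slaves.sh (Hadoop 2.x; the real script also escapes spaces in the forwarded arguments):

# pick the host list: $HADOOP_SLAVES if set, else the conf dir's slaves file
HOSTLIST="${HADOOP_SLAVES:-$HADOOP_CONF_DIR/slaves}"

# one hostname per line; strip comments and blank lines, then run the
# remaining arguments on each host over ssh, prefixing output with the host
for slave in $(sed 's/#.*$//;/^$/d' "$HOSTLIST"); do
  ssh $HADOOP_SSH_OPTS "$slave" "$@" 2>&1 | sed "s/^/$slave: /" &
done
wait   # block until every remote command has finished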
sbin/hadoop-daemon.sh
1. libexec/hadoop-config.sh -- set variables
2. bin/hdfs -- finally launches the daemon's JVM (condensed start logic below)
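A condensed sketch of the "start" branch in sbin/hadoop-daemon.sh (Hadoop 2.x; variable names follow the real script, which also rotates logs and creates the pid directory):

case $startStop in
  (start)
    # refuse to start twice: the pid file records the running daemon
    if [ -f "$pid" ] && kill -0 "$(cat "$pid")" >/dev/null 2>&1; then
      echo "$command running as process $(cat "$pid"). Stop it first."
      exit 1
    fi
    # detach the JVM from the terminal, redirect output to the log file,
    # and remember its pid; $hdfsScript is $HADOOP_PREFIX/bin/hdfs
    nohup nice -n "$HADOOP_NICENESS" "$hdfsScript" --config "$HADOOP_CONF_DIR" \
      $command "$@" > "$log" 2>&1 < /dev/null &
    echo $! > "$pid"
    ;;
esac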