- 切换账户
# Switch to the hadoop service account.
# NOTE(review): 'su - hadoop' would also load hadoop's login environment
# (PATH, JAVA_HOME, etc.) — confirm whether the plain 'su' here is intentional.
su hadoop
# Work from the installation root; every relative path below assumes /hadoop.
cd /hadoop
- 启动 zookeeper
# Start ZooKeeper on each node individually: hadoop101 hadoop102 hadoop103
# NOTE(review): these node names differ from the manager202/manager203 hosts
# used in the later sections — confirm which naming is current.
./zookeeper/bin/zkServer.sh start
- 启动 HDFS
# Run on manager202: launches the HDFS daemons across the cluster.
./hadoop-2.7.6/sbin/start-dfs.sh
- 启动 YARN
# Run on manager202: launches YARN (ResourceManager here plus the NodeManagers).
./hadoop-2.7.6/sbin/start-yarn.sh
# Run on manager203: start the second ResourceManager by hand —
# start-yarn.sh only starts the ResourceManager on the host it runs on,
# so the standby RM on the other master needs this explicit daemon start.
./hadoop-2.7.6/sbin/yarn-daemon.sh start resourcemanager
- 启动 HBase
# Run on manager202: starts the HBase cluster (HMaster and RegionServers).
./hbase-1.2.6/bin/start-hbase.sh
# Run on manager203: start a backup HMaster for HA —
# start-hbase.sh does not start masters on other hosts.
./hbase-1.2.6/bin/hbase-daemon.sh start master
- Spark 执行示例
# Smoke-test the cluster: submit the bundled SparkPi example to YARN in
# cluster mode (driver runs inside the cluster), with 1 executor at 512 MB;
# the trailing '10' is the application argument (number of partitions/slices).
./spark-2.2.1-bin-hadoop2.7/bin/spark-submit --master yarn --deploy-mode cluster --class org.apache.spark.examples.SparkPi --executor-memory 512M --num-executors 1 /hadoop/spark-2.2.1-bin-hadoop2.7/examples/jars/spark-examples_2.11-2.2.1.jar 10