Create a dedicated group and user on every node. Note that useradd's -p flag expects an already-hashed password, so set the password with passwd instead:

groupadd hadoop
useradd -g hadoop hadoop
passwd hadoop    # e.g. 123456

Set up passwordless SSH as the hadoop user on master:

ssh-keygen -t rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys    # so ssh to master itself also works
chmod 644 ~/.ssh/authorized_keys
service sshd restart

ssh-copy-id -i ~/.ssh/id_rsa.pub hadoop@slave1
ssh-copy-id -i ~/.ssh/id_rsa.pub hadoop@slave2

# The same thing done by hand: copy the master key over, then append it on each slave:
# scp ~/.ssh/id_rsa.pub hadoop@slave1:~/.ssh/master.pub
# scp ~/.ssh/id_rsa.pub hadoop@slave2:~/.ssh/master.pub
# (on each slave) cat ~/.ssh/master*.pub >> ~/.ssh/authorized_keys

Point the hadoop user's environment at the Hadoop 2.2.0 install:

vi ~/.bash_profile

export HADOOP_HOME=/home/hadoop/hadoop-2.2.0
PATH=$PATH:$HOME/bin:$HADOOP_HOME/bin

source ~/.bash_profile

Create the local directories that the configuration below refers to:

mkdir -p ~/dfs/name
mkdir -p ~/dfs/data
mkdir -p ~/temp

Edit the configuration files:

cd /home/hadoop/hadoop-2.2.0/etc/hadoop

vi core-site.xml

<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://master:9000</value>
  </property>
  <property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>file:/home/hadoop/temp</value>
    <description>A base for other temporary directories.</description>
  </property>
  <property>
    <name>hadoop.proxyuser.hduser.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.hduser.groups</name>
    <value>*</value>
  </property>
</configuration>

(The proxyuser entries name hduser rather than hadoop; they only matter if you proxy other users through this cluster.)

vi hdfs-site.xml

<configuration>
  <property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>master:9001</value>
  </property>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:/home/hadoop/dfs/name</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/home/hadoop/dfs/data</value>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
  </property>
</configuration>

(With only two datanodes, a replication factor of 3 can never actually be met; 2 matches this cluster.)

mapred-site.xml has to be created from its template first:

cp mapred-site.xml.template mapred-site.xml
vi mapred-site.xml

<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>master:10020</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>master:19888</value>
  </property>
</configuration>

vi yarn-site.xml

<configuration>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
  <property>
    <name>yarn.resourcemanager.address</name>
    <value>master:8032</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>master:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>master:8031</value>
  </property>
  <property>
    <name>yarn.resourcemanager.admin.address</name>
    <value>master:8033</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>master:8088</value>
  </property>
</configuration>

Copy the configured distribution to both slaves:

scp -r hadoop-2.2.0 hadoop@slave1:/home/hadoop/
scp -r hadoop-2.2.0 hadoop@slave2:/home/hadoop/

Format the NameNode, then start HDFS:

hdfs namenode -format

[hadoop@master hadoop-2.2.0]$ sbin/start-dfs.sh
Starting namenodes on [master]
master: starting namenode, logging to /home/hadoop/hadoop-2.2.0/logs/hadoop-hadoop-namenode-master.out
slave1: starting datanode, logging to /home/hadoop/hadoop-2.2.0/logs/hadoop-hadoop-datanode-slave1.out
slave2: starting datanode, logging to /home/hadoop/hadoop-2.2.0/logs/hadoop-hadoop-datanode-slave2.out
Starting secondary namenodes [master]
master: starting secondarynamenode, logging to /home/hadoop/hadoop-2.2.0/logs/hadoop-hadoop-secondarynamenode-master.out
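Before bringing up YARN, it is worth confirming that the HDFS daemons actually came up. A minimal check, assuming jps (it ships with the JDK) is on the PATH of every node:

# on master: expect NameNode and SecondaryNameNode
jps
# on the slaves: expect a DataNode on each
ssh hadoop@slave1 jps
ssh hadoop@slave2 jps
# ask the NameNode how many datanodes have registered
hdfs dfsadmin -report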
Then start YARN:

[hadoop@master hadoop-2.2.0]$ sbin/start-yarn.sh
starting yarn daemons
starting resourcemanager, logging to /home/hadoop/hadoop-2.2.0/logs/yarn-hadoop-resourcemanager-master.out
slave2: starting nodemanager, logging to /home/hadoop/hadoop-2.2.0/logs/yarn-hadoop-nodemanager-slave2.out
slave1: starting nodemanager, logging to /home/hadoop/hadoop-2.2.0/logs/yarn-hadoop-nodemanager-slave1.out
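With HDFS and YARN both running, a quick end-to-end smoke test is the wordcount example that ships with the distribution. A minimal sketch, run from /home/hadoop/hadoop-2.2.0, assuming the examples jar is at its default 2.2.0 location (adjust the jar name to your exact version) and that /input and /output do not yet exist in HDFS:

# stage a small input file in HDFS
hdfs dfs -mkdir -p /input
hdfs dfs -put etc/hadoop/core-site.xml /input
# run wordcount on YARN and read back the reducer output
hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.2.0.jar wordcount /input /output
hdfs dfs -cat /output/part-r-00000

The ResourceManager web UI at http://master:8088 (the yarn.resourcemanager.webapp.address configured above) should list the finished job; the NameNode UI is on its default port at http://master:50070.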