ubuntu安装伪分布式hadoop
1、安装ssh
sudo apt-get install openssh-server openssh-client
创建ssh-key
ssh-keygen -t rsa -P ""
进入.ssh目录
cd ~/.ssh
创建authorized_keys
cat id_rsa.pub >> authorized_keys
编辑/etc/ssh/sshd_config
sudo vim /etc/ssh/sshd_config
RSAAuthentication yes
PubkeyAuthentication yes
AuthorizedKeysFile %h/.ssh/authorized_keys
重启ssh使配置生效（需要root权限）
sudo /etc/init.d/ssh restart
查看是否生效
ssh localhost
2、安装vim
sudo apt-get install vim
3、安装jdk
将下载的jdk安装文件拷贝到/tmp
进入/usr目录
cd /usr
拷贝jdk-6u31-linux-x64.bin到/usr
sudo cp /tmp/jdk-6u31-linux-x64.bin /usr
为jdk-6u31-linux-x64.bin赋可执行权限（只需执行权限，无需777）
sudo chmod u+x jdk-6u31-linux-x64.bin
执行jdk-6u31-linux-x64.bin
sudo ./jdk-6u31-linux-x64.bin
修改jdk1.6.0_31名称
sudo mv jdk1.6.0_31/ jdk
配置环境变量
sudo vim /etc/profile
export JAVA_HOME=/usr/jdk
export CLASSPATH=.:$CLASSPATH:$JAVA_HOME/lib:$JAVA_HOME/jre/lib
export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin
使更新生效
source /etc/profile
验证安装是否成功
java -version
java version "1.6.0_31"
Java(TM) SE Runtime Environment (build 1.6.0_31-b04)
Java HotSpot(TM) 64-Bit Server VM (build 20.6-b01, mixed mode)
4、安装hadoop
将hadoop安装包拷贝到/tmp
进入/usr目录
cd /usr
复制hadoop-1.2.1.tar.gz到/usr
sudo cp /tmp/hadoop-1.2.1.tar.gz /usr
解压hadoop-1.2.1.tar.gz
sudo tar -zxvf hadoop-1.2.1.tar.gz
将hadoop-1.2.1重命名为hadoop
sudo mv hadoop-1.2.1 hadoop
更改hadoop的拥有者
sudo chown -R hadoop:hadoop hadoop
进入/usr/hadoop/conf/
cd /usr/hadoop/conf/
修改hadoop-env.sh 在文件末尾加入信息
sudo vim hadoop-env.sh
export JAVA_HOME=/usr/jdk
配置core-site.xml在<configuration></configuration>间加入信息
sudo vim core-site.xml
<property>
<name>hadoop.tmp.dir</name>
<value>/home/hadoop/tmp/</value>
</property>
<property>
<name>fs.default.name</name>
<value>hdfs://localhost:9000</value>
</property>
配置hdfs-site.xml在<configuration></configuration>间加入信息
sudo vim hdfs-site.xml
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
配置mapred-site.xml在<configuration></configuration>间加入信息
sudo vim mapred-site.xml
<property>
<name>mapred.job.tracker</name>
<value>localhost:9001</value>
</property>
sudo vim /etc/profile
export HADOOP_PREFIX=/usr/hadoop
export PATH=$PATH:$HADOOP_PREFIX/bin
export PATH=$PATH:$HADOOP_PREFIX/sbin
export HADOOP_MAPRED_HOME=${HADOOP_PREFIX}
export HADOOP_COMMON_HOME=${HADOOP_PREFIX}
export HADOOP_HDFS_HOME=${HADOOP_PREFIX}
export YARN_HOME=${HADOOP_PREFIX}
source /etc/profile
hadoop namenode -format
start-all.sh