CDH4.4 Installation Notes

Installation environment: Red Hat Enterprise Linux 6.2

1. Set the hostname

vi /etc/sysconfig/network
HOSTNAME=hadoop1   # use hadoop1 through hadoop8, one name per node
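To apply the new name without rebooting, the hostname command can also be run (shown here for the first node):
hostname hadoop1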
2. Edit /etc/hosts
vi /etc/hosts
10.193.223.4 hadoop1
10.193.223.5 hadoop2
10.193.223.6 hadoop3
10.193.223.7 hadoop4
10.193.223.8 hadoop5
10.193.223.9 hadoop6
10.193.223.10 hadoop7
10.193.223.11 hadoop8
3. Configure the locale
vi /etc/sysconfig/i18n
LANG=en_US
4. Disable the firewall and SELinux
service iptables stop
chkconfig iptables off
vi /etc/selinux/config   # set SELINUX=disabled
## reboot the machine for the SELinux change to take effect
5. Install the JDK
mkdir /usr/java
chmod 775 -R /usr/java

cd /usr/java
./jdk-6u45-linux-x64.bin 
6. Configure environment variables
vi /etc/profile
export JAVA_HOME=/usr/java/jdk1.6.0_45
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=$CLASSPATH:$JAVA_HOME/lib:$JAVA_HOME/jre/lib
source /etc/profile
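A quick check that the new JDK is picked up (should report version 1.6.0_45):
java -version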
7. Create the hadoop user
groupadd hadoop
useradd  -g hadoop hadoop
passwd hadoop
vi /etc/sudoers   # add: hadoop  ALL=(ALL) NOPASSWD:ALL
8. Set up passwordless SSH for the hadoop user (generate a key on every node, then gather the public keys from hadoop1)
su - hadoop
mkdir .ssh
chmod 700 .ssh
cd .ssh
ssh-keygen -t rsa
touch authorized_keys 
chmod 600 authorized_keys
ssh hadoop1 cat /home/hadoop/.ssh/id_rsa.pub >> /home/hadoop/.ssh/authorized_keys
ssh hadoop2 cat /home/hadoop/.ssh/id_rsa.pub >> /home/hadoop/.ssh/authorized_keys
ssh hadoop3 cat /home/hadoop/.ssh/id_rsa.pub >> /home/hadoop/.ssh/authorized_keys
ssh hadoop4 cat /home/hadoop/.ssh/id_rsa.pub >> /home/hadoop/.ssh/authorized_keys
ssh hadoop5 cat /home/hadoop/.ssh/id_rsa.pub >> /home/hadoop/.ssh/authorized_keys
ssh hadoop6 cat /home/hadoop/.ssh/id_rsa.pub >> /home/hadoop/.ssh/authorized_keys
ssh hadoop7 cat /home/hadoop/.ssh/id_rsa.pub >> /home/hadoop/.ssh/authorized_keys
scp /home/hadoop/.ssh/authorized_keys hadoop@hadoop2:.ssh/
scp /home/hadoop/.ssh/authorized_keys hadoop@hadoop3:.ssh/
scp /home/hadoop/.ssh/authorized_keys hadoop@hadoop4:.ssh/
scp /home/hadoop/.ssh/authorized_keys hadoop@hadoop5:.ssh/
scp /home/hadoop/.ssh/authorized_keys hadoop@hadoop6:.ssh/
scp /home/hadoop/.ssh/authorized_keys hadoop@hadoop7:.ssh/
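A quick sanity check from hadoop1: the following should print the remote date without asking for a password (repeat for the other nodes as needed):
ssh hadoop2 date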
9. Install and configure Hadoop
cd /home/hadoop
tar zxvf hadoop-2.0.0-cdh4.4.0.tar.gz
cd hadoop-2.0.0-cdh4.4.0/etc/hadoop
vi hadoop-env.sh
# add the following environment variables
# Set Hadoop-specific environment variables here.
export JAVA_HOME=/usr/java/jdk1.6.0_45
export HADOOP_HOME=/home/hadoop/hadoop-2.0.0-cdh4.4.0
export HADOOP_COMMON_HOME=${HADOOP_HOME}
export HADOOP_HDFS_HOME=${HADOOP_HOME}
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin
export HADOOP_MAPRED_HOME=${HADOOP_HOME}
export YARN_HOME=${HADOOP_HOME}
export HADOOP_CONF_HOME=${HADOOP_HOME}/etc/hadoop
export YARN_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export HADOOP_HOME_WARN_SUPPRESS=1

vi /home/hadoop/.bash_profile
# User specific environment and startup programs
export HADOOP_HOME=/home/hadoop/hadoop-2.0.0-cdh4.4.0
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_CLASSPATH=`$HBASE_HOME/bin/hbase classpath`   # requires HBASE_HOME to be set (see the HBase section below)
export PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
source /home/hadoop/.bash_profile   # reload so the variables take effect

mkdir -p /app/cdhworkspace/tmp
vi /home/hadoop/hadoop-2.0.0-cdh4.4.0/etc/hadoop/core-site.xml
# add the following
<property>
    <name>fs.defaultFS</name>
    <value>hdfs://hadoop1:9000</value>
</property>
<property>
    <name>hadoop.tmp.dir</name>
    <value>/app/cdhworkspace/tmp</value>
</property>

mkdir -p /app/cdhworkspace/dfs/name
mkdir -p /app/cdhworkspace/dfs/data
vi  /home/hadoop/hadoop-2.0.0-cdh4.4.0/etc/hadoop/hdfs-site.xml
# add the following
<property>
       <name>dfs.namenode.name.dir</name>
       <value>/app/cdhworkspace/dfs/name</value>
   </property>
   <property>
       <name>dfs.datanode.data.dir</name>
       <value>/app/cdhworkspace/dfs/data</value>
   </property>
   <property>
       <name>dfs.replication</name>
       <value>3</value>
   </property>
   <property>
       <name>dfs.permissions</name>
       <value>false</value>
   </property>

mkdir -p /app/cdhworkspace/mapred/system
mkdir -p /app/cdhworkspace/mapred/local
vi  /home/hadoop/hadoop-2.0.0-cdh4.4.0/etc/hadoop/mapred-site.xml
# add the following
<property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
</property>
<property>
    <name>mapreduce.job.tracker</name>
    <value>hdfs://hadoop1:9001</value>
    <final>true</final>
</property>
<property>
    <name>mapreduce.jobtracker.address</name>
    <value>hadoop1:9002</value>
    <description>The host and port that the MapReduce job tracker runs
    at. If "local", then jobs are run in-process as a single map
    and reduce task.
    </description>
</property>
<property>
    <name>mapred.system.dir</name>
    <value>/app/cdhworkspace/mapred/system</value>
    <final>true</final>
</property>
<property>
    <name>mapred.local.dir</name>
    <value>/app/cdhworkspace/mapred/local</value>
    <final>true</final>
</property>

vi /home/hadoop/hadoop-2.0.0-cdh4.4.0/etc/hadoop/yarn-site.xml
# add the following
<property>
<name>yarn.resourcemanager.address</name>
<value>hadoop1:8080</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>hadoop1:8081</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>hadoop1:8082</value>
</property>

<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce.shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
    <name>yarn.nodemanager.resource.memory-mb</name>
    <value>20480</value>  <!-- default is 8192 -->
</property>

10. As the hadoop user, create the Hadoop working directory on every node
mkdir -p /app/cdhworkspace
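Since the same directory is needed on every node, it can also be created in one pass from hadoop1, assuming /app is already writable by the hadoop user:
for h in hadoop2 hadoop3 hadoop4 hadoop5 hadoop6 hadoop7; do ssh $h "mkdir -p /app/cdhworkspace"; done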
11. Configure slaves
vi /home/hadoop/hadoop-2.0.0-cdh4.4.0/etc/hadoop/slaves
hadoop2
hadoop3
hadoop4
hadoop5
hadoop6
hadoop7
12. Distribute the installation to each node
scp -rp ./hadoop-2.0.0-cdh4.4.0 hadoop@hadoop2:/home/hadoop
scp -rp ./hadoop-2.0.0-cdh4.4.0 hadoop@hadoop3:/home/hadoop  
scp -rp ./hadoop-2.0.0-cdh4.4.0 hadoop@hadoop4:/home/hadoop
scp -rp ./hadoop-2.0.0-cdh4.4.0 hadoop@hadoop5:/home/hadoop
scp -rp ./hadoop-2.0.0-cdh4.4.0 hadoop@hadoop6:/home/hadoop
scp -rp ./hadoop-2.0.0-cdh4.4.0 hadoop@hadoop7:/home/hadoop

13. Format the NameNode (on hadoop1)
hdfs namenode -format
14. Start Hadoop
cd $HADOOP_HOME/sbin
sh start-dfs.sh 
sh start-yarn.sh 
15. Verify
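A minimal check: jps on hadoop1 should show NameNode, SecondaryNameNode and ResourceManager, and jps on the slaves should show DataNode and NodeManager. Assuming the default web UI ports have not been changed:
hdfs dfsadmin -report        # all 6 datanodes should be listed as live
# http://hadoop1:50070      HDFS NameNode web UI
# http://hadoop1:8088       YARN ResourceManager web UI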

Part 2: Hive installation
tar zxvf hive-0.10.0-cdh4.4.0.tar.gz
cd hive-0.10.0-cdh4.4.0
vi .bash_profile
# add HIVE_HOME
export HIVE_HOME=/home/hadoop/hive-0.10.0-cdh4.4.0
cd /home/hadoop/hive-0.10.0-cdh4.4.0/conf
cp hive-default.xml.template hive-site.xml
vi hive-site.xml
# modify the following properties
<property>
  <name>hive.metastore.warehouse.dir</name>
<value>/app/cdhworkspace/hive/warehouse</value>
  <description>location of default database for the warehouse</description>
</property>
<property>
  <name>hive.exec.scratchdir</name>
<value>/app/cdhworkspace/hive/hive-${user.name}</value>
  <description>Scratch space for Hive jobs</description>
</property>
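If the warehouse directory does not yet exist in HDFS, create it first (a sketch; mkdir flags may differ slightly between Hadoop releases):
hadoop fs -mkdir -p /app/cdhworkspace/hive/warehouse
hadoop fs -chmod g+w /app/cdhworkspace/hive/warehouse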

## Hive metastore: MySQL connection settings

javax.jdo.option.ConnectionURL: jdbc:mysql://localhost:3306/hive?createDatabaseIfNotExist=true

javax.jdo.option.ConnectionDriverName: com.mysql.jdbc.Driver

javax.jdo.option.ConnectionUserName: hive

javax.jdo.option.ConnectionPassword: hive
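These values go into hive-site.xml like any other property; a sketch for ConnectionURL (the other three follow the same pattern):
<property>
  <name>javax.jdo.option.ConnectionURL</name>
  <value>jdbc:mysql://localhost:3306/hive?createDatabaseIfNotExist=true</value>
</property>
The MySQL JDBC driver jar (mysql-connector-java) must also be copied into $HIVE_HOME/lib, and the hive user/password above must already exist in MySQL.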

# Run Hive in debug mode

hive -hiveconf hive.root.logger=DEBUG,console
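Once the metastore is reachable, a quick smoke test:
hive -e "show tables;"        # should return without errors (empty list on a fresh install)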

Part 3: ZooKeeper installation

tar zxvf zookeeper-3.4.5-cdh4.4.0.tar.gz 

cd /home/hadoop/zookeeper-3.4.5-cdh4.4.0/conf

cp zoo_sample.cfg zoo.cfg

vi zoo.cfg

# add the following

autopurge.purgeInterval=2

autopurge.snapRetainCount=10

server.1=hadoop3:2888:3888

server.2=hadoop4:2888:3888

server.3=hadoop5:2888:3888

# create the ZooKeeper data directories

cd /app/cdhworkspace/

mkdir zookeeper

cd zookeeper

mkdir data

mkdir log
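zoo.cfg should also point at these directories (replacing the dataDir inherited from zoo_sample.cfg):
dataDir=/app/cdhworkspace/zookeeper/data
dataLogDir=/app/cdhworkspace/zookeeper/log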

# distribute to the ZooKeeper nodes

scp -rp zookeeper-3.4.5-cdh4.4.0 hadoop@hadoop3:/home/hadoop

scp -rp zookeeper-3.4.5-cdh4.4.0 hadoop@hadoop4:/home/hadoop

scp -rp zookeeper-3.4.5-cdh4.4.0 hadoop@hadoop5:/home/hadoop

# hadoop3

touch /app/cdhworkspace/zookeeper/data/myid

echo 1 > /app/cdhworkspace/zookeeper/data/myid

# hadoop4

touch /app/cdhworkspace/zookeeper/data/myid

echo 2 > /app/cdhworkspace/zookeeper/data/myid

# hadoop5

touch /app/cdhworkspace/zookeeper/data/myid

echo 3 > /app/cdhworkspace/zookeeper/data/myid

# start ZooKeeper on each of the three nodes

$ZOOKEEPER_HOME/bin/zkServer.sh start
# verify with jps: a QuorumPeerMain process should be running on each node
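zkServer.sh can also report each node's role once a quorum has formed:
$ZOOKEEPER_HOME/bin/zkServer.sh status   # one node should report leader, the others follower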

Part 4: HBase installation
tar zxvf hbase-0.94.6-cdh4.4.0.tar.gz 
cd hbase-0.94.6-cdh4.4.0/conf
vi hbase-env.sh 
# add the following environment variables
export HADOOP_HOME=/home/hadoop/hadoop-2.0.0-cdh4.4.0
export HBASE_HOME=/home/hadoop/hbase-0.94.6-cdh4.4.0
export JAVA_HOME=/usr/java/jdk1.6.0_45
export HBASE_MANAGES_ZK=false
export HBASE_HEAPSIZE=4000

vi hbase-site.xml
<property>
<name>hbase.rootdir</name>
<value>hdfs://hadoop1:9000/hbase</value>
</property>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<property>
<name>hbase.master</name>
<value>hadoop1:60000</value>
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>hadoop3,hadoop4,hadoop5</value>
</property>
<property>
<name>hbase.master.info.port</name>
<value>60010</value>
</property>
<property>
<name>hbase.master.port</name>
<value>60000</value>
</property>
<property>
<name>hbase.master.maxclockskew</name>
<value>180000</value>
<description>Time difference of regionserver from master</description>
</property>
vi regionservers 
# add the slave nodes
hadoop2
hadoop3
hadoop4
hadoop5
hadoop6
hadoop7
# distribute to each node
scp -rp hbase-0.94.6-cdh4.4.0 hadoop@hadoop2:/home/hadoop
scp -rp hbase-0.94.6-cdh4.4.0 hadoop@hadoop3:/home/hadoop
scp -rp hbase-0.94.6-cdh4.4.0 hadoop@hadoop4:/home/hadoop
scp -rp hbase-0.94.6-cdh4.4.0 hadoop@hadoop5:/home/hadoop
scp -rp hbase-0.94.6-cdh4.4.0 hadoop@hadoop6:/home/hadoop
scp -rp hbase-0.94.6-cdh4.4.0 hadoop@hadoop7:/home/hadoop
# start HBase
sh start-hbase.sh
# check status via the master web UI
http://10.193.223.4:60010/master-status
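The cluster can also be checked from the HBase shell; the status command should report 6 live region servers:
$HBASE_HOME/bin/hbase shell
status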