Hadoop

Install the virtual machines

Disable SELinux

vim /etc/sysconfig/selinux

# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
#     enforcing - SELinux security policy is enforced.
#     permissive - SELinux prints warnings instead of enforcing.
#     disabled - No SELinux policy is loaded.
SELINUX=disabled # set to disabled
# SELINUXTYPE= can take one of three values:
#     targeted - Targeted processes are protected,
#     minimum - Modification of targeted policy. Only selected processes are protected.
#     mls - Multi Level Security protection.
SELINUXTYPE=targeted

Disable the firewall and NetworkManager

service iptables stop
chkconfig iptables off
# disable NetworkManager
service NetworkManager stop
chkconfig NetworkManager off
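
All the hadoop0N host names below assume matching entries in /etc/hosts on every node; a sketch (the IP addresses are placeholders for your own):
vim /etc/hosts
	192.168.1.101 hadoop01
	192.168.1.102 hadoop02
	192.168.1.103 hadoop03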

Passwordless SSH

On all three nodes:

ssh-keygen -t rsa	// press Enter at every prompt

ssh-copy-id hadoop01
ssh-copy-id hadoop02
ssh-copy-id hadoop03

Verify
ssh hadoop02
exit
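
To avoid typing the three ssh-copy-id commands separately, a one-line sketch (run on each node after ssh-keygen; it still prompts for each password):
for h in hadoop01 hadoop02 hadoop03; do ssh-copy-id $h; done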

Create directories

On all three nodes:
mkdir -p /export/software
mkdir -p /export/servers

Install the JDK

Remove the pre-installed JDK

rpm -qa | grep java
rpm -e --nodeps java-1.6.0-openjdk-1.6.0.0-1.66.1.13.0.el6.x86_64
rpm -e --nodeps java-1.7.0-openjdk-1.7.0.45-2.4.3.3.el6.x86_64
rpm -e --nodeps tzdata-java-2013g-1.el6.noarch
	//the arguments after rpm -e --nodeps are the package names from the grep output
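
If there are many matches, a sketch that removes them all at once (check the grep output first, since this is indiscriminate):
rpm -qa | grep -E 'java|tzdata-java' | xargs -r rpm -e --nodeps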

Install the new JDK

cd /export/software/
Upload the JDK archive to /export/software, then extract it:
tar -zxvf jdk-8u141-linux-x64.tar.gz -C /export/servers/

Rename it:
cd /export/servers/
mv jdk1.8.0_141/ jdk

Configure the JDK environment variables
vim /etc/profile
	#JAVA_SETTINGS
	export JAVA_HOME=/export/servers/jdk
	export PATH=$PATH:$JAVA_HOME/bin

source /etc/profile

Check
java -version

Distribute the JDK and profile to the other nodes

scp -r /export/servers/jdk hadoop02:/export/servers
scp -r /export/servers/jdk hadoop03:/export/servers
scp /etc/profile hadoop02:/etc/profile
scp /etc/profile hadoop03:/etc/profile

On the worker nodes:
source /etc/profile
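
A quick check from hadoop01 that the copy worked on both workers (absolute path, so it does not depend on the remote PATH):
for h in hadoop02 hadoop03; do echo "== $h =="; ssh $h /export/servers/jdk/bin/java -version; done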

Install MySQL

Remove the bundled MySQL

rpm -qa | grep mysql
rpm -e --nodeps mysql-libs-5.1.71-1.el6.x86_64

Install MySQL

mkdir -p /export/software/mysql

Upload the MySQL RPMs to /export/software/mysql

cd /export/software/mysql
rpm -ivh *.rpm

View the generated random root password
cat /root/.mysql_secret

Start MySQL
service mysql start

Change the password (log in with the random password first)
	mysql -uroot -p
	SET PASSWORD = PASSWORD('123456');
	exit

Grant remote access (in a mysql session)
	GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY '123456' WITH GRANT OPTION;
	flush privileges;

Verify
Test the connection with Navicat
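
Without Navicat, a command-line check from one of the other nodes works too (assumes the mysql client is installed there):
mysql -h hadoop01 -uroot -p123456 -e "SELECT VERSION();"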

Install Hadoop

Install

Upload the archive to /export/software/
cd /export/software/

tar -zxvf hadoop-2.7.5.tar.gz -C /export/servers/

vim /etc/profile
	#hadoop_home
	export HADOOP_HOME=/export/servers/hadoop-2.7.5
	export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin

source /etc/profile

Verify
hadoop version

Configuration

Edit hadoop-env.sh

vim /export/servers/hadoop-2.7.5/etc/hadoop/hadoop-env.sh
	change:
	export JAVA_HOME=/export/servers/jdk

Edit core-site.xml

vim /export/servers/hadoop-2.7.5/etc/hadoop/core-site.xml
	add:
    <configuration>
        <property>
            <name>fs.defaultFS</name>
            <value>hdfs://hadoop01:9000</value>
        </property>
        <property>
            <name>hadoop.tmp.dir</name>
            <value>/export/servers/hadoop-2.7.5/hadoopdata</value>
        </property>
    </configuration>

Create the directory for Hadoop's runtime data:
mkdir -p /export/servers/hadoop-2.7.5/hadoopdata

Edit hdfs-site.xml

vim /export/servers/hadoop-2.7.5/etc/hadoop/hdfs-site.xml
    add:
     <configuration>
        <property>
                <name>dfs.replication</name>
                <value>2</value>
        </property>
        <property>
                <name>dfs.namenode.secondary.http-address</name>
                <value>hadoop01:50090</value>
        </property>
    </configuration>

Edit the slaves file

vim /export/servers/hadoop-2.7.5/etc/hadoop/slaves
	replace the contents with:
    hadoop01
    hadoop02
    hadoop03

Edit mapred-site.xml

cd /export/servers/hadoop-2.7.5/etc/hadoop/
cp mapred-site.xml.template mapred-site.xml

vim /export/servers/hadoop-2.7.5/etc/hadoop/mapred-site.xml
    add:
    <configuration>
        <property>
            <name>mapreduce.framework.name</name>
            <value>yarn</value>
        </property>
    </configuration>

Edit yarn-site.xml

vim /export/servers/hadoop-2.7.5/etc/hadoop/yarn-site.xml
    add:
    <configuration>
        <property>
            <name>yarn.resourcemanager.hostname</name>
            <value>hadoop01</value>
        </property>
        <property>
            <name>yarn.nodemanager.aux-services</name>
            <value>mapreduce_shuffle</value>
        </property>
    </configuration>

Distribute the configuration to the other nodes

scp /etc/profile hadoop02:/etc/profile
scp /etc/profile hadoop03:/etc/profile
scp -r /export/servers/hadoop-2.7.5/ root@hadoop02:/export/servers/
scp -r /export/servers/hadoop-2.7.5/ root@hadoop03:/export/servers/

On the worker nodes:
source /etc/profile

Format the filesystem (on hadoop01 only)

hadoop namenode -format

// "successfully formatted" in the output means it worked

Start (run on hadoop01; start-all.sh launches the daemons on all three nodes over SSH)
start-all.sh
//stop-all.sh stops everything

Verify
jps
hadoop01	NameNode, SecondaryNameNode, DataNode, ResourceManager, NodeManager, Jps
hadoop02	DataNode, NodeManager, Jps
hadoop03	DataNode, NodeManager, Jps
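
To check all three nodes from hadoop01 in one go, a small sketch (absolute path so it works over non-interactive SSH):
for h in hadoop01 hadoop02 hadoop03; do echo "== $h =="; ssh $h /export/servers/jdk/bin/jps; done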

Web UI

http://hadoop01:50070
All three DataNodes should be listed there.
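
The same information is available on the command line:
hdfs dfsadmin -report	// shows the number of live DataNodes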

ZooKeeper cluster setup

Install

Upload the archive to /export/software
cd /export/software

tar -zxvf zookeeper-3.4.9.tar.gz -C /export/servers/

Edit the environment variables (vim /etc/profile):
	#ZK_HOME SETTING
	export ZK_HOME=/export/servers/zookeeper-3.4.9
	export PATH=$PATH:$ZK_HOME/bin

Distribute to the other nodes:
scp -r /export/servers/zookeeper-3.4.9/ root@hadoop02:/export/servers/
scp -r /export/servers/zookeeper-3.4.9/ root@hadoop03:/export/servers/
scp -r /etc/profile root@hadoop02:/etc/
scp -r /etc/profile root@hadoop03:/etc/

source /etc/profile	// on all three nodes

Configuration

cd /export/servers/zookeeper-3.4.9/
mkdir logs	// on all three nodes (the scp above ran before this directory existed)
cd conf/

vim log4j.properties
	zookeeper.log.dir=/export/servers/zookeeper-3.4.9/logs

mkdir -p /export/servers/zookeeper-3.4.9/data	// on all three nodes

mv zoo_sample.cfg zoo.cfg 
vim zoo.cfg 
	dataDir=/export/servers/zookeeper-3.4.9/data

	server.1=hadoop01:2888:3888
	server.2=hadoop02:2888:3888
	server.3=hadoop03:2888:3888

cd /export/servers/zookeeper-3.4.9/data/

hadoop01:
echo 1 > myid	// > rather than >> so a rerun does not append a duplicate line
hadoop02:
echo 2 > myid
hadoop03:
echo 3 > myid
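
Equivalently, all three myid files can be written from hadoop01 in one loop (this also creates the data directory, which the earlier scp predates, on each node):
i=1
for h in hadoop01 hadoop02 hadoop03; do
    ssh $h "mkdir -p /export/servers/zookeeper-3.4.9/data && echo $i > /export/servers/zookeeper-3.4.9/data/myid"
    i=$((i+1))
done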


On hadoop02 and hadoop03:
rm /export/servers/zookeeper-3.4.9/conf/zoo_sample.cfg

scp /export/servers/zookeeper-3.4.9/conf/zoo.cfg root@hadoop02:/export/servers/zookeeper-3.4.9/conf/
scp /export/servers/zookeeper-3.4.9/conf/zoo.cfg root@hadoop03:/export/servers/zookeeper-3.4.9/conf/

Start

On all three nodes:
zkServer.sh start
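
Once all three have started, check the ensemble; one node should report Mode: leader and the other two Mode: follower:
zkServer.sh status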

Install Kafka Eagle

(The archive installed here is kafka-eagle, a Kafka monitoring console, not Kafka itself.)

hadoop01 only
Upload the archive to /export/software

tar -zxvf kafka-eagle-1.0.8.tar.gz -C /export/servers/

cd /export/servers/kafka-eagle-1.0.8/

Configure the environment variables (vim /etc/profile)
	#KE_HOME SETTING
	export KE_HOME=/export/servers/kafka-eagle-1.0.8
	export PATH=$PATH:$KE_HOME/bin

source /etc/profile

Edit the configuration
vim /export/servers/kafka-eagle-1.0.8/conf/system-config.properties
	kafka.zk.list=hadoop01:2181,hadoop02:2181,hadoop03:2181

Start
ke.sh start

http://hadoop01:8048/ke/

Hive setup

Upload the archive to /export/software
cd /export/software/

tar -zxvf apache-hive-1.2.1-bin.tar.gz -C /export/servers/

cd /export/servers/apache-hive-1.2.1-bin/lib
Upload the MySQL JDBC driver JAR here

cd /export/servers/apache-hive-1.2.1-bin/conf
Upload the hive-site.xml file; its key values are:
	jdbc:mysql://localhost:3306/hive?createDatabaseIfNotExist=true
	root
	123456
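
The full file is not reproduced in these notes; a minimal sketch consistent with the three values above (standard Hive metastore properties; com.mysql.jdbc.Driver matches the 5.x connector JAR):
	<configuration>
		<property>
			<name>javax.jdo.option.ConnectionURL</name>
			<value>jdbc:mysql://localhost:3306/hive?createDatabaseIfNotExist=true</value>
		</property>
		<property>
			<name>javax.jdo.option.ConnectionDriverName</name>
			<value>com.mysql.jdbc.Driver</value>
		</property>
		<property>
			<name>javax.jdo.option.ConnectionUserName</name>
			<value>root</value>
		</property>
		<property>
			<name>javax.jdo.option.ConnectionPassword</name>
			<value>123456</value>
		</property>
	</configuration>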

mv hive-log4j.properties.template hive-log4j.properties

vim hive-log4j.properties
	hive.log.dir=/export/servers/apache-hive-1.2.1-bin/logs

mkdir -p /export/servers/apache-hive-1.2.1-bin/logs

Start MySQL
service mysql start

Test
cd /export/servers/apache-hive-1.2.1-bin/bin
./hiveserver2 

In another terminal:
./beeline
	!connect jdbc:hive2://hadoop01:10000
	root
	123456
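
Once connected, a quick smoke test:
	show databases;
	create table test(id int);
	show tables;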

Flume installation and configuration

Install

Upload the archive to /export/software/
cd /export/software/

tar -zxvf apache-flume-1.9.0-bin.tar.gz -C /export/servers/

Configuration

cd /export/servers/apache-flume-1.9.0-bin/conf/

Upload the netcat-logger.conf file (a sketch of its likely contents follows)
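
A minimal sketch matching the agent name (a1) and the port (44444) used below, assuming the standard netcat-source-to-logger-sink example:
	# name the components of agent a1
	a1.sources = r1
	a1.sinks = k1
	a1.channels = c1
	# netcat source listening on localhost:44444
	a1.sources.r1.type = netcat
	a1.sources.r1.bind = localhost
	a1.sources.r1.port = 44444
	# log every event to the console
	a1.sinks.k1.type = logger
	# in-memory channel wiring source to sink
	a1.channels.c1.type = memory
	a1.channels.c1.capacity = 1000
	a1.channels.c1.transactionCapacity = 100
	a1.sources.r1.channels = c1
	a1.sinks.k1.channel = c1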

cd /export/servers/apache-flume-1.9.0-bin/

bin/flume-ng agent -c ./conf -f ./conf/netcat-logger.conf -n a1 -Dflume.root.logger=INFO,console

Open a new terminal:
Upload the telnet RPM to /export/software/
cd /export/software/

rpm -ivh telnet-0.17-47.el6_3.1.x86_64.rpm 

telnet localhost 44444

Test
hello world	// the first terminal will log the event

cd /root/
mkdir logs

cd /export/servers/apache-flume-1.9.0-bin/conf/
Upload the spooldir-hdfs.conf file (sketch below)

vim spooldir-hdfs.conf
Remove any stray text from the file
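
The uploaded file is again not reproduced; a plausible minimal sketch, assuming a spooling-directory source watching /root/logs and an HDFS sink (the HDFS path is an assumption):
	a1.sources = r1
	a1.sinks = k1
	a1.channels = c1
	# watch /root/logs for new files
	a1.sources.r1.type = spooldir
	a1.sources.r1.spoolDir = /root/logs
	# write events into HDFS as plain text
	a1.sinks.k1.type = hdfs
	a1.sinks.k1.hdfs.path = hdfs://hadoop01:9000/flume/events/%y-%m-%d
	a1.sinks.k1.hdfs.useLocalTimeStamp = true
	a1.sinks.k1.hdfs.fileType = DataStream
	a1.channels.c1.type = memory
	a1.channels.c1.capacity = 1000
	a1.channels.c1.transactionCapacity = 100
	a1.sources.r1.channels = c1
	a1.sinks.k1.channel = c1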

cd /export/servers/apache-flume-1.9.0-bin

bin/flume-ng agent -c ./conf -f ./conf/spooldir-hdfs.conf -n a1 -Dflume.root.logger=INFO,console

Second terminal (test):
cd /root/logs/

Put a file into this directory	// the first terminal will log the ingestion

Check the result in the web UI under Browse Directory:
http://hadoop01:50070/

Cluster setup

scp -r /export/servers/apache-flume-1.9.0-bin/ root@hadoop02:/export/servers/
scp -r /export/servers/apache-flume-1.9.0-bin/ root@hadoop03:/export/servers/

hadoop01:

cd /export/servers/apache-flume-1.9.0-bin/conf/

Upload the exec-avro.conf file (sketch below)
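
Its contents are not shown; a sketch consistent with the test at the end of this section (tail the log written by the while loop, forward over avro to hadoop02 with failover to hadoop03; port 4141 is an assumption and must match the collectors):
	a1.sources = r1
	a1.sinks = k1 k2
	a1.channels = c1
	# tail the file the test loop appends to
	a1.sources.r1.type = exec
	a1.sources.r1.command = tail -F /root/logs/123.log
	# avro sinks to the two collector nodes
	a1.sinks.k1.type = avro
	a1.sinks.k1.hostname = hadoop02
	a1.sinks.k1.port = 4141
	a1.sinks.k2.type = avro
	a1.sinks.k2.hostname = hadoop03
	a1.sinks.k2.port = 4141
	# failover group: prefer hadoop02, fall back to hadoop03
	a1.sinkgroups = g1
	a1.sinkgroups.g1.sinks = k1 k2
	a1.sinkgroups.g1.processor.type = failover
	a1.sinkgroups.g1.processor.priority.k1 = 10
	a1.sinkgroups.g1.processor.priority.k2 = 5
	a1.channels.c1.type = memory
	a1.channels.c1.capacity = 1000
	a1.channels.c1.transactionCapacity = 100
	a1.sources.r1.channels = c1
	a1.sinks.k1.channel = c1
	a1.sinks.k2.channel = c1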

hadoop02/03:

cd /export/servers/apache-flume-1.9.0-bin/conf/

Upload the avro-logger.conf file (sketch below)
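
A matching sketch for the collector side (as uploaded it binds to hadoop02; the next step changes the bind address on hadoop03; port 4141 must agree with exec-avro.conf):
	a1.sources = r1
	a1.sinks = k1
	a1.channels = c1
	# avro source receiving from hadoop01
	a1.sources.r1.type = avro
	a1.sources.r1.bind = hadoop02
	a1.sources.r1.port = 4141
	# print received events to the console
	a1.sinks.k1.type = logger
	a1.channels.c1.type = memory
	a1.channels.c1.capacity = 1000
	a1.channels.c1.transactionCapacity = 100
	a1.sources.r1.channels = c1
	a1.sinks.k1.channel = c1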

hadoop03:

vim avro-logger.conf

	a1.sources.r1.bind = hadoop03  

Start

hadoop02/03 (start the collectors before the hadoop01 agent, so its avro sinks can connect):

cd /export/servers/apache-flume-1.9.0-bin

bin/flume-ng agent -c ./conf -f ./conf/avro-logger.conf -n a1 -Dflume.root.logger=INFO,console

hadoop01:

cd /export/servers/apache-flume-1.9.0-bin

bin/flume-ng agent -c ./conf -f ./conf/exec-avro.conf -n a1 -Dflume.root.logger=INFO,console

Open another hadoop01 terminal for the test:
while true;do echo "access access ...">>/root/logs/123.log;sleep 4;done

hadoop02 or hadoop03 will log the incoming events

Azkaban installation and configuration

Install

cd /export/software

Upload the files:
azkaban-3.50.0.tar.gz
git-1.7.1-3.el6_4.1.x86_64.rpm
perl-Error-0.17015-4.el6.noarch.rpm
perl-Git-1.7.1-3.el6_4.1.noarch.rpm

rpm -ivh git-1.7.1-3.el6_4.1.x86_64.rpm perl-Error-0.17015-4.el6.noarch.rpm perl-Git-1.7.1-3.el6_4.1.noarch.rpm 

tar -zxvf azkaban-3.50.0.tar.gz

cd /export/software/azkaban-3.50.0

./gradlew build -x test	// requires internet access

Configuration (terminal 1 / terminal 2)

Configure the database

Terminal 2:
mysql -uroot -p

	create database azkaban;
	use azkaban;

Terminal 1:
cd /export/software/azkaban-3.50.0/azkaban-db/build/distributions/

tar -zxvf azkaban-db-0.1.0-SNAPSHOT.tar.gz 

Terminal 2:
	source /export/software/azkaban-3.50.0/azkaban-db/build/distributions/azkaban-db-0.1.0-SNAPSHOT/create-all-sql-0.1.0-SNAPSHOT.sql		// the azkaban-db path plus the SQL file name

quit

Test
Open Navicat: the azkaban database on hadoop01 should now contain the Azkaban tables

Install azkaban-web-server

Terminal 1:
cd /export/servers/

mkdir azkaban

cd /export/software/azkaban-3.50.0/azkaban-web-server/build/distributions

tar -zxvf azkaban-web-server-0.1.0-SNAPSHOT.tar.gz -C /export/servers/azkaban/

Terminal 2:
cd /export/servers/azkaban/azkaban-web-server-0.1.0-SNAPSHOT/

Terminal 1:
cd /export/software/azkaban-3.50.0/azkaban-solo-server/build/distributions

tar -zxvf azkaban-solo-server-0.1.0-SNAPSHOT.tar.gz 

cd azkaban-solo-server-0.1.0-SNAPSHOT

cp -r conf/ /export/servers/azkaban/azkaban-web-server-0.1.0-SNAPSHOT/

cp -r plugins/ /export/servers/azkaban/azkaban-web-server-0.1.0-SNAPSHOT/

Terminal 2:
mkdir extlib

Generate the SSL keystore
keytool -keystore keystore -alias jetty -genkey -keyalg RSA
// answers are case-sensitive
	enter 123456, press Enter through the six prompts, y, then 123456 again

cd conf/
rm azkaban.properties 

Upload the azkaban.properties and log4j.properties files (key entries sketched below)
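
The uploaded azkaban.properties is not reproduced; given the database and keystore created above, it presumably contains entries along these lines (standard Azkaban 3.x property names; the exact values are assumptions):
	# MySQL connection for the azkaban database created earlier
	database.type=mysql
	mysql.host=localhost
	mysql.port=3306
	mysql.database=azkaban
	mysql.user=root
	mysql.password=123456
	mysql.numconnections=100
	# Jetty SSL, matching the keystore generated above
	jetty.use.ssl=true
	jetty.ssl.port=8443
	jetty.keystore=keystore
	jetty.password=123456
	jetty.keypassword=123456
	jetty.truststore=keystore
	jetty.trustpassword=123456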

vim azkaban-users.xml
	add:
	<user password="admin" roles="metrics,admin" username="admin"/>

Install azkaban-exec-server

Terminal 1:
cd /export/software/azkaban-3.50.0/azkaban-exec-server/build/distributions

tar -zxvf azkaban-exec-server-0.1.0-SNAPSHOT.tar.gz -C /export/servers/azkaban/

Terminal 2:
cd /export/servers/azkaban/azkaban-web-server-0.1.0-SNAPSHOT

cp -r conf/ /export/servers/azkaban/azkaban-exec-server-0.1.0-SNAPSHOT/
cp -r extlib/ /export/servers/azkaban/azkaban-exec-server-0.1.0-SNAPSHOT/
cp -r plugins/ /export/servers/azkaban/azkaban-exec-server-0.1.0-SNAPSHOT/

cd /export/servers/azkaban/azkaban-exec-server-0.1.0-SNAPSHOT/conf/

rm azkaban.properties
Upload the azkaban.properties file		// a different file from the azkaban-web-server one

vim /export/servers/azkaban/azkaban-exec-server-0.1.0-SNAPSHOT/plugins/jobtypes/commonprivate.properties
    add:
    # disables the executor's free-memory check (fails on low-memory VMs)
    memCheck.enabled=false

Start

cd /export/servers/azkaban/azkaban-exec-server-0.1.0-SNAPSHOT/

bin/start-exec.sh 

cd /export/servers/azkaban/azkaban-web-server-0.1.0-SNAPSHOT

bin/start-web.sh 

Check with jps

https://hadoop01:8443/
Username: admin
Password: admin

Sqoop installation and configuration

Install

Upload the archive to /export/software/
cd /export/software/

tar -zxvf sqoop-1.4.6.bin__hadoop-2.0.4-alpha.tar.gz -C /export/servers/

Configuration

cd /export/servers

mv sqoop-1.4.6.bin__hadoop-2.0.4-alpha/ sqoop-1.4.6

cd sqoop-1.4.6/conf/

mv sqoop-env-template.sh sqoop-env.sh 

vim sqoop-env.sh
	export HADOOP_COMMON_HOME=/export/servers/hadoop-2.7.5
	export HADOOP_MAPRED_HOME=/export/servers/hadoop-2.7.5
	export HIVE_HOME=/export/servers/apache-hive-1.2.1-bin

vim /etc/profile
    #SQOOP_HOME SETTING
    export SQOOP_HOME=/export/servers/sqoop-1.4.6
    export PATH=$PATH:$SQOOP_HOME/bin	# no trailing colon, which would add the current directory to PATH

source /etc/profile

cd /export/servers/sqoop-1.4.6/lib
Upload the MySQL JDBC driver JAR here

Test
sqoop help
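
A more end-to-end check that also exercises the JDBC driver (connection values match the MySQL setup above):
sqoop list-databases --connect jdbc:mysql://hadoop01:3306/ --username root --password 123456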
