## Hadoop Ecosystem Installation

Check for a pre-installed JDK and for MariaDB, and remove anything found:

rpm -qa | grep java
rpm -qa | grep mariadb
rpm -e --nodeps <package-name>
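If every hit from the queries above is safe to drop, the removal can be done in one pass; a minimal sketch (run with sudo):

# remove all matching packages in one go (-r: do nothing if grep finds no match)
rpm -qa | grep -i java    | xargs -r sudo rpm -e --nodeps
rpm -qa | grep -i mariadb | xargs -r sudo rpm -e --nodeps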

I. JDK and environment variables

1) my_env.sh
#JAVA_HOME
export JAVA_HOME=/opt/module/jdk1.8.0_131
export PATH=$PATH:$JAVA_HOME/bin

#HADOOP_HOME
export HADOOP_HOME=/opt/module/hadoop-2.7.3
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin

#HIVE_HOME
export HIVE_HOME=/opt/module/hive
export PATH=$PATH:$HIVE_HOME/bin

#KAFKA_HOME
export KAFKA_HOME=/opt/module/kafka
export PATH=$PATH:$KAFKA_HOME/bin
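These exports are typically dropped into /etc/profile.d/my_env.sh and then sourced; a quick sanity check, assuming the paths above:

sudo mv my_env.sh /etc/profile.d/my_env.sh
source /etc/profile
java -version      # expect 1.8.0_131
hadoop version     # expect Hadoop 2.7.3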

II. Hadoop configuration files

1) core-site.xml

	<!-- Address of the HDFS NameNode -->
	<property>
		<name>fs.defaultFS</name>
		<value>hdfs://master:9000</value>
	</property>
	<!-- Base directory for files Hadoop generates at runtime. The default is /tmp/hadoop-${user.name}, which may be wiped on reboot, so it must be pointed elsewhere. -->
	<property>
		<name>hadoop.tmp.dir</name>
		<value>/opt/module/hadoop-2.7.3/data/tmp</value>
	</property>
2) hdfs-site.xml

	<!-- HDFS replication factor; the default is 3, so this is optional -->
	<property>
		<name>dfs.replication</name>
		<value>1</value>
	</property>
	<!-- HTTP address and port of the SecondaryNameNode. In this plan slave01 hosts the SecondaryNameNode; if unset, it starts on the same node as the NameNode. -->
	<property>
		<name>dfs.namenode.secondary.http-address</name>
		<value>slave01:50090</value>
	</property>

3) slaves

master
slave01
slave02
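Every node listed here needs the same Hadoop configuration; a minimal distribution sketch with scp (hostnames and path as above):

for host in slave01 slave02; do
  scp -r /opt/module/hadoop-2.7.3/etc/hadoop/ $host:/opt/module/hadoop-2.7.3/etc/
done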

4) yarn-site.xml

	<!-- Shuffle service used by reducers to fetch map output -->
	<property>
		<name>yarn.nodemanager.aux-services</name>
		<value>mapreduce_shuffle</value>
	</property>
	<!-- Hostname of the YARN ResourceManager -->
	<property>
		<name>yarn.resourcemanager.hostname</name>
		<value>slave02</value>
	</property>
5) mapred-site.xml

	<!-- Run MapReduce on YARN -->
	<property>
		<name>mapreduce.framework.name</name>
		<value>yarn</value>
	</property>
6) JobHistory server (configure on master only)
  mapred-site.xml

	<!-- JobHistory server RPC address -->
	<property>
		<name>mapreduce.jobhistory.address</name>
		<value>master:10020</value>
	</property>
	<!-- JobHistory server web UI address -->
	<property>
		<name>mapreduce.jobhistory.webapp.address</name>
		<value>master:19888</value>
	</property>

Format the NameNode (run once on master before the first start):
bin/hdfs namenode -format
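After formatting, HDFS is started from master and YARN from the ResourceManager node (slave02 in the config above); a sketch of the usual start-up check:

# on master
sbin/start-dfs.sh
sbin/mr-jobhistory-daemon.sh start historyserver
# on slave02 (where the ResourceManager runs)
sbin/start-yarn.sh
# on every node, confirm the expected daemons
jps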

III. ZooKeeper configuration files

Create a myid file under /opt/module/zookeeper-3.4.10/zkData.
1) zoo.cfg
dataDir=/opt/module/zookeeper-3.4.10/zkData

#######################cluster##########################
server.2=master:2888:3888
server.3=slave01:2888:3888
server.4=slave02:2888:3888
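Each node's myid must match its server.N entry above; a sketch of creating the files and bringing up the quorum:

# on master
echo 2 > /opt/module/zookeeper-3.4.10/zkData/myid
# on slave01
echo 3 > /opt/module/zookeeper-3.4.10/zkData/myid
# on slave02
echo 4 > /opt/module/zookeeper-3.4.10/zkData/myid
# then on every node, from the zookeeper directory
bin/zkServer.sh start
bin/zkServer.sh status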

IV. Hive configuration files

Copy the MySQL JDBC driver into Hive's lib directory and rename the SLF4J binding jar to avoid a conflict with Hadoop's:
mv mysql-connector-java-5.1.27-bin.jar /opt/module/hive/lib/
mv $HIVE_HOME/lib/log4j-slf4j-impl-2.10.0.jar $HIVE_HOME/lib/log4j-slf4j-impl-2.10.0.bak

Initialize the metastore database (run this after hive-site.xml below points at MySQL):
schematool -dbType mysql -initSchema
Check for any existing MySQL or MariaDB packages:
rpm -qa | grep mysql
rpm -qa | grep MySQL
rpm -qa | grep mariadb

Remove them along with the old data directory, then install the autoconf dependency:
sudo rpm -e --nodeps mysql
sudo rpm -e --nodeps mariadb
sudo rm -rf /var/lib/mysql
sudo yum -y install autoconf

Install the MySQL server: sudo rpm -ivh MySQL-server-5.6.24-1.el6.x86_64.rpm
Read the generated root password: cat /root/.mysql_secret

Start the service: sudo service mysql start
Install the client: sudo rpm -ivh MySQL-client-5.6.24-1.el6.x86_64.rpm

Connect and allow remote root logins: mysql -uroot -p
use mysql;
SET PASSWORD=PASSWORD('000000');
update user set host='%' where host='localhost';
-- drop the remaining host-specific root rows (use this machine's hostname, e.g. master)
delete from user where Host='master' or Host='127.0.0.1' or Host='::1';
flush privileges;

Rename hive-env.sh.template in /opt/module/hive/conf to hive-env.sh.
1) hive-env.sh
export HADOOP_HOME=/opt/module/hadoop-2.7.3
export HIVE_CONF_DIR=/opt/module/hive/conf

Create hive-site.xml in /opt/module/hive/conf.
2) hive-site.xml

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
	<property>
	  <name>javax.jdo.option.ConnectionURL</name>
	<value>jdbc:mysql://master:3306/metastore?createDatabaseIfNotExist=true</value>
	  <description>JDBC connect string for a JDBC metastore</description>
	</property>
	<property>
	  <name>javax.jdo.option.ConnectionDriverName</name>
	  <value>com.mysql.jdbc.Driver</value>
	  <description>Driver class name for a JDBC metastore</description>
	</property>
	<property>
	  <name>javax.jdo.option.ConnectionUserName</name>
	  <value>root</value>
	  <description>username to use against metastore database</description>
	</property>
	<property>
	  <name>javax.jdo.option.ConnectionPassword</name>
	  <value>000000</value>
	  <description>password to use against metastore database</description>
	</property>
</configuration>

3) hive-site.xml (optional display settings, appended to the file above)
<property>
	<name>hive.cli.print.header</name>
	<value>true</value>
</property>
<property>
	<name>hive.cli.print.current.db</name>
	<value>true</value>
</property>

4) hive-log4j.properties
hive.log.dir=/opt/module/hive/logs
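With HDFS running and the metastore initialized, Hive can be smoke-tested from the hive directory; a sketch:

bin/hive
hive> show databases;
hive> create table test(id int);
hive> show tables;
hive> quit;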

(Distribute: once HBase is configured and copied to all nodes in section V below, start it with bin/start-hbase.sh.)

V. HBase configuration files

1) hbase-env.sh

export JAVA_HOME=/opt/module/jdk1.8.0_131
# use the external ZooKeeper cluster instead of HBase's bundled one
export HBASE_MANAGES_ZK=false

2) hbase-site.xml

	<property>
		<name>hbase.rootdir</name>
		<value>hdfs://master:9000/HBase</value>
	</property>

	<property>
		<name>hbase.cluster.distributed</name>
		<value>true</value>
	</property>

	<!-- New since 0.98; earlier versions had no .port property and defaulted to 60000 -->
	<property>
		<name>hbase.master.port</name>
		<value>16000</value>
	</property>

	<property>
		<name>hbase.zookeeper.quorum</name>
		<value>master:2181,slave01:2181,slave02:2181</value>
	</property>

	<property>
		<name>hbase.zookeeper.property.dataDir</name>
		<value>/opt/module/zookeeper-3.4.10/zkData</value>
	</property>

3) regionservers
master
slave01
slave02

4) Symlink the Hadoop configuration files into HBase:
 ln -s /opt/module/hadoop-2.7.3/etc/hadoop/core-site.xml /opt/module/hbase-1.3.1/conf/core-site.xml
 ln -s /opt/module/hadoop-2.7.3/etc/hadoop/hdfs-site.xml /opt/module/hbase-1.3.1/conf/hdfs-site.xml
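Once /opt/module/hbase-1.3.1 has been copied to slave01 and slave02 and ZooKeeper and HDFS are running, HBase is started from master; a sketch:

# on master
bin/start-hbase.sh
# web UI: http://master:16010 ; quick check from the shell
bin/hbase shell
hbase(main):001:0> list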

VI. Sqoop configuration files

1) sqoop-env.sh

export HADOOP_COMMON_HOME=/opt/module/hadoop-2.7.3
export HADOOP_MAPRED_HOME=/opt/module/hadoop-2.7.3
export HIVE_HOME=/opt/module/hive
export ZOOKEEPER_HOME=/opt/module/zookeeper-3.4.10
export ZOOCFGDIR=/opt/module/zookeeper-3.4.10
export HBASE_HOME=/opt/module/hbase-1.3.1

2) Copy the JDBC driver into Sqoop's lib directory, e.g.:
$ cp mysql-connector-java-5.1.27-bin.jar /opt/module/sqoop-1.4.6.bin__hadoop-2.0.4-alpha/lib/
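A quick connectivity check against the MySQL instance configured earlier (root password as set above); a sketch:

bin/sqoop version
bin/sqoop list-databases --connect jdbc:mysql://master:3306/ --username root --password 000000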

VII. Kafka configuration files

Create a logs folder under /opt/module/kafka:
      mkdir logs
On slave01 and slave02, change broker.id in /opt/module/kafka/config/server.properties to 1 and 2 respectively.
1) server.properties

#globally unique broker id; must differ on every node
broker.id=0
#allow topic deletion
delete.topic.enable=true
#directory where Kafka stores its log segments (topic data)
log.dirs=/opt/module/kafka/data
log.retention.hours=168
#ZooKeeper connection string (the /kafka chroot keeps Kafka's znodes separate)
zookeeper.connect=master:2181,slave01:2181,slave02:2181/kafka

Start and stop a broker (run on every node):
bin/kafka-server-start.sh config/server.properties &
bin/kafka-server-stop.sh
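Once all three brokers are up, a topic round-trip confirms the cluster; a sketch (the /kafka chroot from zookeeper.connect must be included; on newer Kafka releases --bootstrap-server master:9092 replaces --zookeeper):

bin/kafka-topics.sh --zookeeper master:2181/kafka --create --replication-factor 3 --partitions 1 --topic first
bin/kafka-topics.sh --zookeeper master:2181/kafka --list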

VIII. Flume

1) Rename apache-flume-1.7.0-bin to flume:
 mv apache-flume-1.7.0-bin flume
2) Rename flume-env.sh.template under flume/conf to flume-env.sh and set JAVA_HOME in it:
export JAVA_HOME=/opt/module/jdk1.8.0_131
3) Copy the rpm packages (xinetd-2.3.14-40.el6.x86_64.rpm, telnet-0.17-48.el6.x86_64.rpm, and telnet-server-0.17-48.el6.x86_64.rpm) into /opt/software and install them:
sudo rpm -ivh xinetd-2.3.14-40.el6.x86_64.rpm
sudo rpm -ivh telnet-0.17-48.el6.x86_64.rpm
sudo rpm -ivh telnet-server-0.17-48.el6.x86_64.rpm
4) Check whether port 44444 is already in use:
 sudo netstat -tunlp | grep 44444
5) Create the Flume agent configuration file flume-telnet-logger.conf.
Create a job folder under the flume directory and enter it:
mkdir job
cd job/
Create flume-telnet-logger.conf in the job folder:
 touch flume-telnet-logger.conf
Edit the file and add the following:
 vim flume-telnet-logger.conf
# Name the components on this agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1

# Describe/configure the source
a1.sources.r1.type = netcat
a1.sources.r1.bind = localhost
a1.sources.r1.port = 44444

# Describe the sink
a1.sinks.k1.type = logger

# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
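The agent is started from the flume directory and exercised with telnet; a sketch:

bin/flume-ng agent --conf conf/ --name a1 --conf-file job/flume-telnet-logger.conf -Dflume.root.logger=INFO,console
# in another terminal
telnet localhost 44444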


Create a second agent configuration, flume-file-hdfs.conf, in the job folder:
 touch flume-file-hdfs.conf
 vim flume-file-hdfs.conf
Add the following:
# Name the components on this agent
a2.sources = r2
a2.sinks = k2
a2.channels = c2

# Describe/configure the source
a2.sources.r2.type = exec
a2.sources.r2.command = tail -F /opt/module/hive/logs/hive.log
a2.sources.r2.shell = /bin/bash -c

# Describe the sink
a2.sinks.k2.type = hdfs
a2.sinks.k2.hdfs.path = hdfs://master:9000/flume/%Y%m%d/%H
#prefix for uploaded files
a2.sinks.k2.hdfs.filePrefix = logs-
#roll the target folder by time
a2.sinks.k2.hdfs.round = true
#number of time units per new folder
a2.sinks.k2.hdfs.roundValue = 1
#time unit used for rounding
a2.sinks.k2.hdfs.roundUnit = hour
#use the local timestamp
a2.sinks.k2.hdfs.useLocalTimeStamp = true
#number of events accumulated before flushing to HDFS
a2.sinks.k2.hdfs.batchSize = 1000
#file type; DataStream means an uncompressed plain stream
a2.sinks.k2.hdfs.fileType = DataStream
#seconds before rolling to a new file
a2.sinks.k2.hdfs.rollInterval = 600
#file size (bytes) that triggers a roll
a2.sinks.k2.hdfs.rollSize = 134217700
#0 = never roll based on event count
a2.sinks.k2.hdfs.rollCount = 0
#minimum block replication
a2.sinks.k2.hdfs.minBlockReplicas = 1

# Use a channel which buffers events in memory
a2.channels.c2.type = memory
a2.channels.c2.capacity = 1000
a2.channels.c2.transactionCapacity = 100

# Bind the source and sink to the channel
a2.sources.r2.channels = c2
a2.sinks.k2.channel = c2
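This agent needs HDFS up and the Hive log file present (hive.log.dir was set in hive-log4j.properties above); a sketch of running it and checking the result:

bin/flume-ng agent --conf conf/ --name a2 --conf-file job/flume-file-hdfs.conf
# generate some log traffic (e.g. run a few Hive statements), then check HDFS
hdfs dfs -ls /flume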