Flume Installation and Deployment

一、Installation and Deployment

(1) Upload apache-flume-1.10.1-bin.tar.gz to the /opt/software directory on the Linux host.
(2) Extract apache-flume-1.10.1-bin.tar.gz into the /opt/module/ directory:

[xxx@hadoop102 software]$ tar -zxf /opt/software/apache-flume-1.10.1-bin.tar.gz -C /opt/module/

(3) Rename apache-flume-1.10.1-bin to flume:

[xxx@hadoop102 module]$ mv /opt/module/apache-flume-1.10.1-bin /opt/module/flume

(4) Edit the log4j2.xml configuration file in the conf directory to set the log file path:

[xxx@hadoop102 conf]$ vim log4j2.xml

<?xml version="1.0" encoding="UTF-8"?>
<!--
 Licensed to the Apache Software Foundation (ASF) under one or more
 contributor license agreements.  See the NOTICE file distributed with
 this work for additional information regarding copyright ownership.
 The ASF licenses this file to You under the Apache License, Version 2.0
 (the "License"); you may not use this file except in compliance with
 the License.  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
-->
<Configuration status="ERROR">
  <Properties>
    <Property name="LOG_DIR">/opt/module/flume/log</Property>
  </Properties>
  <Appenders>
    <Console name="Console" target="SYSTEM_ERR">
      <PatternLayout pattern="%d (%t) [%p - %l] %m%n" />
    </Console>
    <RollingFile name="LogFile" fileName="${LOG_DIR}/flume.log" filePattern="${LOG_DIR}/archive/flume.log.%d{yyyyMMdd}-%i">
      <PatternLayout pattern="%d{dd MMM yyyy HH:mm:ss,SSS} %-5p [%t] (%C.%M:%L) %equals{%x}{[]}{} - %m%n" />
      <Policies>
        <!-- Roll every night at midnight or when the file reaches 100MB -->
        <SizeBasedTriggeringPolicy size="100 MB"/>
        <CronTriggeringPolicy schedule="0 0 0 * * ?"/>
      </Policies>
      <DefaultRolloverStrategy min="1" max="20">
        <Delete basePath="${LOG_DIR}/archive">
          <!-- Nested conditions: the inner condition is only evaluated on files for which the outer conditions are true. -->
          <IfFileName glob="flume.log.*">
            <!-- Only allow 1 GB of files to accumulate -->
            <IfAccumulatedFileSize exceeds="1 GB"/>
          </IfFileName>
        </Delete>
      </DefaultRolloverStrategy>
    </RollingFile>
  </Appenders>

  <Loggers>
    <Logger name="org.apache.flume.lifecycle" level="info"/>
    <Logger name="org.jboss" level="WARN"/>
    <Logger name="org.apache.avro.ipc.netty.NettyTransceiver" level="WARN"/>
    <Logger name="org.apache.hadoop" level="INFO"/>
    <Logger name="org.apache.hadoop.hive" level="ERROR"/>
    <!-- Also route output to the console, which makes the logs easier to follow while learning -->
    <Root level="INFO">
      <AppenderRef ref="LogFile" />
      <AppenderRef ref="Console" />
    </Root>
  </Loggers>
</Configuration>
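
After saving the file, a quick way to confirm the installation works is to print the Flume version from the installation directory (an optional sanity check that does not depend on any agent configuration):

[xxx@hadoop102 flume]$ bin/flume-ng version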

二、Flume Data Transfer Configuration Files

1. Log Collection and Transfer

(1) file_to_kafka.conf
# Name the components on this agent
a1.sources = r1
a1.channels = c1

# Describe/configure the source
a1.sources.r1.type = TAILDIR
a1.sources.r1.positionFile = /opt/module/flume/taildir_position.json
a1.sources.r1.filegroups = f1
a1.sources.r1.filegroups.f1 = /opt/module/applog/log/app.*


# Use a channel which buffers events in memory
a1.channels.c1.type = org.apache.flume.channel.kafka.KafkaChannel
a1.channels.c1.kafka.bootstrap.servers = hadoop102:9092,hadoop103:9092,hadoop104:9092
a1.channels.c1.kafka.topic = topic_log

a1.channels.c1.parseAsFlumeEvent = false


# Bind the source and sink to the channel
a1.sources.r1.channels = c1
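
This agent can be started with the standard flume-ng command. The job/ directory below is only an assumed location; point -f at wherever file_to_kafka.conf is actually saved:

[xxx@hadoop102 flume]$ bin/flume-ng agent -n a1 -c conf/ -f job/file_to_kafka.conf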

(2) kafka_to_hdfs_log.conf
# Name the components on this agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1

# Describe/configure the source
a1.sources.r1.type = org.apache.flume.source.kafka.KafkaSource
a1.sources.r1.batchSize = 5000
a1.sources.r1.batchDurationMillis = 2000
a1.sources.r1.kafka.bootstrap.servers = hadoop102:9092,hadoop103:9092,hadoop104:9092
a1.sources.r1.kafka.topics = topic_log
a1.sources.r1.kafka.consumer.group.id = flume1
#a1.sources.r1.kafka.consumer.auto.offset.reset=earliest
a1.sources.r1.interceptors = i1
a1.sources.r1.interceptors.i1.type = com.atguigu.gmall.interceptor.TimeStampInterceptor$MyBuilder

# Describe the sink
a1.sinks.k1.type = hdfs
a1.sinks.k1.hdfs.path = /origin_data/gmall/log/topic_log/%Y-%m-%d
a1.sinks.k1.hdfs.filePrefix = log
a1.sinks.k1.hdfs.round = false

a1.sinks.k1.hdfs.rollInterval = 10
a1.sinks.k1.hdfs.rollSize = 134217728
a1.sinks.k1.hdfs.rollCount = 0

# Control the output file type
a1.sinks.k1.hdfs.fileType = CompressedStream
a1.sinks.k1.hdfs.codeC = gzip
# Use a channel which buffers events in file
a1.channels.c1.type = file
a1.channels.c1.checkpointDir = /opt/module/flume/checkpoint/behavior1
a1.channels.c1.dataDirs = /opt/module/flume/data/behavior1
a1.channels.c1.maxFileSize = 2146435071
a1.channels.c1.capacity = 1000000
a1.channels.c1.keep-alive = 6

# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1

The interceptor referenced above needs to be uploaded to flume/lib/interceptor.
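
The TimeStampInterceptor is a custom class whose source is not part of this document. As a rough sketch of what such an interceptor usually does, the class below parses each event body as JSON, copies its ts field (assumed to hold the event time in epoch milliseconds) into the timestamp header, and drops events that are not valid JSON, so that the HDFS sink's %Y-%m-%d escapes resolve to the event time rather than the processing time. The ts field name, the fastjson dependency, and the error handling are assumptions, not the original implementation:

package com.atguigu.gmall.interceptor;

import com.alibaba.fastjson.JSONObject; // assumed JSON library; must be bundled into the jar
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.interceptor.Interceptor;

import java.nio.charset.StandardCharsets;
import java.util.Iterator;
import java.util.List;

public class TimeStampInterceptor implements Interceptor {

    @Override
    public void initialize() {
    }

    @Override
    public Event intercept(Event event) {
        try {
            // Read the log line and copy its "ts" field (assumed epoch millis)
            // into the "timestamp" header used by the HDFS sink's %Y-%m-%d escapes.
            String body = new String(event.getBody(), StandardCharsets.UTF_8);
            JSONObject json = JSONObject.parseObject(body);
            event.getHeaders().put("timestamp", json.getString("ts"));
            return event;
        } catch (Exception e) {
            // Discard events whose body is not valid JSON.
            return null;
        }
    }

    @Override
    public List<Event> intercept(List<Event> events) {
        Iterator<Event> it = events.iterator();
        while (it.hasNext()) {
            if (intercept(it.next()) == null) {
                it.remove();
            }
        }
        return events;
    }

    @Override
    public void close() {
    }

    // The configuration refers to TimeStampInterceptor$MyBuilder,
    // so the builder is an inner class named MyBuilder.
    public static class MyBuilder implements Interceptor.Builder {
        @Override
        public Interceptor build() {
            return new TimeStampInterceptor();
        }

        @Override
        public void configure(Context context) {
        }
    }
}

The TableNameAndTimeStampInterceptor used for the business data flow below presumably follows the same pattern, additionally writing a tableName header that the %{tableName} escape in the HDFS path picks up.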

2. Business Data Transfer

kafka_to_hdfs_db.conf
# Name the components on this agent
a1.sources = r1
a1.channels = c1
a1.sinks = k1
#Source
a1.sources.r1.type = org.apache.flume.source.kafka.KafkaSource
a1.sources.r1.batchSize = 5000
a1.sources.r1.batchDurationMillis = 2000
a1.sources.r1.kafka.bootstrap.servers = hadoop102:9092,hadoop103:9092
a1.sources.r1.kafka.topics = topic_db
a1.sources.r1.kafka.consumer.group.id = flume1
a1.sources.r1.kafka.consumer.auto.offset.reset=earliest
a1.sources.r1.setTopicHeader = true
a1.sources.r1.topicHeader = topic
a1.sources.r1.interceptors = i1
a1.sources.r1.interceptors.i1.type = com.atguigu.gmall.interceptor.TableNameAndTimeStampInterceptor$MyBuilder
#channel
a1.channels.c1.type = file
a1.channels.c1.checkpointDir = /opt/module/flume/checkpoint/behavior2
a1.channels.c1.dataDirs = /opt/module/flume/data/behavior2/
a1.channels.c1.maxFileSize = 2146435071
a1.channels.c1.capacity = 1000000
a1.channels.c1.keep-alive = 6

## sink
a1.sinks.k1.type = hdfs
a1.sinks.k1.hdfs.path = /origin_data/gmall/db/%{tableName}_inc/%Y-%m-%d
a1.sinks.k1.hdfs.filePrefix = db
a1.sinks.k1.hdfs.round = false
a1.sinks.k1.hdfs.rollInterval = 10
a1.sinks.k1.hdfs.rollSize = 134217728
a1.sinks.k1.hdfs.rollCount = 0

a1.sinks.k1.hdfs.fileType = CompressedStream
a1.sinks.k1.hdfs.codeC = gzip

## Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
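
Both Kafka-to-HDFS agents are launched the same way as the log-collection agent, with -f pointing at the corresponding configuration file (again, the job/ directory is an assumed location):

[xxx@hadoop102 flume]$ bin/flume-ng agent -n a1 -c conf/ -f job/kafka_to_hdfs_log.conf
[xxx@hadoop102 flume]$ bin/flume-ng agent -n a1 -c conf/ -f job/kafka_to_hdfs_db.conf

Once events flow through, the gzip-compressed output files should appear in HDFS under /origin_data/gmall/log/topic_log/ and /origin_data/gmall/db/, which can be checked with hadoop fs -ls.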

