Using Flume

##################
#  netcat setup  #
##################

#Extract the source tarball
 cd /opt/download
 tar -zxvf netcat-0.7.1.tar.gz -C /opt/software
  
#The build may fail if gcc is older than 5.3; upgrade it first
 yum -y install centos-release-scl
 yum -y install devtoolset-9-gcc devtoolset-9-gcc-c++ devtoolset-9-binutils
 scl enable devtoolset-9 bash   #enable for the current shell only
 echo "source /opt/rh/devtoolset-9/enable" >> /etc/profile   #enable permanently
 gcc -v
 #--------------------------------------------------
  gcc version 9.3.1 20200408 (Red Hat 9.3.1-2) (GCC)
 #-------------------------------------------------- 

 cd /opt/software/netcat-0.7.1/
 ./configure --prefix=/usr/local/netcat
 make && make install
 cd /usr/local/netcat
 ls
 #------------------
  bin info man share
 #------------------ 

#Configure environment variables
 vim /etc/profile.d/my.sh
 #---------------------------------------------------------
  #netcat071
  export NETCAT_HOME=/usr/local/netcat
  export PATH=$PATH:$NETCAT_HOME/bin
 #-----------------------------------------------------------
 source /etc/profile 
 
#client mode: connect to a listener on port 9999 (verification sketch below)
 nc localhost 9999
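
#To verify the build end to end, a minimal two-terminal sketch on this host:
 #terminal 1: listen on port 9999 (GNU netcat 0.7.x syntax)
  nc -l -p 9999
 #terminal 2: run the client command above and type a line;
 #it should appear in terminal 1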


########################
#  Flume installation  #
########################

#Extract and rename
 cd /opt/download
 tar -zxvf apache-flume-1.9.0-bin.tar.gz -C /opt/software
 cd /opt/software
 mv apache-flume-1.9.0-bin flume190

#Configure environment variables
 vim /etc/profile.d/my.sh
 #----------------------------------------
  #flume190
  export FLUME_HOME=/opt/software/flume190
  export PATH=$PATH:$FLUME_HOME/bin
 #----------------------------------------
 source /etc/profile
 
#Point Flume at the JDK
 cd /opt/software/flume190/conf/
 mv flume-env.sh.template flume-env.sh
 vim flume-env.sh
 #-----------------------------------
  #set the JDK path
  export JAVA_HOME=/opt/software/jdk8
 #----------------------------------- 


# Fixes for flume-ng runtime errors

 #1. The HDFS sink fails at startup
  #Replace the bundled guava jar so it matches Hadoop's
   cd /opt/software/flume190
   ls lib|grep guava
   #-------------------
    guava-11.0.2.jar
   #-------------------
   rm -f lib/guava-11.0.2.jar
   find /opt/software/hadoop313/share/hadoop/hdfs/lib -name guava*
   #------------------------------------------------------------------
    /opt/software/hadoop313/share/hadoop/hdfs/lib/guava-27.0-jre.jar
   #------------------------------------------------------------------
   cp /opt/software/hadoop313/share/hadoop/hdfs/lib/guava-27.0-jre.jar lib/
 
  #Copy the Hadoop jars into Flume's lib directory
   cp /opt/software/hadoop313/share/hadoop/client/*.jar /opt/software/flume190/lib/
   cp /opt/software/hadoop313/share/hadoop/common/*.jar /opt/software/flume190/lib/
   cp /opt/software/hadoop313/share/hadoop/hdfs/*.jar /opt/software/flume190/lib/
   cp /opt/software/hadoop313/share/hadoop/mapreduce/*.jar /opt/software/flume190/lib/
   cp /opt/software/hadoop313/share/hadoop/tools/*/*.jar /opt/software/flume190/lib/
   cp /opt/software/hadoop313/share/hadoop/yarn/*.jar /opt/software/flume190/lib/
   cp /opt/software/hadoop313/share/hadoop/common/*/*.jar /opt/software/flume190/lib/
 
 #2. Error: java.lang.OutOfMemoryError: GC overhead limit exceeded
   vim /opt/software/flume190/bin/flume-ng
   #----------------------
     #raise the agent heap
    JAVA_OPTS="-Xmx2048m"
   #----------------------


#Official user guide
   https://flume.apache.org/releases/content/1.9.0/FlumeUserGuide.html
   
# HDFS sink file-rolling and time-rounding parameters:
  #rollSize
   Default 1024. Roll the temporary file into a target file once it reaches this
   size in bytes; 0 disables size-based rolling.
  #rollCount
   Default 10. Roll the temporary file into a target file once it holds this many
   events; 0 disables count-based rolling.
  #round
   Default false. Whether to round the event timestamp down (a form of truncation);
   when enabled it affects every time escape sequence except %t.
  #roundValue
   Default 1. The amount the timestamp is rounded down by.
  #roundUnit
   Default second. The unit of the rounding: second, minute, or hour.
  When round, roundValue and roundUnit are set, the HDFS path configured on the
  sink must include time escape sequences so that directories are generated per
  time bucket - for example, one directory per hour holding the data collected
  during that hour (see the sketch below).
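
 For hourly directories, a minimal sketch (the path and the useLocalTimeStamp
 switch are illustrative, not taken from the configs below):
  a.sinks.k1.hdfs.path = hdfs://single1:9000/flume/%Y%m%d/%H
  a.sinks.k1.hdfs.round = true
  a.sinks.k1.hdfs.roundValue = 1
  a.sinks.k1.hdfs.roundUnit = hour
  #take the timestamp from the agent clock instead of an event header
  a.sinks.k1.hdfs.useLocalTimeStamp = true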
=========================================================================================================================  
  
# 1. Demo: netcat writes data, Flume receives it
 cd ~
 mkdir flume
 vim flume/flume_netcat_memory_log.log
{
#common
a.sources = s1
a.channels = c1
a.sinks = k1
 
#source 
a.sources.s1.type = netcat
a.sources.s1.bind = localhost
a.sources.s1.port = 9999
 
#channel
a.channels.c1.type = memory
a.channels.c1.capacity = 1024
a.channels.c1.transactionCapacity = 128
 
#sink
a.sinks.k1.type = logger
 
#join
a.sources.s1.channels = c1
a.sinks.k1.channel = c1
}
flume-ng agent --name a --conf /opt/software/flume190/conf/ --conf-file ~/flume/flume_netcat_memory_log.log -Dflume.root.logger=INFO,console
       #or, with short options:
flume-ng agent -n a -c /opt/software/flume190/conf/ -f ~/flume/flume_netcat_memory_log.log -Dflume.root.logger=INFO,console
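
#In a second terminal, feed the source; each line typed should be printed
#as an event by the logger sink in the agent console:
 nc localhost 9999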


# 2. Demo: spooling-directory source reading dropped files
 cd ~/flume
 mkdir spooldir   #drop files into this directory
 vim flume_spooldir_memory_log.log
{
#common
a.sources = s1
a.channels = c1
a.sinks = k1

#source
a.sources.s1.type = spooldir
a.sources.s1.spoolDir = /root/flume/spooldir
a.sources.s1.fileHeader = true

#channel
a.channels.c1.type = memory
a.channels.c1.capacity = 1024
a.channels.c1.transactionCapacity = 128

#sink
a.sinks.k1.type = logger

#join
a.sources.s1.channels = c1
a.sinks.k1.channel = c1
}
flume-ng agent -n a -c /opt/software/flume190/conf/ -f ~/flume/flume_spooldir_memory_log.log -Dflume.root.logger=INFO,console
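
#To test, drop a file into the watched directory; once ingested, the spooling
#directory source renames it with a .COMPLETED suffix:
 echo "hello spooldir" > ~/flume/spooldir/test_01.txt
 ls ~/flume/spooldir
 #------------------------
  test_01.txt.COMPLETED
 #------------------------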


# 3. Demo: spooled files written to HDFS
 cd ~/flume   #reuses the spooldir directory created in step 2
 vim flume_spooldir_memory_hdfs.log
 vim flume_spooldir_memory_hdfs.log
{
#common
a.sources = s1
a.channels = c1
a.sinks = k1

#source
a.sources.s1.type = spooldir
a.sources.s1.spoolDir = /root/flume/spooldir
a.sources.s1.fileHeader = true

#channel
a.channels.c1.type = memory
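#note: a memory channel's capacity and transactionCapacity count events, not bytes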
a.channels.c1.capacity = 201326592
a.channels.c1.transactionCapacity = 134217728

#sink
a.sinks.k1.type = hdfs
a.sinks.k1.hdfs.path = hdfs://single1:9000/flume/dir_202228_01
a.sinks.k1.hdfs.filePrefix = tags-events-
a.sinks.k1.hdfs.round = true
a.sinks.k1.hdfs.roundValue = 10
a.sinks.k1.hdfs.roundUnit = second 

#join
a.sources.s1.channels = c1
a.sinks.k1.channel = c1
}
flume-ng agent -n a -c /opt/software/flume190/conf/ -f ~/flume/flume_spooldir_memory_hdfs.log -Dflume.root.logger=INFO,console
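
#Verify on HDFS; file names carry the configured tags-events- prefix:
 hdfs dfs -ls /flume/dir_202228_01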

# 4. Demo: taildir source written to HDFS
 cd ~/flume   #reuses the spooldir directory created in step 2
 vim flume_taildir_memory_hdfs.log

{
#common
a.sources=s1
a.channels=c1
a.sinks=k1

#source
a.sources.s1.type=TAILDIR
a.sources.s1.positionFile=/root/flume/position_file/pf_01.json
a.sources.s1.filegroups=f1
a.sources.s1.filegroups.f1=/root/flume/spooldir/.*\.csv
a.sources.s1.fileHeader=true
a.sources.s1.maxBatchCount=1000


#channel
a.channels.c1.type=memory
a.channels.c1.capacity=16777216
a.channels.c1.transactionCapacity=8388608

#sink
a.sinks.k1.type = hdfs
a.sinks.k1.hdfs.path = hdfs://single1:9000/flume/dir_202228_02
a.sinks.k1.hdfs.filePrefix = tags-events-
a.sinks.k1.hdfs.round = true
a.sinks.k1.hdfs.roundValue = 10
a.sinks.k1.hdfs.roundUnit = second 
a.sinks.k1.hdfs.rollInterval = 0
a.sinks.k1.hdfs.rollCount = 0
a.sinks.k1.hdfs.rollSize = 33554432

#join
a.sources.s1.channels = c1
a.sinks.k1.channel = c1
}

flume-ng agent -n a -c /opt/software/flume190/conf/ -f ~/flume/flume_taildir_memory_hdfs.log -Dflume.root.logger=INFO,console
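
#To test, append lines to a matching .csv; the taildir source picks them up and
#records its read offset in the position file (the sample row is illustrative):
 echo "1,1193,classic,1112486027" >> ~/flume/spooldir/tags_01.csv
 cat /root/flume/position_file/pf_01.json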

# 5. Demo: multi-hop (two chained agents)
#run on single1 (start the avro receiver on single01 first)
vim flume_link_netcat_memory_avro.log
{  
#common
a.sources=s1
a.channels=c1
a.sinks=k1

#source
a.sources.s1.type=netcat
a.sources.s1.bind=single1
a.sources.s1.port=9999

#channel
a.channels.c1.type=memory
a.channels.c1.capacity=256
a.channels.c1.transactionCapacity=128

#sink
a.sinks.k1.type=avro
a.sinks.k1.hostname=single01
a.sinks.k1.port=44444

#join
a.sources.s1.channels = c1
a.sinks.k1.channel = c1
}
flume-ng agent -n a -c /opt/software/flume190/conf/ -f ~/flume/flume_link_netcat_memory_avro.log -Dflume.root.logger=INFO,console

#run on single01; start this receiving agent before the sender
vim flume_link_avro_memory_log.log
{  
#common
a.sources=s1
a.channels=c1
a.sinks=k1

#source
a.sources.s1.type=avro
a.sources.s1.bind=single01
a.sources.s1.port=44444

#channel
a.channels.c1.type=memory
a.channels.c1.capacity=256
a.channels.c1.transactionCapacity=128

#sink
a.sinks.k1.type=logger

#join
a.sources.s1.channels = c1
a.sinks.k1.channel = c1
}
flume-ng agent -n a -c /opt/software/flume190/conf/ -f ~/flume/flume_link_avro_memory_log.log -Dflume.root.logger=INFO,console
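
#With both agents running (receiver on single01 first), feed the first hop;
#lines typed here should show up in the logger console on single01:
 nc single1 9999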

# 6. Demo: fan-in (single1 receives; single1 and single01 both send)

#Server side: receiver on single1
vim flume_fanin1_avro_memory_log.log  #run on single1; start this entry point first
{
#common
a.sources=s1
a.channels=c1
a.sinks=k1

#source
a.sources.s1.type=avro
a.sources.s1.bind=single1
a.sources.s1.port=44444


#channel
a.channels.c1.type=memory
a.channels.c1.capacity=256
a.channels.c1.transactionCapacity=128

#sink
a.sinks.k1.type=logger

#join
a.sources.s1.channels = c1
a.sinks.k1.channel = c1
}
flume-ng agent -n a -c /opt/software/flume190/conf/ -f ~/flume/flume_fanin1_avro_memory_log.log -Dflume.root.logger=INFO,console

#Sender client on single1
vim flume_fanin1_netcat_memory_avro.log
{  
#common
a.sources=s1
a.channels=c1
a.sinks=k1

#source
a.sources.s1.type=netcat
a.sources.s1.bind=single1
a.sources.s1.port=9999

#channel
a.channels.c1.type=memory
a.channels.c1.capacity=256
a.channels.c1.transactionCapacity=128

#sink
a.sinks.k1.type=avro
a.sinks.k1.hostname=single1
a.sinks.k1.port=44444

#join
a.sources.s1.channels = c1
a.sinks.k1.channel = c1
}
flume-ng agent -n a -c /opt/software/flume190/conf/ -f ~/flume/flume_fanin1_netcat_memory_avro.log -Dflume.root.logger=INFO,console

#Sender client on single01
vim flume_fanin01_netcat_memory_avro.log
{  
#common
a.sources=s1
a.channels=c1
a.sinks=k1

#source
a.sources.s1.type=netcat
a.sources.s1.bind=single01
a.sources.s1.port=9999

#channel
a.channels.c1.type=memory
a.channels.c1.capacity=256
a.channels.c1.transactionCapacity=128

#sink
a.sinks.k1.type=avro
a.sinks.k1.hostname=single1
a.sinks.k1.port=44444

#join
a.sources.s1.channels = c1
a.sinks.k1.channel = c1
}
flume-ng agent -n a -c /opt/software/flume190/conf/ -f ~/flume/flume_fanin01_netcat_memory_avro.log -Dflume.root.logger=INFO,console
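
#With the receiver and both senders running, feed each client source;
#events from both hosts should interleave in the receiver's logger console:
 nc single1 9999    #on single1
 nc single01 9999   #on single01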

# 7. Demo: fan-out (one source feeding logger, HBase, and Kafka)
  #Create the Kafka topic used by the Kafka channel
  kafka-topics.sh --bootstrap-server single1:9092 --create --topic flume_kafka_channel_fanout_20220209_01 --partitions 1 --replication-factor 1
  #Start a console consumer on it (the Kafka sink below writes to a different
  #topic, flume_kafka_sink_fanout_20220209_01, which can be consumed the same way)
  kafka-console-consumer.sh --bootstrap-server single1:9092 --topic flume_kafka_channel_fanout_20220209_01 --from-beginning
  #Create the HBase table (the kb16nb namespace must already exist)
  create 'kb16nb:flume_hbase_sink_20220209','tags'
  
vim flume_fanout_taildir_file_log_hbase_kafka.log
{
#common
a.sources=s1
a.channels=c1 c2 c3 
a.sinks=k1 k2 k3

#source
a.sources.s1.type=TAILDIR
a.sources.s1.positionFile=/root/flume/position_file/pf_01.json
a.sources.s1.filegroups=f1
a.sources.s1.filegroups.f1=/root/flume/spooldir/.*\.csv
a.sources.s1.fileHeader=true
a.sources.s1.maxBatchCount=1000

#channel
a.channels.c1.type=memory
a.channels.c1.capacity=8192
a.channels.c1.transactionCapacity=4096

a.channels.c2.type=org.apache.flume.channel.kafka.KafkaChannel
a.channels.c2.kafka.bootstrap.servers=single1:9092
a.channels.c2.kafka.topic=flume_kafka_channel_fanout_20220209_01
a.channels.c2.kafka.consumer.group.id=flume_kafka_channel_fanout_20220209_01

a.channels.c3.type=file
a.channels.c3.checkpointDir=/root/flume/checkpoint
a.channels.c3.dataDirs=/root/flume/datadir
a.channels.c3.capacity=8192
a.channels.c3.transactionCapacity=4096
a.channels.c3.checkpointInterval=10000

#sink
a.sinks.k1.type=logger

a.sinks.k2.type=hbase
a.sinks.k2.table=kb16nb:flume_hbase_sink_20220209
a.sinks.k2.columnFamily=tags
a.sinks.k2.zookeeperQuorum=single1:2181
a.sinks.k2.batchSize=50
a.sinks.k2.serializer.regex=.*?@(.*?),(.*?),(.*?),(.*)
a.sinks.k2.serializer=org.apache.flume.sink.hbase.RegexHbaseEventSerializer
a.sinks.k2.serializer.colNames=userId,movieId,tag,timestamp

a.sinks.k3.type=org.apache.flume.sink.kafka.KafkaSink
a.sinks.k3.kafka.bootstrap.servers=single1:9092
a.sinks.k3.kafka.topic=flume_kafka_sink_fanout_20220209_01
a.sinks.k3.flumeBatchSize=50

#join
a.sources.s1.channels=c1 c2 c3
a.sinks.k1.channel=c1
a.sinks.k2.channel=c2
a.sinks.k3.channel=c3
}
flume-ng agent -n a -c /opt/software/flume190/conf/ -f ~/flume/flume_fanout_taildir_file_log_hbase_kafka.log -Dflume.root.logger=INFO,console
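
#To test, append a row in the shape the HBase serializer regex expects
#(prefix@userId,movieId,tag,timestamp; the sample values are illustrative):
 echo "18@3,260,imaginative,1140455208" >> /root/flume/spooldir/tags_02.csv
 #logger sink: events print in the agent console
 #kafka sink: consume its topic
  kafka-console-consumer.sh --bootstrap-server single1:9092 --topic flume_kafka_sink_fanout_20220209_01 --from-beginning
 #hbase sink: scan the table from the hbase shell
  scan 'kb16nb:flume_hbase_sink_20220209'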
