一、采集启动日志(使用自定义拦截器)
### --- 定义配置文件
[root@hadoop02 ~]# vim /data/yanqidw/conf/flume-log2hdfs2.conf
# Flume agent "a1": tail start-up log files and write them to HDFS,
# partitioned by the "logtime" event header set by the custom interceptor.
a1.sources = r1
a1.sinks = k1
a1.channels = c1
# Taildir source: tails files matching the group pattern and records read
# offsets in the position file, so a restart does not re-read old data.
a1.sources.r1.type = TAILDIR
a1.sources.r1.positionFile = /data/yanqidw/conf/startlog_position.json
a1.sources.r1.filegroups = f1
a1.sources.r1.filegroups.f1 = /data/yanqidw/logs/start/.*log
# Custom interceptor: presumably parses the event's own timestamp into the
# "logtime" header consumed by the sink path below -- confirm against the
# interceptor's source code.
a1.sources.r1.interceptors = i1
a1.sources.r1.interceptors.i1.type = cn.yanqi.dw.flume.interceptor.CustomerInterceptor$Builder
# Memory channel (fast, but buffered events are lost if the agent dies).
a1.channels.c1.type = memory
a1.channels.c1.capacity = 100000
# Must be >= the sink's batchSize (1000) so one batch fits in a transaction.
a1.channels.c1.transactionCapacity = 2000
# HDFS sink: one directory per logical day, taken from the "logtime" header.
a1.sinks.k1.type = hdfs
a1.sinks.k1.hdfs.path = /user/data/logs/start/dt=%{logtime}/
a1.sinks.k1.hdfs.filePrefix = startlog.
a1.sinks.k1.hdfs.fileType = DataStream
# Roll files by size only (32 MB); disable count/interval/idle-based rolling.
a1.sinks.k1.hdfs.rollSize = 33554432
a1.sinks.k1.hdfs.rollCount = 0
a1.sinks.k1.hdfs.rollInterval = 0
a1.sinks.k1.hdfs.idleTimeout = 0
# Avoid premature file rolls triggered by HDFS block replication.
a1.sinks.k1.hdfs.minBlockReplicas = 1
# Number of events flushed to HDFS per batch.
a1.sinks.k1.hdfs.batchSize = 1000
# Local timestamp deliberately disabled: the partition date must come from
# the event's "logtime" header, not from ingest (wall-clock) time.
# a1.sinks.k1.hdfs.useLocalTimeStamp = true
# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
### --- 修改:
~~~ 给source增加自定义拦截器;
~~~ 去掉本地时间戳 a1.sinks.k1.hdfs.useLocalTimeStamp = true
~~~ 根据header中的logtime写文件
### --- 启动agent服务
[root@hadoop02 ~]# flume-ng agent --conf /opt/yanqi/servers/flume-1.9.0/conf \
--conf-file /data/yanqidw/conf/flume-log2hdfs2.conf \
-name a1 -Dflume.root.logger=INFO,console
### --- 拷贝日志
[root@hadoop02 ~]# cp /data/yanqidw/logs/data/start0802.log /data/yanqidw/logs/start/start7.log
[root@hadoop02 ~]# cp /data/yanqidw/logs/data/start0802.log /data/yanqidw/logs/start/start8.log
### --- 检查HDFS文件
[root@hadoop02 ~]# hdfs dfs -ls /user/data/logs/start/
drwxr-xr-x - root supergroup 0 2021-09-28 18:28 /user/data/logs/start/2021-09-28
drwxr-xr-x - root supergroup 0 2021-09-28 20:00 /user/data/logs/start/dt=2020-07-21
drwxr-xr-x - root supergroup 0 2021-09-28 19:51 /user/data/logs/start/dt=Unknown
### --- 说明:dt=2020-07-21 目录按事件 header 中的 logtime 分区生成;dt=Unknown 推测为 logtime 解析失败的事件所写(待确认);2021-09-28 目录是此前使用本地时间戳配置时遗留的结果。