I originally planned to build a cluster with the architecture below to learn big data.
However, I found that with this setup the data did not flow from Flume to Spark Streaming in real time: after a very long pause a large batch would suddenly arrive all at once, which felt rather mysterious. It was not a problem with the batch interval I had configured in Spark.
I later changed the architecture so that Kafka pushes the data directly into Spark Streaming, as shown below.
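For reference, here is a minimal sketch of the Spark Streaming side reading directly from Kafka. It assumes the spark-streaming-kafka-0-10 integration is on the classpath; the class name, consumer group id, batch interval and master setting are illustrative, while the broker list and the topic name batch are taken from the Flume config further down.

import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.spark.SparkConf;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;

public class KafkaToSparkStreaming {
    public static void main(String[] args) throws InterruptedException {
        // local[2] is only for a quick local test; on the cluster the master comes from spark-submit
        SparkConf conf = new SparkConf().setAppName("KafkaToSparkStreaming").setMaster("local[2]");
        JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(5));

        Map<String, Object> kafkaParams = new HashMap<>();
        kafkaParams.put("bootstrap.servers", "node03:9092,node04:9092,node05:9092");
        kafkaParams.put("key.deserializer", StringDeserializer.class);
        kafkaParams.put("value.deserializer", StringDeserializer.class);
        kafkaParams.put("group.id", "spark-streaming-group"); // illustrative group id
        kafkaParams.put("auto.offset.reset", "latest");
        kafkaParams.put("enable.auto.commit", false);

        Collection<String> topics = Collections.singletonList("batch");

        JavaInputDStream<ConsumerRecord<String, String>> stream =
            KafkaUtils.createDirectStream(
                jssc,
                LocationStrategies.PreferConsistent(),
                ConsumerStrategies.<String, String>Subscribe(topics, kafkaParams));

        // just print the raw log lines for now; real processing goes here
        stream.map(ConsumerRecord::value).print();

        jssc.start();
        jssc.awaitTermination();
    }
}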
Here I attached an interceptor to the Flume agent that collects into HDFS; it splits the logs into start logs and event logs and stores them by time. The result looks like this:
The logs end up in those two folders and are further split into files by timestamp, because I set the roll interval to 10 minutes.
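With the HDFS sink settings used below (path hdfs://node01:9000/flume/%Y%m%d/%H/%{logType}, file prefix logs, suffix .log), the resulting files sit under paths roughly like the following; the date, hour and timestamp counter are of course just illustrative:

/flume/20200801/10/start-log/logs.1596247200000.log
/flume/20200801/10/event-log/logs.1596247200000.log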
Here is a note on how the Flume interceptor is implemented; it is actually quite easy to do. First, add the flume-ng-core dependency:
<dependency>
<groupId>org.apache.flume</groupId>
<artifactId>flume-ng-core</artifactId>
<version>1.9.0</version>
</dependency>
Then implement the org.apache.flume.interceptor.Interceptor interface. The two important details are escaping the | delimiter (it is a regex metacharacter) and writing the timestamp and logType headers that the HDFS sink will use for routing:

import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.interceptor.Interceptor;

import java.io.UnsupportedEncodingException;
import java.util.List;
import java.util.Map;

public class FlumeLoggerIntercept implements Interceptor {

    public static final String TIMESTAMP = "timestamp";

    @Override
    public void initialize() {
    }

    @Override
    public void close() {
    }

    @Override
    public Event intercept(Event event) {
        try {
            String body = new String(event.getBody(), "UTF-8");
            // "|" must be escaped, otherwise split() treats it as regex alternation
            String[] split = body.split("\\|");
            Map<String, String> headers = event.getHeaders();
            // the first field of the log line is its timestamp
            headers.put(TIMESTAMP, split[0]);
            // tag the event so the HDFS sink can route it via %{logType}
            if (body.contains("start")) {
                headers.put("logType", "start-log");
            } else {
                headers.put("logType", "event-log");
            }
        } catch (UnsupportedEncodingException e) {
            e.printStackTrace();
        }
        return event;
    }

    @Override
    public List<Event> intercept(List<Event> events) {
        for (Event event : events) {
            intercept(event);
        }
        return events;
    }

    public static class Builder implements Interceptor.Builder {

        @Override
        public Interceptor build() {
            return new FlumeLoggerIntercept();
        }

        @Override
        public void configure(Context context) {
            // read interceptor properties from the Flume config here if needed
        }
    }
}
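To sanity-check the interceptor without deploying it, a small throwaway main like the one below works; the log line and its field layout are made up for illustration, and EventBuilder ships with flume-ng-core:

import java.nio.charset.StandardCharsets;
import org.apache.flume.Event;
import org.apache.flume.event.EventBuilder;

public class FlumeLoggerInterceptTest {
    public static void main(String[] args) {
        FlumeLoggerIntercept interceptor = new FlumeLoggerIntercept();
        interceptor.initialize();

        // made-up log line in the form "timestamp|type|payload"
        Event event = EventBuilder.withBody(
                "1596038400000|start|{\"uid\":\"u001\"}", StandardCharsets.UTF_8);
        interceptor.intercept(event);

        // expected headers: timestamp=1596038400000, logType=start-log
        System.out.println(event.getHeaders());
        interceptor.close();
    }
}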
Finally, just reference this interceptor in the Flume configuration file and you are done. Here is the Flume config:
a1.sources = r1
a1.sinks = k1 k2
a1.channels = c1 c2
a1.sources.r1.selector.type = replicating
a1.sources.r1.interceptors = i1
a1.sources.r1.interceptors.i1.type = FlumeLoggerIntercept$Builder
# configure the source
a1.sources.r1.type = org.apache.flume.source.kafka.KafkaSource
a1.sources.r1.batchSize = 800
a1.sources.r1.batchDurationMillis = 2000
a1.sources.r1.kafka.bootstrap.servers = node03:9092,node04:9092,node05:9092
a1.sources.r1.kafka.topics = batch
a1.sources.r1.kafka.consumer.group.id = custom.g.id
# ----------------------------------------------------------------------------------
# configure sink1: the HDFS sink
a1.sinks.k1.type = hdfs
a1.sinks.k1.hdfs.path = hdfs://node01:9000/flume/%Y%m%d/%H/%{logType}
a1.sinks.k1.hdfs.fileType = DataStream
a1.sinks.k1.hdfs.writeFormat = TEXT
a1.sinks.k1.hdfs.inUsePrefix=_
a1.sinks.k1.hdfs.round = true
a1.sinks.k1.hdfs.roundUnit = hour
a1.sinks.k1.hdfs.roundValue = 1
# prefix for output files
a1.sinks.k1.hdfs.filePrefix = logs
# suffix for output files
a1.sinks.k1.hdfs.fileSuffix = .log
a1.sinks.k1.hdfs.batchSize = 800
a1.sinks.k1.hdfs.rollSize = 134217700
a1.sinks.k1.hdfs.rollCount = 0
a1.sinks.k1.hdfs.rollInterval = 600
a1.sinks.k1.hdfs.useLocalTimeStamp = true
a1.sinks.k1.hdfs.minBlockReplicas = 1
# configure sink2
a1.sinks.k2.type = logger
# ----------------------------------------------------------------------------------
a1.channels.c1.type = memory
a1.channels.c1.capacity = 2000
a1.channels.c1.transactionCapacity = 1000
#
a1.channels.c2.type = memory
a1.channels.c2.capacity = 2000
a1.channels.c2.transactionCapacity = 1000
# ----------------------------------------------------------------------------------
a1.sources.r1.channels = c1 c2
a1.sinks.k1.channel = c1
a1.sinks.k2.channel = c2
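One last note on deployment: the jar containing the interceptor has to be on the Flume agent's classpath (for example dropped into Flume's lib directory or packaged under plugins.d), and if the class is declared in a package, the interceptors.i1.type value must be the fully qualified name, e.g. com.example.FlumeLoggerIntercept$Builder (the package name here is only an example). The agent is then started the usual way with something like bin/flume-ng agent --conf conf --conf-file <your-conf-file> --name a1, where the agent name must match the a1 prefix used in the config.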