Solution:
1. On the Flume side, use a Taildir source to read the log, use an interceptor to add a header to each event with key topic and value set to the destination topic for that log line, and use a Kafka sink to write the events to Kafka.
2. On the Kafka side, create four topics, one for each Hive log level, and consume them with consumers written in Java. Since the info logs must be consumed by three consumers, either give the info topic three partitions and put the three consumers in the same consumer group (most efficient, because each consumer owns one partition), or put the three consumers in different consumer groups (each then receives every info message). A sketch of creating the topics follows this list.
3. Use an HDFS sink that takes the timestamp and log-level information from the event headers and writes the logs to the corresponding HDFS directories.
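As a minimal sketch of step 2, the four topics can be created ahead of time with the Kafka AdminClient. The level topic names (info/warn/error/debug) match the interceptor below; the replication factor of 1 and the single partition for the non-info topics are illustrative assumptions:

import java.util.Arrays;
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

public class CreateLogTopics {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "master:9092");
        try (AdminClient admin = AdminClient.create(props)) {
            // info gets 3 partitions so 3 consumers in one group can share the load;
            // the other levels only need a single partition here
            admin.createTopics(Arrays.asList(
                    new NewTopic("info", 3, (short) 1),
                    new NewTopic("warn", 1, (short) 1),
                    new NewTopic("error", 1, (short) 1),
                    new NewTopic("debug", 1, (short) 1)
            )).all().get();
        }
    }
}

Flume agent configuration (Taildir source, memory channel, Kafka sink):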
# Name the components on this agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1
# Describe/configure the source
a1.sources.r1.type = TAILDIR
a1.sources.r1.filegroups = f1
# Path of the Hive log file
a1.sources.r1.filegroups.f1 = /tmp/root/hive.log
a1.sources.r1.positionFile = /tmp/flume/position/hivelog_position.json
# Describe the sink
a1.sinks.k1.type = org.apache.flume.sink.kafka.KafkaSink
# Kafka broker addresses
a1.sinks.k1.kafka.bootstrap.servers = localhost:9092,master:9092,slave2:9092
a1.sinks.k1.kafka.flumeBatchSize = 20
a1.sinks.k1.kafka.producer.acks = 1
a1.sinks.k1.kafka.producer.linger.ms = 1
a1.sources.r1.interceptors = i1
# Fully qualified class name of the interceptor's Builder; the interceptor sets the
# "topic" header, which the Kafka sink uses to pick the destination topic
a1.sources.r1.interceptors.i1.type = interceptor.KafkaInterceptor$Builder
# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
Interceptor code
@Override
public Event intercept(Event event) {
    Map<String, String> header = event.getHeaders();
    String body = new String(event.getBody());
    String levelValue;                       // log level, also used as the Kafka topic name
    if (body.contains("INFO"))
        levelValue = "info";
    else if (body.contains("WARN"))          // log4j prints "WARN" (this also matches "WARNING")
        levelValue = "warn";
    else if (body.contains("ERROR"))
        levelValue = "error";
    else
        levelValue = "debug";
    // The Kafka sink routes the event to the topic named in this header
    header.put("topic", levelValue);
    System.out.println("topic:" + header.get("topic"));
    return event;
}
@Override
public List<Event> intercept(List<Event> list) {
    // Merge continuation lines (e.g. stack traces) into the preceding event,
    // so that one log record ends up as one Kafka message.
    events.clear();
    int lastIndex = -1;
    for (Event e : list) {
        if (haveHeadMessage(new String(e.getBody()))) {
            // The line starts a new log record: keep it and remember its position
            events.add(intercept(e));
            lastIndex = events.size() - 1;
        } else if (lastIndex >= 0) {
            // Continuation line: append it to the last complete record
            String body = new String(events.get(lastIndex).getBody())
                    + "\n" + new String(e.getBody());
            events.get(lastIndex).setBody(body.getBytes());
        }
        // A continuation line with no preceding record in this batch is dropped
    }
    return events;
}
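The two methods above assume a surrounding class roughly like the one sketched below: the events buffer, the haveHeadMessage() check and the Builder referenced by the Flume config. The timestamp regex is an assumption about the Hive log4j line layout (e.g. "2023-05-12T10:00:00,123 INFO ..."); adjust it to the actual format.

package interceptor;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.interceptor.Interceptor;

public class KafkaInterceptor implements Interceptor {

    // Reused buffer for the merged events of one batch (cleared at the start of intercept(List))
    private final List<Event> events = new ArrayList<>();

    @Override
    public void initialize() { }

    // intercept(Event) and intercept(List<Event>) as shown above

    @Override
    public void close() { }

    // A line that starts with a timestamp is the head of a new log record;
    // anything else (e.g. a stack-trace line) is a continuation of the previous record.
    private boolean haveHeadMessage(String line) {
        return line.matches("^\\d{4}-\\d{2}-\\d{2}.*");
    }

    // Referenced by a1.sources.r1.interceptors.i1.type = interceptor.KafkaInterceptor$Builder
    public static class Builder implements Interceptor.Builder {
        @Override
        public Interceptor build() {
            return new KafkaInterceptor();
        }

        @Override
        public void configure(Context context) { }
    }
}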
Consumer code
Properties properties = new Properties();
properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "master:9092");
properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 1000);
properties.put(ConsumerConfig.GROUP_ID_CONFIG, "test");
// Where to start reading when the group has no committed offset
properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
Consumer<String, String> consumer = new KafkaConsumer<>(properties);
// Subscribe to the topic of the level this consumer handles (info/warn/error/debug)
ArrayList<String> topics = new ArrayList<>();
topics.add("info");
consumer.subscribe(topics);
// Poll for data (poll(long) is deprecated in newer clients; use poll(Duration.ofMillis(100)) there)
while (true) {
    ConsumerRecords<String, String> records = consumer.poll(100);
    for (ConsumerRecord<String, String> record : records) {
        System.out.println(record.key() + "---" + record.value());
    }
}
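To realize step 2's "three consumers in one consumer group for the info topic", a minimal sketch is shown below; it also carries the imports the snippet above relies on. It simply starts three consumers with the same group.id, so Kafka assigns each of the info topic's three partitions to a different group member. The group id hive-info-group is an assumption, and poll(Duration) requires a Kafka client of version 2.0 or newer:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class InfoGroupConsumers {
    public static void main(String[] args) {
        for (int i = 0; i < 3; i++) {
            final int id = i;
            new Thread(() -> {
                Properties props = new Properties();
                props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "master:9092");
                props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                        "org.apache.kafka.common.serialization.StringDeserializer");
                props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                        "org.apache.kafka.common.serialization.StringDeserializer");
                // Same group id for all three consumers: Kafka splits the info
                // topic's three partitions across the three group members.
                props.put(ConsumerConfig.GROUP_ID_CONFIG, "hive-info-group");
                props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
                try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
                    consumer.subscribe(Collections.singletonList("info"));
                    while (true) {
                        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                        for (ConsumerRecord<String, String> record : records) {
                            System.out.println("consumer-" + id + ": " + record.value());
                        }
                    }
                }
            }, "info-consumer-" + id).start();
        }
    }
}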
See my previous post for how Flume reads the Hive log.