log4j配置
配置文件
修改log4j.properties
# 设置要进行数据跟踪程序所在的名称
log4j.logger.cn.mldn.myflume=INFO,flume
log4j.appender.flume=org.apache.flume.clients.log4jappender.Log4jAppender
log4j.appender.flume.layout=org.apache.log4j.PatternLayout
log4j.appender.flume.Hostname=10.238.103.141
log4j.appender.flume.Port=44444
所属依赖
<dependency>
<groupId>org.apache.flume</groupId>
<artifactId>flume-ng-core</artifactId>
<version>1.7.0</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>1.7.25</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.25</version>
</dependency>
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<version>1.2.17</version>
</dependency>
<dependency>
<groupId>org.apache.flume.flume-ng-clients</groupId>
<artifactId>flume-ng-log4jappender</artifactId>
<version>1.7.0</version>
</dependency>
程序实现
package cn.mldn.myflume;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class TestFlumeDemo {

    // SLF4J logger bound to this class; log4j.properties routes
    // cn.mldn.myflume output to the Flume Log4jAppender.
    private static final Logger LOGGER = LoggerFactory.getLogger(TestFlumeDemo.class);

    /**
     * Emits a single test log event, which the configured Flume
     * appender forwards to the avro source and on to Kafka.
     */
    public static void main(String[] args) {
        for (int i = 0; i < 1; i++) {
            LOGGER.info("mldn.cn");
        }
    }
}
Flume配置
配置文件
/usr/local/flume/conf/flume2.conf
agent1.sources = source
agent1.sinks = sink
agent1.channels = channel
#agent1-source
#agent1.sources.source.type = spooldir
#agent1.sources.source.spoolDir = /usr/local/flume/logdir
agent1.sources.source.type = avro
agent1.sources.source.bind = 10.238.103.141
agent1.sources.source.port = 44444
#agent1-sink
agent1.sinks.sink.type = org.apache.flume.sink.kafka.KafkaSink
agent1.sinks.sink.topic = mldn-flume
agent1.sinks.sink.brokerList = 10.238.103.141:9095
#agent1.sinks.sink.requiredAcks = 1
#agent1.sinks.sink.batchSize = 100
#agent1-channel
agent1.channels.channel.type = memory
agent1.channels.channel.capacity = 1000
agent1.channels.channel.transactionCapacity = 100
#绑定source和sink到channel上
agent1.sources.source.channels = channel
agent1.sinks.sink.channel = channel
/usr/local/flume/conf/flume2.conf
agent1.sources = source
agent1.sinks = sink
agent1.channels = channel
#agent1-source
#agent1.sources.source.type = spooldir
#agent1.sources.source.spoolDir = /usr/local/flume/logdir
agent1.sources.source.type = avro
agent1.sources.source.bind = 10.238.103.141
agent1.sources.source.port = 44444
#agent1-sink
agent1.sinks.sink.type = org.apache.flume.sink.kafka.KafkaSink
agent1.sinks.sink.topic = mldn-flume
agent1.sinks.sink.brokerList = 10.238.103.141:9095
#agent1.sinks.sink.requiredAcks = 1
#agent1.sinks.sink.batchSize = 100
#agent1-channel
agent1.channels.channel.type = memory
agent1.channels.channel.capacity = 1000
agent1.channels.channel.transactionCapacity = 100
#绑定source和sink到channel上
agent1.sources.source.channels = channel
agent1.sinks.sink.channel = channel
/usr/local/flume/conf/flume3.conf
# Name the components on this agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1
# Describe/configure the source
a1.sources.r1.type = avro
a1.sources.r1.bind = 10.238.103.141
a1.sources.r1.port = 44444
# Describe the sink
a1.sinks.k1.type = org.apache.flume.sink.kafka.KafkaSink
a1.sinks.k1.topic = mldn-flume
a1.sinks.k1.brokerList = 10.238.103.141:9095
# Use a channel which buffers events in memory
a1.channels.c1.type = memory
#a1.channels.c1.kafka.bootstrap.servers = 10.238.103.141:9095
#a1.channels.c1.kafka.topic = mldn-flume
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
Flume服务的启动
/usr/local/flume/bin/flume-ng agent --conf /usr/local/flume/conf/ --conf-file /usr/local/flume/conf/flume3.conf --name a1 -Dflume.root.logger=INFO,console > /usr/data/flume.log 2>&1 &
或
/usr/local/flume/bin/flume-ng agent --conf /usr/local/flume/conf/ --conf-file /usr/local/flume/conf/flume2.conf --name agent1 -Dflume.root.logger=INFO,console > /usr/data/flume.log 2>&1 &
此时,如果程序里执行LOGGER.info("mldn.cn"),就会把 mldn.cn 这条日志写入到 Kafka 里