package org.util;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.log4j.Logger;
/**
 * Kafka consumer-side controller: periodically pulls messages from Kafka
 * and persists them to MongoDB.
 *
 * @author
 *
 */
public class KafkaConsumerUtil {

    private static final Logger LOGGER = Logger
            .getLogger(KafkaConsumerUtil.class);

    /**
     * Guards {@link #execute()} so that at most one poll/insert cycle runs
     * at any time, even if scheduled runs were ever to overlap.
     */
    private static final ReentrantLock lock = new ReentrantLock();

    /**
     * Starts a scheduled background task that periodically pulls messages
     * from Kafka and stores them into MongoDB.
     *
     * <p>The task first fires {@code ConstantNumber.NUM_THIRTY} seconds after
     * startup and then re-runs {@code Constant.INTERVAL_TIMER} seconds after
     * each completed run (fixed delay: a new run never starts while the
     * previous one is still executing).
     *
     * <p>NOTE(review): every call creates a new 10-thread scheduler that is
     * never shut down; presumably this method is called once per JVM —
     * TODO confirm, otherwise threads leak on repeated calls.
     */
    public void syncPull() {
        try {
            ScheduledExecutorService ses = Executors
                    .newScheduledThreadPool(ConstantNumber.NUM_TEN);
            ses.scheduleWithFixedDelay(new Runnable() {
                public void run() {
                    try {
                        LOGGER.info("run being:"
                                + new SimpleDateFormat(Constant.FORMAT)
                                        .format(new Date()));
                        execute();
                        LOGGER.info("run end:"
                                + new SimpleDateFormat(Constant.FORMAT)
                                        .format(new Date()));
                    } catch (Exception e) {
                        // Pass the throwable so the full stack trace reaches
                        // the appender, not just the (possibly null) message.
                        LOGGER.error("syncPull_run_error:" + e.getMessage(), e);
                    }
                }
            }, ConstantNumber.NUM_THIRTY,
                    SystemConfig.getLong(Constant.INTERVAL_TIMER),
                    TimeUnit.SECONDS);
        } catch (Exception e) {
            LOGGER.error("syncPull_error:" + e.getMessage(), e);
        }
    }

    /**
     * Runs one consume cycle: subscribes to the configured topic, polls a
     * configured number of times, writes each partition's record values to
     * MongoDB, and commits that partition's offset only after a successful
     * insert (at-least-once delivery).
     *
     * <p>If another cycle already holds the lock this call returns
     * immediately (non-blocking {@code tryLock}).
     */
    private void execute() {
        if (!lock.tryLock()) {
            return;
        }
        KafkaConsumer<String, String> consumer = null;
        try {
            consumer = initConsumer();
            consumer.subscribe(Arrays
                    .asList(SystemConfig.getString(Constant.LOG_TOPIC)));
            // Number of poll iterations per cycle.
            int foreach = SystemConfig
                    .getInteger(Constant.CONSUMER_FOREACH_COUNT);
            for (int k = 0; k < foreach; k++) {
                LOGGER.info("foreach is :" + (k + 1));
                // NOTE(review): poll(Long.MAX_VALUE) blocks indefinitely
                // until messages arrive, which also stalls the scheduled
                // task driving this method — confirm this is intended.
                ConsumerRecords<String, String> records = consumer
                        .poll(Long.MAX_VALUE);
                // Message payloads collected per partition.
                List<String> recordValues = new ArrayList<String>();
                MongoDBUtil mongoDBUtil = new MongoDBUtil();
                for (TopicPartition partition : records.partitions()) {
                    List<ConsumerRecord<String, String>> partitionRecords = records
                            .records(partition);
                    if (null == partitionRecords
                            || partitionRecords.size() == 0) {
                        continue;
                    }
                    LOGGER.info("get partitionRecords size is :"
                            + partitionRecords.size());
                    recordValues.clear();
                    for (ConsumerRecord<String, String> record : partitionRecords) {
                        LOGGER.info(
                                "now consumer the message it's offset is :"
                                        + record.offset() + " key:"
                                        + record.key()
                                        + " and the value is :"
                                        + record.value());
                        recordValues.add(record.value());
                    }
                    // Persist the batch; commit the offset only on success
                    // so failed batches are re-delivered.
                    int result = mongoDBUtil.insertDB(recordValues);
                    if (result == 1) {
                        long lastOffset = partitionRecords
                                .get(partitionRecords.size() - 1).offset();
                        LOGGER.info("now commit the partition[ "
                                + partition.partition() + "] offset");
                        consumer.commitSync(Collections.singletonMap(
                                partition,
                                new OffsetAndMetadata(lastOffset + 1)));
                    }
                }
            }
        } catch (Exception e) {
            // Log with the throwable instead of printStackTrace()/getMessage()
            // so the stack trace goes to the configured appender.
            LOGGER.error("[KafkaConsumerUtil.execute]" + e.getMessage(), e);
        } finally {
            if (null != consumer) {
                consumer.close();
            }
            lock.unlock();
        }
    }

    /**
     * Builds a {@link KafkaConsumer} from the configured bootstrap servers
     * and group id, with auto-commit disabled (offsets are committed
     * manually after a successful DB insert) and "earliest" offset reset.
     *
     * @return a new consumer; the caller is responsible for closing it
     */
    private KafkaConsumer<String, String> initConsumer() {
        Properties props = new Properties();
        // Kafka broker address list.
        props.put("bootstrap.servers",
                SystemConfig.getString(Constant.BOOTSTRAP_SERVERS));
        // Consumer group name.
        props.put("group.id", SystemConfig.getString(Constant.GROUP_ID));
        props.put("enable.auto.commit", "false");
        // Start a new group from the earliest offset; the default "latest"
        // would only see messages produced after the consumer starts.
        props.put("auto.offset.reset", "earliest");
        props.put("session.timeout.ms", "30000");
        props.put("key.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");
        return new KafkaConsumer<String, String>(props);
    }
}
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.log4j.Logger;
/**
 * Kafka consumer-side controller: periodically pulls messages from Kafka
 * and persists them to MongoDB.
 *
 * NOTE(review): everything from the import block above through the end of
 * this class is a duplicate paste of the definition at the top of this
 * file; two top-level classes with the same name will not compile — the
 * duplicate should be deleted.
 *
 * @author
 *
 */
// NOTE(review): this class is a byte-for-byte duplicate of the
// KafkaConsumerUtil defined earlier in this file (an accidental paste).
// A second top-level class with the same name will not compile; this
// duplicate — and the mid-file import block above it — should be removed.
public class KafkaConsumerUtil {

    private static final Logger LOGGER = Logger
            .getLogger(KafkaConsumerUtil.class);

    /**
     * Guards {@link #execute()} so that at most one poll/insert cycle runs
     * at any time, even if scheduled runs were ever to overlap.
     */
    private static final ReentrantLock lock = new ReentrantLock();

    /**
     * Starts a scheduled background task that periodically pulls messages
     * from Kafka and stores them into MongoDB.
     *
     * <p>The task first fires {@code ConstantNumber.NUM_THIRTY} seconds after
     * startup and then re-runs {@code Constant.INTERVAL_TIMER} seconds after
     * each completed run (fixed delay: a new run never starts while the
     * previous one is still executing).
     *
     * <p>NOTE(review): every call creates a new 10-thread scheduler that is
     * never shut down; presumably this method is called once per JVM —
     * TODO confirm, otherwise threads leak on repeated calls.
     */
    public void syncPull() {
        try {
            ScheduledExecutorService ses = Executors
                    .newScheduledThreadPool(ConstantNumber.NUM_TEN);
            ses.scheduleWithFixedDelay(new Runnable() {
                public void run() {
                    try {
                        LOGGER.info("run being:"
                                + new SimpleDateFormat(Constant.FORMAT)
                                        .format(new Date()));
                        execute();
                        LOGGER.info("run end:"
                                + new SimpleDateFormat(Constant.FORMAT)
                                        .format(new Date()));
                    } catch (Exception e) {
                        // Pass the throwable so the full stack trace reaches
                        // the appender, not just the (possibly null) message.
                        LOGGER.error("syncPull_run_error:" + e.getMessage(), e);
                    }
                }
            }, ConstantNumber.NUM_THIRTY,
                    SystemConfig.getLong(Constant.INTERVAL_TIMER),
                    TimeUnit.SECONDS);
        } catch (Exception e) {
            LOGGER.error("syncPull_error:" + e.getMessage(), e);
        }
    }

    /**
     * Runs one consume cycle: subscribes to the configured topic, polls a
     * configured number of times, writes each partition's record values to
     * MongoDB, and commits that partition's offset only after a successful
     * insert (at-least-once delivery).
     *
     * <p>If another cycle already holds the lock this call returns
     * immediately (non-blocking {@code tryLock}).
     */
    private void execute() {
        if (!lock.tryLock()) {
            return;
        }
        KafkaConsumer<String, String> consumer = null;
        try {
            consumer = initConsumer();
            consumer.subscribe(Arrays
                    .asList(SystemConfig.getString(Constant.LOG_TOPIC)));
            // Number of poll iterations per cycle.
            int foreach = SystemConfig
                    .getInteger(Constant.CONSUMER_FOREACH_COUNT);
            for (int k = 0; k < foreach; k++) {
                LOGGER.info("foreach is :" + (k + 1));
                // NOTE(review): poll(Long.MAX_VALUE) blocks indefinitely
                // until messages arrive, which also stalls the scheduled
                // task driving this method — confirm this is intended.
                ConsumerRecords<String, String> records = consumer
                        .poll(Long.MAX_VALUE);
                // Message payloads collected per partition.
                List<String> recordValues = new ArrayList<String>();
                MongoDBUtil mongoDBUtil = new MongoDBUtil();
                for (TopicPartition partition : records.partitions()) {
                    List<ConsumerRecord<String, String>> partitionRecords = records
                            .records(partition);
                    if (null == partitionRecords
                            || partitionRecords.size() == 0) {
                        continue;
                    }
                    LOGGER.info("get partitionRecords size is :"
                            + partitionRecords.size());
                    recordValues.clear();
                    for (ConsumerRecord<String, String> record : partitionRecords) {
                        LOGGER.info(
                                "now consumer the message it's offset is :"
                                        + record.offset() + " key:"
                                        + record.key()
                                        + " and the value is :"
                                        + record.value());
                        recordValues.add(record.value());
                    }
                    // Persist the batch; commit the offset only on success
                    // so failed batches are re-delivered.
                    int result = mongoDBUtil.insertDB(recordValues);
                    if (result == 1) {
                        long lastOffset = partitionRecords
                                .get(partitionRecords.size() - 1).offset();
                        LOGGER.info("now commit the partition[ "
                                + partition.partition() + "] offset");
                        consumer.commitSync(Collections.singletonMap(
                                partition,
                                new OffsetAndMetadata(lastOffset + 1)));
                    }
                }
            }
        } catch (Exception e) {
            // Log with the throwable instead of printStackTrace()/getMessage()
            // so the stack trace goes to the configured appender.
            LOGGER.error("[KafkaConsumerUtil.execute]" + e.getMessage(), e);
        } finally {
            if (null != consumer) {
                consumer.close();
            }
            lock.unlock();
        }
    }

    /**
     * Builds a {@link KafkaConsumer} from the configured bootstrap servers
     * and group id, with auto-commit disabled (offsets are committed
     * manually after a successful DB insert) and "earliest" offset reset.
     *
     * @return a new consumer; the caller is responsible for closing it
     */
    private KafkaConsumer<String, String> initConsumer() {
        Properties props = new Properties();
        // Kafka broker address list.
        props.put("bootstrap.servers",
                SystemConfig.getString(Constant.BOOTSTRAP_SERVERS));
        // Consumer group name.
        props.put("group.id", SystemConfig.getString(Constant.GROUP_ID));
        props.put("enable.auto.commit", "false");
        // Start a new group from the earliest offset; the default "latest"
        // would only see messages produced after the consumer starts.
        props.put("auto.offset.reset", "earliest");
        props.put("session.timeout.ms", "30000");
        props.put("key.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");
        return new KafkaConsumer<String, String>(props);
    }
}