package com.ultrapower.main;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.ultrapower.model.*;
import com.ultrapower.util.*;
import com.ultrapower.util.kafka.Producer;
import org.I0Itec.zkclient.ZkClient;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.*;
import org.apache.spark.util.LongAccumulator;
import redis.clients.jedis.Jedis;
import java.util.*;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
/**
 * @Author: kai
 * @Date: Created at 11:39 AM, 2018/07/16
 */
public class BjLogCleanXmlOffset {
private static Log log = LogFactory.getLog(BjLogCleanXmlOffset.class);
public static void main(String[] args) {
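// Pipeline overview: read logs from Kafka via a direct stream, split them into complete,
// incomplete, and malformed records, merge incomplete fragments through Redis hashes,
// extract per-link fields, then write results to Kafka, offsets to ZooKeeper, and counters to files.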
// Create the JavaStreamingContext and obtain the input stream
// For local runs:
// SparkConf conf = new SparkConf().setAppName("BjLogClean").setMaster("local[2]").set("spark.streaming.kafka.consumer.poll.ms", "60000");
// For the server:
SparkConf conf = new SparkConf().setAppName("BjLogClean").set("spark.streaming.kafka.consumer.poll.ms", "60000");
JavaStreamingContext jsc = new JavaStreamingContext(conf, new Duration(CoreConfig.SPARK_TIMEINTEVAL));
// Broadcast variable: the log fields whose values form the Redis key for each log entry
Broadcast<String[]> logFieldsKey = jsc.sparkContext().broadcast(new String[]{"uuid"});
// Accumulator: total number of records received from Kafka
final LongAccumulator dataFromKafka = jsc.sparkContext().sc().longAccumulator("Records pulled from kafka");
// Records whose link identifier is illegal
final LongAccumulator dataIllegalLink = jsc.sparkContext().sc().longAccumulator("Records of Illegal link");
// Records whose link identifier is legal
final LongAccumulator dataLink = jsc.sparkContext().sc().longAccumulator("Records of link");
// Number of log fragments merged into complete logs
final LongAccumulator dataMergeLink = jsc.sparkContext().sc().longAccumulator("Records of Merge link");
// Number of links deleted from Redis because they timed out
final LongAccumulator dataTimeOutReids = jsc.sparkContext().sc().longAccumulator("Records of Redis Timeout Link");
// Number of records written to the output
final LongAccumulator dataOutput = jsc.sparkContext().sc().longAccumulator("Records output to kafka");
// Number of complete logs
final LongAccumulator dataFullOutput = jsc.sparkContext().sc().longAccumulator("Full data");
// Number of incomplete logs
final LongAccumulator dataNotFullOutput = jsc.sparkContext().sc().longAccumulator("Not Full data");
// Complete logs that do not belong to any required link
final LongAccumulator dataFullNotBelongToLink = jsc.sparkContext().sc().longAccumulator("Full But Doesn't belong to links data");
final LongAccumulator streamapCount = jsc.sparkContext().sc().longAccumulator("stream count");
// Broadcast the broker list
final Broadcast<String> brokerListBroadcast = jsc.sparkContext().broadcast(CoreConfig.KAFKA_IP);
// Broadcast the output topic
final Broadcast<String> topicBroadcast = jsc.sparkContext().broadcast(CoreConfig.KAFKA_SPARKTOTOPIC);
// Broadcast the link identifiers
final Broadcast<Object[]> linksBroadcast = jsc.sparkContext().broadcast(BusInfoXmlConfig.busLinkLogoS);
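// KafkaTopicsOffsetTool.createDirectStream presumably builds the direct stream starting from
// the offsets previously saved in ZooKeeper (see writeOffsetToZk below); the exact behavior
// depends on that utility class.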
JavaInputDStream<ConsumerRecord<String, String>> stream = KafkaTopicsOffsetTool.createDirectStream(jsc);
// Repartition after extracting the String payload: ConsumerRecord is not serializable,
// so the ConsumerRecord stream must not be shuffled, and the direct stream itself must
// stay untouched so its RDDs can still be cast to HasOffsetRanges for offset tracking.
JavaDStream<String> streamMap = stream.map(new Function<ConsumerRecord<String, String>, String>() {
@Override
public String call(ConsumerRecord<String, String> record) throws Exception {
dataFromKafka.add(1L);
return record.value();
}
}).repartition(36);
// Cache here so this RDD is not recomputed by each of the filters below
streamMap.cache();
// Filter out the complete logs (splitflag == "1-1")
JavaDStream<String> fullBus = streamMap.filter(new Function<String, Boolean>() {
@Override
public Boolean call(String bus) throws Exception {
boolean fullflag = false;
if (bus != null && !bus.trim().isEmpty() && bus.contains("splitflag") && JsonUtils.isJson(bus)) {
JSONObject jsonObject = JSON.parseObject(bus);
String splitflag = jsonObject.get("splitflag").toString();
if (splitflag.equals("1-1")) {
fullflag = true;
dataFullOutput.add(1L);
}
}
return fullflag;
}
});
// fullBus.cache();
// (Alternative: extract effective fields from complete logs only and send them to Kafka)
// getEffectField(fullBus,dataIllegalLink,dataOutput,dataFromKafka,dataMergeLink,dataTimeOutReids,dataFullOutput,dataNotFullOutput);
// Filter out the malformed logs (empty, missing splitflag, or not valid JSON)
JavaDStream<String> errBusData = streamMap.filter(new Function<String, Boolean>() {
@Override
public Boolean call(String bus) throws Exception {
return bus == null || bus.trim().isEmpty() || !bus.contains("splitflag") || !JsonUtils.isJson(bus);
}
});
log.info("开始将为空不是Json等错误日志写入本地文件");
// 将错误日志写入到本地文件
writeErrdataToFile(errBusData,dataIllegalLink);
log.info("完成将为空不是Json等错误日志写入本地文件");
// Filter out the incomplete logs (splitflag != "1-1")
JavaDStream<String> notFullBus = streamMap.filter(new Function<String, Boolean>() {
@Override
public Boolean call(String bus) throws Exception {
boolean fullflag = false;
if (bus != null && !bus.trim().isEmpty() && bus.contains("splitflag") && JsonUtils.isJson(bus)) {
JSONObject jsonObject = JSON.parseObject(bus);
String splitflag = jsonObject.get("splitflag").toString();
if (!splitflag.equals("1-1")) {
fullflag = true;
dataNotFullOutput.add(1L);
}
}
return fullflag;
}
});
// HashMap holding the insertion time and Redis key of each incomplete log
HashMap<Long, String> redisTimeOutMap = new HashMap<>();
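// Note: this map travels inside the serialized closure below, so each task works on its own
// deserialized copy; entries do not persist across batches or executors. It is also keyed by
// System.currentTimeMillis(), so two inserts in the same millisecond overwrite each other.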
log.info("开始处理不完整日志");
// 对不完整日志进行处理变成完整日志
JavaDStream<String> fullBusAfterDeal = notFullBus.map(new Function<String, String>() {
@Override
public String call(String bus) throws Exception {
String fullBus = "";
// Parse the record into a JSON object
JSONObject jsonObject = JSON.parseObject(bus);
// splitflag has the form "total-index", e.g. "3-1" is fragment 1 of 3
String splitflag = jsonObject.get("splitflag").toString();
// Extract the message payload
String message = jsonObject.get("msg").toString();
// Total number of fragments for this log
String[] split = splitflag.split("-");
long logCount = Long.parseLong(split[0]);
// Build the log key from the broadcast field names
StringBuffer logKey = new StringBuffer();
String[] logFields = logFieldsKey.value();
for (int i = 0; i < logFields.length; i++) {
logKey.append(jsonObject.get(logFields[i]).toString());
}
// Store the log's key, fragment number, and content in a Redis hash
Jedis jedis = null;
try {
// makePool is assumed to be idempotent (create-once); otherwise pooling per record would be costly
JavaRedisClient.makePool(CoreConfig.REDIS_IP, CoreConfig.REDIS_PORT, CoreConfig.REDIS_TIMEOUT, CoreConfig.REDIS_MAXTOTAL, 100, 10);
jedis = JavaRedisClient.getPool().getResource();
jedis.hset(logKey.toString(), splitflag, message);
// Remember when this key was inserted
redisTimeOutMap.put(System.currentTimeMillis(), logKey.toString());
// Number of fragments currently stored in Redis under this key
long redisLogCount = jedis.hlen(logKey.toString());
// If every fragment has arrived, concatenate them into one complete log
if (logCount == redisLogCount) {
StringBuffer logMessage = new StringBuffer();
for (int i = 1; i <= logCount; i++) {
String mess = jedis.hget(logKey.toString(), logCount + "-" + i);
logMessage.append(mess);
}
// Put the concatenated message back into the original log
jsonObject.put("msg", logMessage.toString());
// Mark the merged log as complete
jsonObject.put("splitflag", "1-1");
fullBus = jsonObject.toString();
// Remove this log's fragments from Redis
jedis.del(logKey.toString());
// Count the fragments that were merged away
dataMergeLink.add(logCount - 1);
}
// Sweep the map for expired keys; delete them from Redis and count them
String[] unitAndNumber = CoreConfig.REDIS_DATATIMEOUT.split(",");
for (Iterator<Map.Entry<Long, String>> iterator = redisTimeOutMap.entrySet().iterator(); iterator.hasNext(); ) {
Long time = iterator.next().getKey();
// The key expires once the current time exceeds insertion time plus the timeout
if (System.currentTimeMillis() > TimeUtil.timeStampAtfertAdd(time, unitAndNumber[0], Integer.parseInt(unitAndNumber[1]))) {
// Delete the key from Redis
jedis.del(redisTimeOutMap.get(time));
// Count the timed-out key
dataTimeOutReids.add(1L);
// Drop the entry from the map
iterator.remove();
}
}
} catch (Exception e) {
log.warn("Redis error while merging log: " + bus, e);
} finally {
// Always return the connection to the pool
if (jedis != null) {
jedis.close();
}
}
return fullBus;
}
});
log.info("完成处理不完整日志");
JavaDStream<String> allFullBus = fullBus.union(fullBusAfterDeal);
allFullBus.cache();
// Extract effective fields from all complete logs (native and merged) and send them to Kafka
getEffectField(allFullBus,dataIllegalLink,dataOutput,dataFromKafka,dataMergeLink,dataTimeOutReids,dataFullOutput,dataNotFullOutput, brokerListBroadcast,topicBroadcast,linksBroadcast,dataFullNotBelongToLink,dataLink,streamapCount);
// Save the offsets to ZooKeeper
writeOffsetToZk(stream);
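// This scheduled task runs in the driver JVM; accumulator .value() is only meaningful there.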
Runnable runnable = new Runnable() {
@Override
public void run() {
// Periodically dump the counters to a file
WriteTextUtil.busErrtoFile(CoreConfig.BUS_COUNT, "busCount", System.currentTimeMillis() + ": records from kafka: " + dataFromKafka.value() + ", complete logs: " + dataFullOutput.value() + ", incomplete logs: " + dataNotFullOutput.value() + ", illegal logs: " + dataIllegalLink.value());
}
};
ScheduledExecutorService service = Executors.newSingleThreadScheduledExecutor();
// The second argument is the initial delay; the third is the period between runs
service.scheduleAtFixedRate(runnable, 0, Long.parseLong(CoreConfig.COUNT_PERIOD), TimeUnit.SECONDS);
jsc.start();
try {
jsc.awaitTermination();
} catch (Exception e) {
e.printStackTrace();
}
jsc.close();
}
// Extract the effective fields from each link's logs
public static void getEffectField(JavaDStream<String> jds,LongAccumulator dataIllegalLink,LongAccumulator dataOutput,LongAccumulator dataFromKafka,LongAccumulator dataMergeLink,LongAccumulator dataTimeOutReids,LongAccumulator dataFullOutput,LongAccumulator dataNotFullOutput,Broadcast<String> brokerListBroadcast,Broadcast<String> topicBroadcast,Broadcast<Object[]> linksBroadcast,LongAccumulator dataFullNotBelongToLink,LongAccumulator dataLink,LongAccumulator streamapCount){
// Some values in jds may be null or empty strings (failed merges), so guard before use
// Count the records entering field extraction and write a sample to a file
writeErrdataToFile(jds, dataLink);
// Filter the empty strings left over by failed merges and count them
JavaDStream<String> emptyNum = jds.filter(new Function<String, Boolean>() {
@Override
public Boolean call(String s) throws Exception {
return s.trim().isEmpty();
}
});
writeErrdataToFile(emptyNum,streamapCount);
JavaDStream<String> errorBus = jds.filter(new Function<String, Boolean>() {
@Override
public Boolean call(String fullBus) throws Exception {
boolean isLinkBusFlag = false;
if (!fullBus.trim().isEmpty()) {
// Unique identifiers of each link's logs
Object[] linksFlag = linksBroadcast.getValue();
// Collect, per link, whether this log matches it
ArrayList<Integer> isLinkList = new ArrayList<>();
for (int i = 0; i < linksFlag.length; i++) {
boolean isLink = true;
String[] linkFlags = linksFlag[i].toString().split("\\|\\|");
for (int j = 0; j < linkFlags.length; j++) {
if (!fullBus.contains(linkFlags[j])) {
isLink = false;
break;
}
}
// 0 marks a match, 1 a non-match
isLinkList.add(isLink ? 0 : 1);
}
// A log that matches no link at all is an error log to be filtered out
if (!isLinkList.contains(0)) {
isLinkBusFlag = true;
}
}
return isLinkBusFlag;
}
});
log.info("开始将非所需要环节的错误日志写入本地文件");
// 将错误日志存放到文件中 只存完整环节数中的错误环节日志 不完整的日志长度太长了,不建议记录
writeErrdataToFile(errorBus,dataFullNotBelongToLink);
log.info("完成将非所需要环节的错误日志写入本地文件");
// Filter out each link's logs by its unique identifiers
Object[] linksFlag = linksBroadcast.getValue();
for (int i = 0; i < linksFlag.length; i++) {
int finalI = i;
// Instrumentation
log.info("Start extracting fields for link " + BusInfoXmlConfig.busLinkNameS[finalI]);
JavaDStream<String> busLink = jds.filter(new Function<String, Boolean>() {
@Override
public Boolean call(String business) throws Exception {
// Check for null/empty before dereferencing
if (business == null || business.trim().isEmpty()) {
return false;
}
String[] linkFlags = linksFlag[finalI].toString().split("\\|\\|");
for (int j = 0; j < linkFlags.length; j++) {
if (!business.contains(linkFlags[j])) {
return false;
}
}
return true;
}
});
// Extract the effective fields from this link's logs
JavaDStream<String> linkNew = busLink.map(new Function<String, String>() {
@Override
public String call(String business) throws Exception {
// Parse the record into a JSON object
JSONObject busObject = JSON.parseObject(business);
// Extract the message payload
String msg = "";
try {
msg = busObject.get("msg").toString();
} catch (Exception e) {
log.warn("msg is missing: " + e);
}
String message = "";
if (msg.contains("\"message\":")) {
message = LogCutUtil.getValueOfJson(msg, "\"message\"");
} else {
message = msg;
}
HashMap<String, Object> linkMap = new HashMap<>();
// The fields to extract for this link
Object[] linksField = BusInfoXmlConfig.busLinkFieldS;
String[] linkFields = linksField[finalI].toString().split("&&&");
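// Field-spec grammar, as inferred from the parsing below (one "&&&"-separated entry per field):
//   type||outputName||args...   where type selects the extraction strategy:
//   1: substring between two delimiters   2: timestamp preceding a key
//   3: endTime - startTime                4: value after a key
//   5: JSON value                         6: status code match
//   7: joint time from header + message   8: two substrings joined by "_"
//   9: index into a "|"-separated message
//   A trailing expected-value argument turns the field into a 0/1 match flag.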
for (int j = 0; j < linkFields.length; j++) {
// Split a single field spec into its parts
String[] linkField = linkFields[j].split("\\|\\|");
if(linkField[0].equals("1")){
String effecField = LogCutUtil.getEffecField(message, linkField[2], linkField[3]);
if(linkField.length == 4){
linkMap.put(linkField[1],effecField);
}else if(linkField.length == 5){
if(effecField.equals(linkField[4])){
linkMap.put(linkField[1],1);
}else{
linkMap.put(linkField[1],0);
}
}else{
log.info(linkField[1]+"参数个数传递错误");
}
}else if(linkField[0].equals("2")){
String timeBeforeKey = LogCutUtil.getTimeStampBeforeKey(message, linkField[2]);
linkMap.put(linkField[1], timeBeforeKey);
}else if(linkField[0].equals("3")){
Long costTime = 0l;
// 获取开始时间并转换为微秒
try{
Long startTime = LogCutUtil.dateToTimeStamp(linkMap.get("startTime").toString());
linkMap.put("startTime",startTime);
// 获取结束时间并转换为微秒
Long endTime = LogCutUtil.dateToTimeStamp(linkMap.get("endTime").toString());
linkMap.put("endTime",endTime);
costTime = endTime - startTime;
}catch (Exception e){
log.warn("耗时计算异常"+e);
}
linkMap.put(linkField[1],costTime);
}else if(linkField[0].equals("4")){
String valueOfKey = LogCutUtil.getValueOfKey(message, linkField[2]);
if(linkField.length == 3){
linkMap.put(linkField[1],valueOfKey);
}else if(linkField.length == 4){
if(valueOfKey.equals(linkField[3])){
linkMap.put(linkField[1],1);
}else{
linkMap.put(linkField[1],0);
}
}else{
log.info(linkField[1]+"参数个数传递错误");
}
}else if(linkField[0].equals("5")){
String valueOfKey = LogCutUtil.getValueOfJson(message, linkField[2]);
if(linkField.length == 3){
// 解决同个Key,取字段不同的问题,只取有值的
if(linkMap.keySet().contains(linkField[1])){
Object o = linkMap.get(linkField[1]);
if (o==null || o.equals(" ")){
linkMap.put(linkField[1],valueOfKey);
}
}else {
linkMap.put(linkField[1],valueOfKey);
}
}else if(linkField.length == 4){
System.out.println(valueOfKey + linkField[3]);
if(valueOfKey.equals(linkField[3])){
linkMap.put(linkField[1],1);
}else{
linkMap.put(linkField[1],0);
}
}else{
log.info(linkField[1]+"参数个数传递错误");
}
}else if(linkField[0].equals("6")) {
String status = LogCutUtil.getValueOfStatus(message, linkField[2]);
if(status.equals(linkField[3])){
linkMap.put(linkField[1],1);
}else{
linkMap.put(linkField[1],0);
}
}else if(linkField[0].equals("7")) {
String jointTime = LogCutUtil.getJointTime(busObject, message, linkField[2],linkField[3]);
linkMap.put(linkField[1],jointTime);
}else if(linkField[0].equals("8")){
String v1 = LogCutUtil.getEffecField(message,linkField[2],linkField[3]);
String v2 = LogCutUtil.getEffecField(message,linkField[4],linkField[5]);
linkMap.put(linkField[1],v1+"_"+v2);
}else if(linkField[0].equals("9")){
String[] mess = message.split("\\|",-1);
if(linkField.length == 3){
if(linkField[1].equals("startTime")){
linkMap.put(linkField[1],LogCutUtil.stringToTimeStap(mess[Integer.parseInt(linkField[2])]));
}else if (linkField[1].equals("endTime")){
linkMap.put(linkField[1],LogCutUtil.stringToTimeStap(mess[Integer.parseInt(linkField[2])]));
}else{
linkMap.put(linkField[1],mess[Integer.parseInt(linkField[2])]);
}
}else if(linkField.length == 4){
if(mess[Integer.parseInt(linkField[2])].equals(linkField[3])){
linkMap.put(linkField[1],1);
}else{
linkMap.put(linkField[1],0);
}
}else{
log.info(linkField[1]+"参数个数传递错误");
}
} else {
log.warn(linkField[0]+"标识符错误");
}
}
// Copy every key except splitflag from the original object into linkMap
Set<String> keys = busObject.keySet();
for (String key : keys) {
if (!key.equals("splitflag")) {
linkMap.put(key, busObject.get(key));
}
}
// (Optional business-name tag)
// linkMap.put("busName",BusInfoXmlConfig.busNameS[0]);
// Tag the record with its link name
linkMap.put("stageName",BusInfoXmlConfig.busLinkNameS[finalI]);
String linkJson = JSONObject.toJSONString(linkMap);
return linkJson;
}
});
// Instrumentation
log.info("Finished extracting fields for link " + BusInfoXmlConfig.busLinkNameS[finalI]);
log.info("Start sending link " + BusInfoXmlConfig.busLinkNameS[finalI] + " to Kafka");
// Send the extracted records to Kafka
dataToKafka(linkNew,dataOutput,dataFromKafka,dataIllegalLink,dataMergeLink,dataTimeOutReids,dataFullOutput,dataNotFullOutput,brokerListBroadcast,topicBroadcast,dataFullNotBelongToLink,dataLink,streamapCount);
log.info("Finished sending link " + BusInfoXmlConfig.busLinkNameS[finalI] + " to Kafka");
}
}
// Send the records of a JavaDStream to Kafka
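// Delivery note: offsets are committed to ZooKeeper independently of this producer, so a
// failed and re-run batch may send duplicates (at-least-once delivery).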
public static void dataToKafka(JavaDStream<String> notNullBus,LongAccumulator dataOutput,LongAccumulator dataFromKafka,LongAccumulator dataIllegalLink,LongAccumulator dataMergeLink,LongAccumulator dataTimeOutReids,LongAccumulator dataFullOutput,LongAccumulator dataNotFullOutput,Broadcast<String> brokerListBroadcast,Broadcast<String> topicBroadcast,LongAccumulator dataFullNotBelongToLink,LongAccumulator dataLink,LongAccumulator streamapCount){
notNullBus.foreachRDD(new VoidFunction<JavaRDD<String>>() {
@Override
public void call(final JavaRDD<String> v1) throws Exception {
// Record the total number of records sent out
dataOutput.add(v1.count());
v1.foreachPartition(new VoidFunction<Iterator<String>>() {
@Override
public void call(Iterator<String> stringIterator) throws Exception {
while (stringIterator.hasNext()) {
Producer.send(topicBroadcast.value(), stringIterator.next());
}
// Zero-value adds: presumably these keep the accumulators referenced in the
// closure so Spark registers them and shows their totals in the UI
dataFromKafka.add(0L);
dataIllegalLink.add(0L);
dataMergeLink.add(0L);
dataTimeOutReids.add(0L);
dataFullOutput.add(0L);
dataNotFullOutput.add(0L);
dataFullNotBelongToLink.add(0L);
dataLink.add(0L);
streamapCount.add(0L);
}
});
}
});
}
/**
 * Write error logs to a local file and count them
 * @param errorBus stream of error logs
 * @param dataIllegalLink accumulator incremented by the number of error records
 */
public static void writeErrdataToFile(JavaDStream<String> errorBus,LongAccumulator dataIllegalLink){
// Write a sample of the error logs to a file and count them
errorBus.foreachRDD(new VoidFunction<JavaRDD<String>>() {
@Override
public void call(JavaRDD<String> stringJavaRDD) throws Exception {
long count = stringJavaRDD.count();
// Count the error records
dataIllegalLink.add(count);
// Sample a single record for the file
List<String> take = stringJavaRDD.take(1);
WriteTextUtil.busListErrtoFile(CoreConfig.ERROR_LOG, "busLinkError", take);
}
}
});
}
// public static void writeOffsetToZk(JavaInputDStream<ConsumerRecord<String, String>> stream,String zkTopicPath){
public static void writeOffsetToZk(JavaInputDStream<ConsumerRecord<String, String>> stream){
stream.foreachRDD(new VoidFunction<JavaRDD<ConsumerRecord<String, String>>>() {
@Override
public void call(JavaRDD<ConsumerRecord<String, String>> consumerRecordJavaRDD) throws Exception {
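// offsetRanges() must be read from the direct stream's own RDD, before any transformation
// (after map/repartition the RDD can no longer be cast to HasOffsetRanges)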
final OffsetRange[] offsets = ((HasOffsetRanges) consumerRecordJavaRDD.rdd()).offsetRanges();
consumerRecordJavaRDD.foreachPartition(new VoidFunction<Iterator<ConsumerRecord<String, String>>>() {
@Override
public void call(Iterator<ConsumerRecord<String, String>> consumerRecordIterator) throws Exception {
// Update the offsets in ZooKeeper. Note that every partition writes the full
// offset-range array, and that fromOffset (not untilOffset) is saved, so the
// current batch is re-consumed after a restart (at-least-once).
ZkClient zkClient = new ZkClient(CoreConfig.ZK_SERVERS, CoreConfig.ZK_SESSION_TIMEOUT,
CoreConfig.ZK_CONNECTION_TIMEOUT, new MyZkSerializer());
for (OffsetRange o : offsets) {
// String zkPath = zkTopicPath + "/" + o.topic()+ "/"+o.partition();
String zkPath = CoreConfig.ZK_KAFKA_OFFSET_NAME + "/" + o.topic() + "/" + o.partition();
KafkaOffsetUtil.createZkNode(zkClient, zkPath);
zkClient.writeData(zkPath, o.fromOffset());
log.info("Update offset data: topic[" + o.topic() + "]partition[" + o.partition() + "], offset= " + o.fromOffset());
}
// Close the ZooKeeper connection
zkClient.close();
}
});
}
});
}
}