Kafka Basics Tutorial (4): Java Producer and Consumer Demo

  1. KafkaDemoProducer (the producer)
package com.skindow.kafka;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.streams.StreamsConfig;

import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Created by Administrator on 2019/8/13.
 * Simulated producer
 */
@Slf4j
public class KafkaDemoProducer implements Runnable {
    // Unique identifier for this producer
    private String id;
    // Topic to produce to
    private String toptic;
    // Kafka broker host:port
    private String service;
    // Configuration properties
    private Properties props;
    private volatile boolean flag = true;
    // Count of messages produced so far
    private AtomicInteger i = new AtomicInteger(0);
    // Interval between sends, in milliseconds
    private long times;
    private KafkaProducer<String, String> kafkaProducer;
    private CountDownLatch latch;
    public KafkaDemoProducer(Builder builder) {
        this.toptic = builder.toptic;
        this.service = builder.service;
        this.id = builder.id;
        this.times = builder.times;
        this.latch = builder.latch;
        this.initProperties();
        kafkaProducer = new KafkaProducer<String, String>(props);
    }
    public void stop()
    {
        // Only flip the flag here; run() flushes and closes the producer after its
        // loop exits, so close() cannot race with an in-flight send() on the worker thread.
        flag = false;
    }
    public KafkaProducer<String,String> getKafkaProducer()
    {
        return kafkaProducer;
    }
    public Properties getProperties()
    {
        return props;
    }
    public void initProperties()
    {
        props = new Properties();
        // application.id is a Kafka Streams setting; the plain producer does not know it,
        // which is why the log below prints "The configuration 'application.id' was supplied
        // but isn't a known config." ProducerConfig.CLIENT_ID_CONFIG is the producer-side identifier.
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, this.id);
        // List of host:port pairs used to establish the initial connection to the Kafka cluster
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, this.service);
        // Serializers for record keys and values
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Acknowledgment level: 0 means the send counts as successful as soon as the client
        // hands it off; 1 means the leader acknowledges once it has written the record to its
        // local log, regardless of whether followers have replicated it; all means the leader
        // additionally waits for the full set of in-sync replicas to acknowledge.
        props.put("acks", "all");
        // Number of times the producer retries a failed send; 0 disables retries.
        props.put("retries", 0);
        // The producer batches records to reduce the number of requests, improving performance
        // between client and server; this sets the default batch size in bytes.
        props.put("batch.size", 16384);
        // The producer groups together any records that arrive between request transmissions
        // into a single batched request; this setting puts an upper bound on the artificial
        // delay the producer waits for additional records to batch.
        props.put("linger.ms", 1);
        // Total memory, in bytes, the producer may use to buffer records waiting to be sent
        props.put("buffer.memory", 33554432);
    }
    public static class Builder {
        private String toptic;
        private String service;
        private String id;
        private long times;
        private CountDownLatch latch;
        public Builder toptic(String toptic)
        {
            this.toptic = toptic;
            return this;
        }
        public Builder latch(CountDownLatch latch)
        {
            this.latch = latch;
            return this;
        }
        public Builder service(String service)
        {
            this.service = service;
            return this;
        }
        public Builder id(String id)
        {
            this.id = id;
            return this;
        }
        public Builder times(long times)
        {
            this.times = times;
            return this;
        }
        public KafkaDemoProducer build()
        {
            return new KafkaDemoProducer(this);
        }
    }
    @Override
    public void run() {
        while (flag)
        {
            log.info(id + " started producing to topic " + toptic);
            try {
                Thread.sleep(times);
            } catch (InterruptedException e) {
                log.error("Thread sleep error: ", e);
            }
            String key = id + "-key-" + i;
            String value = id + " Under production" + i;
            kafkaProducer.send(new ProducerRecord<String, String>(toptic, key, value), new Callback() {
                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if (e != null) {
                        // On failure the metadata carries no usable offset/timestamp, so bail out
                        log.error(id + " failed to produce " + key, e);
                        return;
                    }
                    Date date = new Date(recordMetadata.timestamp());
                    SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
                    String format = sdf.format(date);
                    log.info(id + " produced " + key + " at " + format);
                    i.incrementAndGet();
                    // offset() is the record's position in the partition, so this total also
                    // counts messages from earlier runs, not just this producer instance
                    log.info(id + " has produced " + (recordMetadata.offset() + 1) + " messages so far");
                    latch.countDown();
                }
            });

        }
        // stop() was called: flush anything still buffered and release the producer.
        kafkaProducer.flush();
        kafkaProducer.close();
    }
}
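
The demo above sends asynchronously and reacts in a Callback. When you just need to confirm a single delivery inline (for example in a smoke test), you can instead block on the Future that send() returns. A minimal, self-contained sketch, assuming the same broker address and topic as the demo; the class name SyncSendExample is made up for illustration:

package com.skindow.kafka;

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class SyncSendExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.50.67:9092"); // demo broker, adjust as needed
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // get() blocks until the broker acknowledges the record,
            // so send failures surface here as exceptions.
            RecordMetadata meta = producer
                    .send(new ProducerRecord<>("skindow-test", "key-0", "hello"))
                    .get();
            System.out.println("written to " + meta.topic() + "-" + meta.partition()
                    + " at offset " + meta.offset());
        }
    }
}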

  2. KafkaDemoConsumer (the consumer)
package com.skindow.kafka;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.streams.StreamsConfig;

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Created by Administrator on 2019/8/13.
 */
@Slf4j
public class KafkaDemoConsumer implements Runnable {
    // Unique identifier for this consumer
    private String id;
    // Topic to consume from
    private String toptic;
    // Kafka broker host:port
    private String service;
    // Configuration properties
    private Properties props;
    private volatile boolean flag = true;
    // Count of messages consumed so far
    private AtomicInteger i = new AtomicInteger(0);
    // Interval between polls, in milliseconds
    private long times;
    private KafkaConsumer<String, String> kafkaConsumer;
    public KafkaDemoConsumer(Builder builder) {
        this.toptic = builder.toptic;
        this.service = builder.service;
        this.id = builder.id;
        this.times = builder.times;
        this.initProperties();
        kafkaConsumer = new KafkaConsumer<String, String>(props);
    }
    public KafkaConsumer<String,String> getKafkaConsumer()
    {
        return kafkaConsumer;
    }
    public void initProperties()
    {
        props = new Properties();
        // application.id is a Kafka Streams setting; the consumer does not know it, which is
        // why the log prints "The configuration 'application.id' was supplied but isn't a
        // known config." group.id / client.id are the consumer-side identifiers.
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, this.id);
        // List of host:port pairs used to establish the initial connection to the Kafka cluster
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, this.service);
        // Unique string identifying the consumer group this process belongs to
        props.put("group.id", "skindow");
        // If true, the consumer's offsets are committed automatically in the background
        // (to Kafka itself; the new consumer no longer stores offsets in ZooKeeper)
        props.put("enable.auto.commit", "true");
        // Frequency, in milliseconds, at which offsets are auto-committed
        props.put("auto.commit.interval.ms", "1000");
        // Deserializers for record keys and values
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    }

    public static class Builder {
        private String toptic;
        private String service;
        private String id;
        private long times;
        public Builder toptic(String toptic)
        {
            this.toptic = toptic;
            return this;
        }
        public Builder service(String service)
        {
            this.service = service;
            return this;
        }
        public Builder id(String id)
        {
            this.id = id;
            return this;
        }
        public Builder times(long times)
        {
            this.times = times;
            return this;
        }
        public KafkaDemoConsumer build()
        {
            return new KafkaDemoConsumer(this);
        }
    }
    public void stop()
    {
        // KafkaConsumer is not thread-safe: wakeup() is the only method that may be
        // called from another thread. It makes a blocked poll() throw WakeupException,
        // and run() then closes the consumer on its own thread.
        flag = false;
        kafkaConsumer.wakeup();
    }
    @Override
    public void run() {
        // Subscribe once, before the poll loop; re-subscribing on every iteration is
        // unnecessary and can trigger needless group rebalances.
        kafkaConsumer.subscribe(Arrays.asList(toptic));
        try {
            while (flag) {
                log.info(id + " started consuming topic " + toptic);
                try {
                    Thread.sleep(times);
                } catch (InterruptedException e) {
                    log.error("Thread sleep error: ", e);
                }
                // Poll with a 500 ms timeout
                Duration duration = Duration.ofMillis(500);
                ConsumerRecords<String, String> poll = kafkaConsumer.poll(duration);
                for (ConsumerRecord<String, String> record : poll)
                {
                    log.info("========= consuming =========");
                    log.info(id + " consumer offset = {}, key = {}, value = {}", record.offset(), record.key(), record.value());
                }
            }
        } catch (WakeupException e) {
            // Expected when stop() calls wakeup(); fall through and close.
        } finally {
            kafkaConsumer.close();
        }
    }
}
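
This consumer relies on enable.auto.commit=true, so offsets are committed in the background every auto.commit.interval.ms, which can lose or re-read a few records around a crash. A common variant is to disable auto-commit and commit only after a batch has been processed, giving at-least-once semantics. A minimal sketch under that assumption; ManualCommitExample is a made-up name, while group.id and topic match the demo:

package com.skindow.kafka;

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class ManualCommitExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.50.67:9092"); // demo broker, adjust as needed
        props.put("group.id", "skindow");
        props.put("enable.auto.commit", "false"); // we commit offsets ourselves below
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Arrays.asList("skindow-test"));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset = %d, key = %s, value = %s%n",
                            record.offset(), record.key(), record.value());
                }
                // Committing after the loop means a crash before commitSync()
                // re-delivers the whole batch on restart instead of skipping it.
                consumer.commitSync();
            }
        }
    }
}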

  3. KafkaMain (test driver)
package com.skindow.kafka;

import lombok.extern.slf4j.Slf4j;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/**
 * Created by Administrator on 2019/8/13.
 */
@Slf4j
public class KafkaMain
{
    public static void main(String[] args)
    {
        CountDownLatch latch = new CountDownLatch(1);
        KafkaDemoProducer pro_test_1 = new KafkaDemoProducer.Builder().id("skindow-pro").service("192.168.50.67:9092").times(5000).toptic("skindow-test").latch(latch).build();
        KafkaDemoConsumer con_test_1 = new KafkaDemoConsumer.Builder().id("skindow-con").service("192.168.50.67:9092").times(5000).toptic("skindow-test").build();
        ExecutorService producerPool = Executors.newFixedThreadPool(1);
        producerPool.execute(pro_test_1);
        try {
            // Block until the producer's callback has confirmed at least one delivery.
            latch.await();
        } catch (InterruptedException e) {
            log.error("Interrupted while waiting for the first record", e);
            Thread.currentThread().interrupt();
        }
        log.info("======================");
        log.info("Starting the consumer thread");
        ExecutorService consumerPool = Executors.newFixedThreadPool(1);
        consumerPool.execute(con_test_1);
    }
}
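
Note that main() never stops either worker, so the process only ends when the JVM is killed (the debugger disconnect and the nonzero exit code at the end of the captured run below presumably come from stopping it in the IDE). One way to tidy this up, sketched here as a hypothetical fragment to append at the end of main() rather than as part of the committed demo, is a JVM shutdown hook that calls the stop() methods and shuts the pools down:

        // Hypothetical addition at the end of main(): stop both workers and the
        // pools when the JVM exits, so producer and consumer close cleanly.
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            pro_test_1.stop();   // lets the produce loop exit, then flush/close
            con_test_1.stop();   // wakeup() breaks the consumer out of poll()
            producerPool.shutdown();
            consumerPool.shutdown();
        }));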

The console output from a test run is shown below.

"C:\Program Files\Java\jdk1.8.0_131\bin\java" -agentlib:jdwp=transport=dt_socket,address=127.0.0.1:50040,suspend=y,server=n -Dfile.encoding=UTF-8 -classpath "C:\Program Files\Java\jdk1.8.0_131\jre\lib\charsets.jar;C:\Program Files\Java\jdk1.8.0_131\jre\lib\deploy.jar;C:\Program Files\Java\jdk1.8.0_131\jre\lib\ext\access-bridge-64.jar;C:\Program Files\Java\jdk1.8.0_131\jre\lib\ext\cldrdata.jar;C:\Program Files\Java\jdk1.8.0_131\jre\lib\ext\dnsns.jar;C:\Program Files\Java\jdk1.8.0_131\jre\lib\ext\jaccess.jar;C:\Program Files\Java\jdk1.8.0_131\jre\lib\ext\jfxrt.jar;C:\Program Files\Java\jdk1.8.0_131\jre\lib\ext\localedata.jar;C:\Program Files\Java\jdk1.8.0_131\jre\lib\ext\nashorn.jar;C:\Program Files\Java\jdk1.8.0_131\jre\lib\ext\sunec.jar;C:\Program Files\Java\jdk1.8.0_131\jre\lib\ext\sunjce_provider.jar;C:\Program Files\Java\jdk1.8.0_131\jre\lib\ext\sunmscapi.jar;C:\Program Files\Java\jdk1.8.0_131\jre\lib\ext\sunpkcs11.jar;C:\Program Files\Java\jdk1.8.0_131\jre\lib\ext\zipfs.jar;C:\Program Files\Java\jdk1.8.0_131\jre\lib\javaws.jar;C:\Program Files\Java\jdk1.8.0_131\jre\lib\jce.jar;C:\Program Files\Java\jdk1.8.0_131\jre\lib\jfr.jar;C:\Program Files\Java\jdk1.8.0_131\jre\lib\jfxswt.jar;C:\Program Files\Java\jdk1.8.0_131\jre\lib\jsse.jar;C:\Program Files\Java\jdk1.8.0_131\jre\lib\management-agent.jar;C:\Program Files\Java\jdk1.8.0_131\jre\lib\plugin.jar;C:\Program Files\Java\jdk1.8.0_131\jre\lib\resources.jar;C:\Program Files\Java\jdk1.8.0_131\jre\lib\rt.jar;D:\java_project\myfirstSpringboot\skindow-parent\skindow-provider\target\classes;D:\java_project\myfirstSpringboot\skindow-parent\skindow-api\target\classes;C:\Users\Administrator\.m2\repository\org\springframework\boot\spring-boot-starter\2.1.7.RELEASE\spring-boot-starter-2.1.7.RELEASE.jar;C:\Users\Administrator\.m2\repository\org\springframework\boot\spring-boot\2.1.7.RELEASE\spring-boot-2.1.7.RELEASE.jar;C:\Users\Administrator\.m2\repository\org\springframework\boot\spring-boot-autoconfigure\2.1.7.RELEASE\spring-boot-autoconfigure-2.1.7.RELEASE.jar;C:\Users\Administrator\.m2\repository\org\springframework\boot\spring-boot-starter-logging\2.1.7.RELEASE\spring-boot-starter-logging-2.1.7.RELEASE.jar;C:\Users\Administrator\.m2\repository\ch\qos\logback\logback-classic\1.2.3\logback-classic-1.2.3.jar;C:\Users\Administrator\.m2\repository\ch\qos\logback\logback-core\1.2.3\logback-core-1.2.3.jar;C:\Users\Administrator\.m2\repository\org\apache\logging\log4j\log4j-to-slf4j\2.11.2\log4j-to-slf4j-2.11.2.jar;C:\Users\Administrator\.m2\repository\org\apache\logging\log4j\log4j-api\2.11.2\log4j-api-2.11.2.jar;C:\Users\Administrator\.m2\repository\org\slf4j\jul-to-slf4j\1.7.26\jul-to-slf4j-1.7.26.jar;C:\Users\Administrator\.m2\repository\javax\annotation\javax.annotation-api\1.3.2\javax.annotation-api-1.3.2.jar;C:\Users\Administrator\.m2\repository\org\springframework\spring-core\5.1.9.RELEASE\spring-core-5.1.9.RELEASE.jar;C:\Users\Administrator\.m2\repository\org\springframework\spring-jcl\5.1.9.RELEASE\spring-jcl-5.1.9.RELEASE.jar;C:\Users\Administrator\.m2\repository\org\yaml\snakeyaml\1.23\snakeyaml-1.23.jar;C:\Users\Administrator\.m2\repository\org\springframework\boot\spring-boot-starter-web\2.1.7.RELEASE\spring-boot-starter-web-2.1.7.RELEASE.jar;C:\Users\Administrator\.m2\repository\org\springframework\boot\spring-boot-starter-json\2.1.7.RELEASE\spring-boot-starter-json-2.1.7.RELEASE.jar;C:\Users\Administrator\.m2\repository\com\fasterxml\jackson\datatype\jackson-datatype-jsr310\2.9.9\jackson-datatype-jsr310-2.9.9.jar;C:\Users\Administrator\.m
2\repository\com\fasterxml\jackson\module\jackson-module-parameter-names\2.9.9\jackson-module-parameter-names-2.9.9.jar;C:\Users\Administrator\.m2\repository\org\springframework\boot\spring-boot-starter-tomcat\2.1.7.RELEASE\spring-boot-starter-tomcat-2.1.7.RELEASE.jar;C:\Users\Administrator\.m2\repository\org\apache\tomcat\embed\tomcat-embed-core\9.0.22\tomcat-embed-core-9.0.22.jar;C:\Users\Administrator\.m2\repository\org\apache\tomcat\embed\tomcat-embed-el\9.0.22\tomcat-embed-el-9.0.22.jar;C:\Users\Administrator\.m2\repository\org\apache\tomcat\embed\tomcat-embed-websocket\9.0.22\tomcat-embed-websocket-9.0.22.jar;C:\Users\Administrator\.m2\repository\org\hibernate\validator\hibernate-validator\6.0.17.Final\hibernate-validator-6.0.17.Final.jar;C:\Users\Administrator\.m2\repository\javax\validation\validation-api\2.0.1.Final\validation-api-2.0.1.Final.jar;C:\Users\Administrator\.m2\repository\org\jboss\logging\jboss-logging\3.3.2.Final\jboss-logging-3.3.2.Final.jar;C:\Users\Administrator\.m2\repository\com\fasterxml\classmate\1.4.0\classmate-1.4.0.jar;C:\Users\Administrator\.m2\repository\org\springframework\spring-web\5.1.9.RELEASE\spring-web-5.1.9.RELEASE.jar;C:\Users\Administrator\.m2\repository\org\springframework\spring-webmvc\5.1.9.RELEASE\spring-webmvc-5.1.9.RELEASE.jar;C:\Users\Administrator\.m2\repository\org\springframework\spring-aop\5.1.9.RELEASE\spring-aop-5.1.9.RELEASE.jar;C:\Users\Administrator\.m2\repository\org\springframework\spring-expression\5.1.9.RELEASE\spring-expression-5.1.9.RELEASE.jar;C:\Users\Administrator\.m2\repository\org\projectlombok\lombok\1.18.8\lombok-1.18.8.jar;C:\Users\Administrator\.m2\repository\com\alibaba\dubbo\2.5.7\dubbo-2.5.7.jar;C:\Users\Administrator\.m2\repository\org\springframework\spring-context\5.1.9.RELEASE\spring-context-5.1.9.RELEASE.jar;C:\Users\Administrator\.m2\repository\org\springframework\spring-beans\5.1.9.RELEASE\spring-beans-5.1.9.RELEASE.jar;C:\Users\Administrator\.m2\repository\org\javassist\javassist\3.20.0-GA\javassist-3.20.0-GA.jar;C:\Users\Administrator\.m2\repository\org\jboss\netty\netty\3.2.5.Final\netty-3.2.5.Final.jar;C:\Users\Administrator\.m2\repository\org\apache\zookeeper\zookeeper\3.4.9\zookeeper-3.4.9.jar;C:\Users\Administrator\.m2\repository\org\slf4j\slf4j-api\1.7.26\slf4j-api-1.7.26.jar;C:\Users\Administrator\.m2\repository\jline\jline\0.9.94\jline-0.9.94.jar;C:\Users\Administrator\.m2\repository\io\netty\netty\3.10.5.Final\netty-3.10.5.Final.jar;C:\Users\Administrator\.m2\repository\com\101tec\zkclient\0.9\zkclient-0.9.jar;C:\Users\Administrator\.m2\repository\org\slf4j\slf4j-log4j12\1.7.26\slf4j-log4j12-1.7.26.jar;C:\Users\Administrator\.m2\repository\log4j\log4j\1.2.15\log4j-1.2.15.jar;C:\Users\Administrator\.m2\repository\javax\mail\mail\1.4\mail-1.4.jar;C:\Users\Administrator\.m2\repository\javax\activation\activation\1.1\activation-1.1.jar;C:\Users\Administrator\.m2\repository\com\fasterxml\jackson\core\jackson-databind\2.9.9\jackson-databind-2.9.9.jar;C:\Users\Administrator\.m2\repository\com\fasterxml\jackson\core\jackson-annotations\2.9.0\jackson-annotations-2.9.0.jar;C:\Users\Administrator\.m2\repository\com\fasterxml\jackson\core\jackson-core\2.9.9\jackson-core-2.9.9.jar;C:\Users\Administrator\.m2\repository\com\fasterxml\jackson\datatype\jackson-datatype-jdk8\2.9.9\jackson-datatype-jdk8-2.9.9.jar;C:\Users\Administrator\.m2\repository\org\apache\kafka\kafka-streams\2.3.0\kafka-streams-2.3.0.jar;C:\Users\Administrator\.m2\repository\org\apache\kafka\connect-json\2.0.1\connect-json-2.0.1.jar;C:\Users
\Administrator\.m2\repository\org\apache\kafka\connect-api\2.0.1\connect-api-2.0.1.jar;C:\Users\Administrator\.m2\repository\org\rocksdb\rocksdbjni\5.18.3\rocksdbjni-5.18.3.jar;C:\Users\Administrator\.m2\repository\org\apache\kafka\kafka-clients\2.3.0\kafka-clients-2.3.0.jar;C:\Users\Administrator\.m2\repository\com\github\luben\zstd-jni\1.4.0-1\zstd-jni-1.4.0-1.jar;C:\Users\Administrator\.m2\repository\org\lz4\lz4-java\1.6.0\lz4-java-1.6.0.jar;C:\Users\Administrator\.m2\repository\org\xerial\snappy\snappy-java\1.1.7.3\snappy-java-1.1.7.3.jar;C:\Users\Administrator\.m2\repository\com\alibaba\fastjson\1.2.58\fastjson-1.2.58.jar;D:\Program Files\JetBrains\IntelliJ IDEA 2017.1.4\lib\idea_rt.jar" com.skindow.kafka.KafkaMain
Connected to the target VM, address: '127.0.0.1:50040', transport: 'socket'
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/C:/Users/Administrator/.m2/repository/ch/qos/logback/logback-classic/1.2.3/logback-classic-1.2.3.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/C:/Users/Administrator/.m2/repository/org/slf4j/slf4j-log4j12/1.7.26/slf4j-log4j12-1.7.26.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [ch.qos.logback.classic.util.ContextSelectorStaticBinder]
16:43:04.595 [main] INFO org.apache.kafka.clients.producer.ProducerConfig - ProducerConfig values: 
	acks = all
	batch.size = 16384
	bootstrap.servers = [192.168.50.67:9092]
	buffer.memory = 33554432
	client.dns.lookup = default
	client.id = 
	compression.type = none
	connections.max.idle.ms = 540000
	delivery.timeout.ms = 120000
	enable.idempotence = false
	interceptor.classes = []
	key.serializer = class org.apache.kafka.common.serialization.StringSerializer
	linger.ms = 1
	max.block.ms = 60000
	max.in.flight.requests.per.connection = 5
	max.request.size = 1048576
	metadata.max.age.ms = 300000
	metric.reporters = []
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partitioner.class = class org.apache.kafka.clients.producer.internals.DefaultPartitioner
	receive.buffer.bytes = 32768
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retries = 0
	retry.backoff.ms = 100
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.mechanism = GSSAPI
	security.protocol = PLAINTEXT
	send.buffer.bytes = 131072
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
	ssl.endpoint.identification.algorithm = https
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLS
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.timeout.ms = 60000
	transactional.id = null
	value.serializer = class org.apache.kafka.common.serialization.StringSerializer

16:43:04.764 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name bufferpool-wait-time
16:43:04.782 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name buffer-exhausted-records
16:43:04.797 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name errors
16:43:04.819 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name produce-throttle-time
16:43:05.202 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name connections-closed:
16:43:05.203 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name connections-created:
16:43:05.204 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name successful-authentication:
16:43:05.204 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name successful-reauthentication:
16:43:05.205 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name successful-authentication-no-reauth:
16:43:05.205 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name failed-authentication:
16:43:05.205 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name failed-reauthentication:
16:43:05.206 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name reauthentication-latency:
16:43:05.206 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name bytes-sent-received:
16:43:05.207 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name bytes-sent:
16:43:05.208 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name bytes-received:
16:43:05.209 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name select-time:
16:43:05.210 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name io-time:
16:43:05.220 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name batch-size
16:43:05.221 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name compression-rate
16:43:05.221 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name queue-time
16:43:05.221 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name request-time
16:43:05.222 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name records-per-request
16:43:05.222 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name record-retries
16:43:05.223 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name record-size
16:43:05.225 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name batch-split-rate
16:43:05.227 [main] WARN org.apache.kafka.clients.producer.ProducerConfig - The configuration 'application.id' was supplied but isn't a known config.
16:43:05.227 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.clients.producer.internals.Sender - [Producer clientId=producer-1] Starting Kafka producer I/O thread.
16:43:05.228 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.clients.NetworkClient - [Producer clientId=producer-1] Initialize connection to node 192.168.50.67:9092 (id: -1 rack: null) for sending metadata request
16:43:05.231 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.clients.NetworkClient - [Producer clientId=producer-1] Initiating connection to node 192.168.50.67:9092 (id: -1 rack: null) using address /192.168.50.67
16:43:05.233 [main] INFO org.apache.kafka.common.utils.AppInfoParser - Kafka version: 2.3.0
16:43:05.233 [main] INFO org.apache.kafka.common.utils.AppInfoParser - Kafka commitId: fc1aaa116b661c8a
16:43:05.233 [main] INFO org.apache.kafka.common.utils.AppInfoParser - Kafka startTimeMs: 1565685785227
16:43:05.237 [main] DEBUG org.apache.kafka.clients.producer.KafkaProducer - [Producer clientId=producer-1] Kafka producer started
16:43:05.259 [main] INFO org.apache.kafka.clients.consumer.ConsumerConfig - ConsumerConfig values: 
	allow.auto.create.topics = true
	auto.commit.interval.ms = 1000
	auto.offset.reset = latest
	bootstrap.servers = [192.168.50.67:9092]
	check.crcs = true
	client.dns.lookup = default
	client.id = 
	client.rack = 
	connections.max.idle.ms = 540000
	default.api.timeout.ms = 60000
	enable.auto.commit = true
	exclude.internal.topics = true
	fetch.max.bytes = 52428800
	fetch.max.wait.ms = 500
	fetch.min.bytes = 1
	group.id = skindow
	group.instance.id = null
	heartbeat.interval.ms = 3000
	interceptor.classes = []
	internal.leave.group.on.close = true
	isolation.level = read_uncommitted
	key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
	max.partition.fetch.bytes = 1048576
	max.poll.interval.ms = 300000
	max.poll.records = 500
	metadata.max.age.ms = 300000
	metric.reporters = []
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor]
	receive.buffer.bytes = 65536
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	request.timeout.ms = 30000
	retry.backoff.ms = 100
	sasl.client.callback.handler.class = null
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.mechanism = GSSAPI
	security.protocol = PLAINTEXT
	send.buffer.bytes = 131072
	session.timeout.ms = 10000
	ssl.cipher.suites = null
	ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
	ssl.endpoint.identification.algorithm = https
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.protocol = TLS
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer

16:43:05.260 [main] DEBUG org.apache.kafka.clients.consumer.KafkaConsumer - [Consumer clientId=consumer-1, groupId=skindow] Initializing the Kafka consumer
16:43:05.285 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name fetch-throttle-time
16:43:05.286 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name connections-closed:
16:43:05.287 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name connections-created:
16:43:05.287 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name successful-authentication:
16:43:05.288 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name successful-reauthentication:
16:43:05.288 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name successful-authentication-no-reauth:
16:43:05.288 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name failed-authentication:
16:43:05.289 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name failed-reauthentication:
16:43:05.289 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name reauthentication-latency:
16:43:05.302 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name bytes-sent-received:
16:43:05.303 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name bytes-sent:
16:43:05.304 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name bytes-received:
16:43:05.305 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name select-time:
16:43:05.306 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name io-time:
16:43:05.307 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name node--1.bytes-sent
16:43:05.310 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name node--1.bytes-received
16:43:05.311 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name node--1.latency
16:43:05.319 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.common.network.Selector - [Producer clientId=producer-1] Created socket with SO_RCVBUF = 32768, SO_SNDBUF = 131072, SO_TIMEOUT = 0 to node -1
16:43:05.335 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name heartbeat-latency
16:43:05.337 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name join-latency
16:43:05.338 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name sync-latency
16:43:05.342 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name commit-latency
16:43:05.349 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name bytes-fetched
16:43:05.352 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name records-fetched
16:43:05.353 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name fetch-latency
16:43:05.353 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name records-lag
16:43:05.353 [main] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name records-lead
16:43:05.358 [main] WARN org.apache.kafka.clients.consumer.ConsumerConfig - The configuration 'application.id' was supplied but isn't a known config.
16:43:05.359 [main] INFO org.apache.kafka.common.utils.AppInfoParser - Kafka version: 2.3.0
16:43:05.359 [main] INFO org.apache.kafka.common.utils.AppInfoParser - Kafka commitId: fc1aaa116b661c8a
16:43:05.359 [main] INFO org.apache.kafka.common.utils.AppInfoParser - Kafka startTimeMs: 1565685785359
16:43:05.359 [main] DEBUG org.apache.kafka.clients.consumer.KafkaConsumer - [Consumer clientId=consumer-1, groupId=skindow] Kafka consumer initialized
16:43:05.363 [pool-1-thread-1] INFO com.skindow.kafka.KafkaDemoProducer - skindow-pro started producing to topic skindow-test
16:43:05.697 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.clients.NetworkClient - [Producer clientId=producer-1] Completed connection to node -1. Fetching API versions.
16:43:05.697 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.clients.NetworkClient - [Producer clientId=producer-1] Initiating API versions fetch from node -1.
16:43:05.720 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.clients.NetworkClient - [Producer clientId=producer-1] Recorded API versions for node -1: (Produce(0): 0 to 7 [usable: 7], Fetch(1): 0 to 11 [usable: 11], ListOffsets(2): 0 to 5 [usable: 5], Metadata(3): 0 to 8 [usable: 8], LeaderAndIsr(4): 0 to 2 [usable: 2], StopReplica(5): 0 to 1 [usable: 1], UpdateMetadata(6): 0 to 5 [usable: 5], ControlledShutdown(7): 0 to 2 [usable: 2], OffsetCommit(8): 0 to 7 [usable: 7], OffsetFetch(9): 0 to 5 [usable: 5], FindCoordinator(10): 0 to 2 [usable: 2], JoinGroup(11): 0 to 5 [usable: 5], Heartbeat(12): 0 to 3 [usable: 3], LeaveGroup(13): 0 to 2 [usable: 2], SyncGroup(14): 0 to 3 [usable: 3], DescribeGroups(15): 0 to 3 [usable: 3], ListGroups(16): 0 to 2 [usable: 2], SaslHandshake(17): 0 to 1 [usable: 1], ApiVersions(18): 0 to 2 [usable: 2], CreateTopics(19): 0 to 3 [usable: 3], DeleteTopics(20): 0 to 3 [usable: 3], DeleteRecords(21): 0 to 1 [usable: 1], InitProducerId(22): 0 to 1 [usable: 1], OffsetForLeaderEpoch(23): 0 to 3 [usable: 3], AddPartitionsToTxn(24): 0 to 1 [usable: 1], AddOffsetsToTxn(25): 0 to 1 [usable: 1], EndTxn(26): 0 to 1 [usable: 1], WriteTxnMarkers(27): 0 [usable: 0], TxnOffsetCommit(28): 0 to 2 [usable: 2], DescribeAcls(29): 0 to 1 [usable: 1], CreateAcls(30): 0 to 1 [usable: 1], DeleteAcls(31): 0 to 1 [usable: 1], DescribeConfigs(32): 0 to 2 [usable: 2], AlterConfigs(33): 0 to 1 [usable: 1], AlterReplicaLogDirs(34): 0 to 1 [usable: 1], DescribeLogDirs(35): 0 to 1 [usable: 1], SaslAuthenticate(36): 0 to 1 [usable: 1], CreatePartitions(37): 0 to 1 [usable: 1], CreateDelegationToken(38): 0 to 1 [usable: 1], RenewDelegationToken(39): 0 to 1 [usable: 1], ExpireDelegationToken(40): 0 to 1 [usable: 1], DescribeDelegationToken(41): 0 to 1 [usable: 1], DeleteGroups(42): 0 to 1 [usable: 1], ElectPreferredLeaders(43): 0 [usable: 0], IncrementalAlterConfigs(44): 0 [usable: 0])
16:43:05.730 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.clients.NetworkClient - [Producer clientId=producer-1] Sending metadata request MetadataRequestData(topics=[], allowAutoTopicCreation=true, includeClusterAuthorizedOperations=false, includeTopicAuthorizedOperations=false) to node 192.168.50.67:9092 (id: -1 rack: null)
16:43:05.760 [kafka-producer-network-thread | producer-1] INFO org.apache.kafka.clients.Metadata - [Producer clientId=producer-1] Cluster ID: v9szchcSSqSySx1-Tv2eug
16:43:05.760 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.clients.Metadata - [Producer clientId=producer-1] Updated cluster metadata updateVersion 2 to MetadataCache{cluster=Cluster(id = v9szchcSSqSySx1-Tv2eug, nodes = [192.168.50.67:9092 (id: 0 rack: null)], partitions = [], controller = 192.168.50.67:9092 (id: 0 rack: null))}
16:43:10.370 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.clients.NetworkClient - [Producer clientId=producer-1] Initialize connection to node 192.168.50.67:9092 (id: 0 rack: null) for sending metadata request
16:43:10.370 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.clients.NetworkClient - [Producer clientId=producer-1] Initiating connection to node 192.168.50.67:9092 (id: 0 rack: null) using address /192.168.50.67
16:43:10.371 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name node-0.bytes-sent
16:43:10.372 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name node-0.bytes-received
16:43:10.373 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name node-0.latency
16:43:10.374 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.common.network.Selector - [Producer clientId=producer-1] Created socket with SO_RCVBUF = 32768, SO_SNDBUF = 131072, SO_TIMEOUT = 0 to node 0
16:43:10.374 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.clients.NetworkClient - [Producer clientId=producer-1] Completed connection to node 0. Fetching API versions.
16:43:10.374 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.clients.NetworkClient - [Producer clientId=producer-1] Initiating API versions fetch from node 0.
16:43:10.384 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.clients.NetworkClient - [Producer clientId=producer-1] Recorded API versions for node 0: (Produce(0): 0 to 7 [usable: 7], Fetch(1): 0 to 11 [usable: 11], ListOffsets(2): 0 to 5 [usable: 5], Metadata(3): 0 to 8 [usable: 8], LeaderAndIsr(4): 0 to 2 [usable: 2], StopReplica(5): 0 to 1 [usable: 1], UpdateMetadata(6): 0 to 5 [usable: 5], ControlledShutdown(7): 0 to 2 [usable: 2], OffsetCommit(8): 0 to 7 [usable: 7], OffsetFetch(9): 0 to 5 [usable: 5], FindCoordinator(10): 0 to 2 [usable: 2], JoinGroup(11): 0 to 5 [usable: 5], Heartbeat(12): 0 to 3 [usable: 3], LeaveGroup(13): 0 to 2 [usable: 2], SyncGroup(14): 0 to 3 [usable: 3], DescribeGroups(15): 0 to 3 [usable: 3], ListGroups(16): 0 to 2 [usable: 2], SaslHandshake(17): 0 to 1 [usable: 1], ApiVersions(18): 0 to 2 [usable: 2], CreateTopics(19): 0 to 3 [usable: 3], DeleteTopics(20): 0 to 3 [usable: 3], DeleteRecords(21): 0 to 1 [usable: 1], InitProducerId(22): 0 to 1 [usable: 1], OffsetForLeaderEpoch(23): 0 to 3 [usable: 3], AddPartitionsToTxn(24): 0 to 1 [usable: 1], AddOffsetsToTxn(25): 0 to 1 [usable: 1], EndTxn(26): 0 to 1 [usable: 1], WriteTxnMarkers(27): 0 [usable: 0], TxnOffsetCommit(28): 0 to 2 [usable: 2], DescribeAcls(29): 0 to 1 [usable: 1], CreateAcls(30): 0 to 1 [usable: 1], DeleteAcls(31): 0 to 1 [usable: 1], DescribeConfigs(32): 0 to 2 [usable: 2], AlterConfigs(33): 0 to 1 [usable: 1], AlterReplicaLogDirs(34): 0 to 1 [usable: 1], DescribeLogDirs(35): 0 to 1 [usable: 1], SaslAuthenticate(36): 0 to 1 [usable: 1], CreatePartitions(37): 0 to 1 [usable: 1], CreateDelegationToken(38): 0 to 1 [usable: 1], RenewDelegationToken(39): 0 to 1 [usable: 1], ExpireDelegationToken(40): 0 to 1 [usable: 1], DescribeDelegationToken(41): 0 to 1 [usable: 1], DeleteGroups(42): 0 to 1 [usable: 1], ElectPreferredLeaders(43): 0 [usable: 0], IncrementalAlterConfigs(44): 0 [usable: 0])
16:43:10.386 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.clients.NetworkClient - [Producer clientId=producer-1] Sending metadata request MetadataRequestData(topics=[MetadataRequestTopic(name='skindow-test')], allowAutoTopicCreation=true, includeClusterAuthorizedOperations=false, includeTopicAuthorizedOperations=false) to node 192.168.50.67:9092 (id: 0 rack: null)
16:43:10.397 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.clients.Metadata - [Producer clientId=producer-1] Updating last seen epoch from null to 0 for partition skindow-test-0
16:43:10.399 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.clients.Metadata - [Producer clientId=producer-1] Updated cluster metadata updateVersion 3 to MetadataCache{cluster=Cluster(id = v9szchcSSqSySx1-Tv2eug, nodes = [192.168.50.67:9092 (id: 0 rack: null)], partitions = [Partition(topic = skindow-test, partition = 0, leader = 0, replicas = [0], isr = [0], offlineReplicas = [])], controller = 192.168.50.67:9092 (id: 0 rack: null))}
16:43:10.424 [pool-1-thread-1] INFO com.skindow.kafka.KafkaDemoProducer - skindow-pro started producing to topic skindow-test
16:43:10.431 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name topic.skindow-test.records-per-batch
16:43:10.432 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name topic.skindow-test.bytes
16:43:10.432 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name topic.skindow-test.compression-rate
16:43:10.432 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name topic.skindow-test.record-retries
16:43:10.432 [kafka-producer-network-thread | producer-1] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name topic.skindow-test.record-errors
16:43:10.442 [kafka-producer-network-thread | producer-1] INFO com.skindow.kafka.KafkaDemoProducer - skindow-pro produced skindow-pro-key-0 at 2019-08-13 16:43:10
16:43:10.442 [kafka-producer-network-thread | producer-1] INFO com.skindow.kafka.KafkaDemoProducer - skindow-pro has produced 18 messages so far
16:43:10.442 [main] INFO com.skindow.kafka.KafkaMain - ======================
16:43:10.442 [main] INFO com.skindow.kafka.KafkaMain - Starting the consumer thread
16:43:10.443 [pool-2-thread-1] INFO com.skindow.kafka.KafkaDemoConsumer - skindow-con started consuming topic skindow-test
16:43:15.424 [pool-1-thread-1] INFO com.skindow.kafka.KafkaDemoProducer - skindow-pro started producing to topic skindow-test
16:43:15.427 [kafka-producer-network-thread | producer-1] INFO com.skindow.kafka.KafkaDemoProducer - skindow-pro produced skindow-pro-key-1 at 2019-08-13 16:43:15
16:43:15.427 [kafka-producer-network-thread | producer-1] INFO com.skindow.kafka.KafkaDemoProducer - skindow-pro has produced 19 messages so far
16:43:15.444 [pool-2-thread-1] INFO org.apache.kafka.clients.consumer.KafkaConsumer - [Consumer clientId=consumer-1, groupId=skindow] Subscribed to topic(s): skindow-test
16:43:15.446 [pool-2-thread-1] DEBUG org.apache.kafka.clients.consumer.internals.AbstractCoordinator - [Consumer clientId=consumer-1, groupId=skindow] Sending FindCoordinator request to broker 192.168.50.67:9092 (id: -1 rack: null)
16:43:15.450 [pool-2-thread-1] DEBUG org.apache.kafka.clients.NetworkClient - [Consumer clientId=consumer-1, groupId=skindow] Initiating connection to node 192.168.50.67:9092 (id: -1 rack: null) using address /192.168.50.67
16:43:15.452 [pool-2-thread-1] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name node--1.bytes-sent
16:43:15.453 [pool-2-thread-1] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name node--1.bytes-received
16:43:15.453 [pool-2-thread-1] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name node--1.latency
16:43:15.454 [pool-2-thread-1] DEBUG org.apache.kafka.common.network.Selector - [Consumer clientId=consumer-1, groupId=skindow] Created socket with SO_RCVBUF = 65536, SO_SNDBUF = 131072, SO_TIMEOUT = 0 to node -1
16:43:15.454 [pool-2-thread-1] DEBUG org.apache.kafka.clients.NetworkClient - [Consumer clientId=consumer-1, groupId=skindow] Completed connection to node -1. Fetching API versions.
16:43:15.454 [pool-2-thread-1] DEBUG org.apache.kafka.clients.NetworkClient - [Consumer clientId=consumer-1, groupId=skindow] Initiating API versions fetch from node -1.
16:43:15.457 [pool-2-thread-1] DEBUG org.apache.kafka.clients.NetworkClient - [Consumer clientId=consumer-1, groupId=skindow] Recorded API versions for node -1: (Produce(0): 0 to 7 [usable: 7], Fetch(1): 0 to 11 [usable: 11], ListOffsets(2): 0 to 5 [usable: 5], Metadata(3): 0 to 8 [usable: 8], LeaderAndIsr(4): 0 to 2 [usable: 2], StopReplica(5): 0 to 1 [usable: 1], UpdateMetadata(6): 0 to 5 [usable: 5], ControlledShutdown(7): 0 to 2 [usable: 2], OffsetCommit(8): 0 to 7 [usable: 7], OffsetFetch(9): 0 to 5 [usable: 5], FindCoordinator(10): 0 to 2 [usable: 2], JoinGroup(11): 0 to 5 [usable: 5], Heartbeat(12): 0 to 3 [usable: 3], LeaveGroup(13): 0 to 2 [usable: 2], SyncGroup(14): 0 to 3 [usable: 3], DescribeGroups(15): 0 to 3 [usable: 3], ListGroups(16): 0 to 2 [usable: 2], SaslHandshake(17): 0 to 1 [usable: 1], ApiVersions(18): 0 to 2 [usable: 2], CreateTopics(19): 0 to 3 [usable: 3], DeleteTopics(20): 0 to 3 [usable: 3], DeleteRecords(21): 0 to 1 [usable: 1], InitProducerId(22): 0 to 1 [usable: 1], OffsetForLeaderEpoch(23): 0 to 3 [usable: 3], AddPartitionsToTxn(24): 0 to 1 [usable: 1], AddOffsetsToTxn(25): 0 to 1 [usable: 1], EndTxn(26): 0 to 1 [usable: 1], WriteTxnMarkers(27): 0 [usable: 0], TxnOffsetCommit(28): 0 to 2 [usable: 2], DescribeAcls(29): 0 to 1 [usable: 1], CreateAcls(30): 0 to 1 [usable: 1], DeleteAcls(31): 0 to 1 [usable: 1], DescribeConfigs(32): 0 to 2 [usable: 2], AlterConfigs(33): 0 to 1 [usable: 1], AlterReplicaLogDirs(34): 0 to 1 [usable: 1], DescribeLogDirs(35): 0 to 1 [usable: 1], SaslAuthenticate(36): 0 to 1 [usable: 1], CreatePartitions(37): 0 to 1 [usable: 1], CreateDelegationToken(38): 0 to 1 [usable: 1], RenewDelegationToken(39): 0 to 1 [usable: 1], ExpireDelegationToken(40): 0 to 1 [usable: 1], DescribeDelegationToken(41): 0 to 1 [usable: 1], DeleteGroups(42): 0 to 1 [usable: 1], ElectPreferredLeaders(43): 0 [usable: 0], IncrementalAlterConfigs(44): 0 [usable: 0])
16:43:15.457 [pool-2-thread-1] DEBUG org.apache.kafka.clients.NetworkClient - [Consumer clientId=consumer-1, groupId=skindow] Sending metadata request MetadataRequestData(topics=[MetadataRequestTopic(name='skindow-test')], allowAutoTopicCreation=true, includeClusterAuthorizedOperations=false, includeTopicAuthorizedOperations=false) to node 192.168.50.67:9092 (id: -1 rack: null)
16:43:15.461 [pool-2-thread-1] DEBUG org.apache.kafka.clients.Metadata - [Consumer clientId=consumer-1, groupId=skindow] Updating last seen epoch from null to 0 for partition skindow-test-0
16:43:15.461 [pool-2-thread-1] INFO org.apache.kafka.clients.Metadata - [Consumer clientId=consumer-1, groupId=skindow] Cluster ID: v9szchcSSqSySx1-Tv2eug
16:43:15.462 [pool-2-thread-1] DEBUG org.apache.kafka.clients.Metadata - [Consumer clientId=consumer-1, groupId=skindow] Updated cluster metadata updateVersion 2 to MetadataCache{cluster=Cluster(id = v9szchcSSqSySx1-Tv2eug, nodes = [192.168.50.67:9092 (id: 0 rack: null)], partitions = [Partition(topic = skindow-test, partition = 0, leader = 0, replicas = [0], isr = [0], offlineReplicas = [])], controller = 192.168.50.67:9092 (id: 0 rack: null))}
16:43:15.465 [pool-2-thread-1] DEBUG org.apache.kafka.clients.consumer.internals.AbstractCoordinator - [Consumer clientId=consumer-1, groupId=skindow] Received FindCoordinator response ClientResponse(receivedTimeMs=1565685795464, latencyMs=17, disconnected=false, requestHeader=RequestHeader(apiKey=FIND_COORDINATOR, apiVersion=2, clientId=consumer-1, correlationId=0), responseBody=FindCoordinatorResponseData(throttleTimeMs=0, errorCode=0, errorMessage='NONE', nodeId=0, host='192.168.50.67', port=9092))
16:43:15.466 [pool-2-thread-1] INFO org.apache.kafka.clients.consumer.internals.AbstractCoordinator - [Consumer clientId=consumer-1, groupId=skindow] Discovered group coordinator 192.168.50.67:9092 (id: 2147483647 rack: null)
16:43:15.466 [pool-2-thread-1] DEBUG org.apache.kafka.clients.NetworkClient - [Consumer clientId=consumer-1, groupId=skindow] Initiating connection to node 192.168.50.67:9092 (id: 2147483647 rack: null) using address /192.168.50.67
16:43:15.472 [pool-2-thread-1] DEBUG org.apache.kafka.clients.consumer.internals.ConsumerCoordinator - [Consumer clientId=consumer-1, groupId=skindow] Sending synchronous auto-commit of offsets {}
16:43:15.472 [pool-2-thread-1] INFO org.apache.kafka.clients.consumer.internals.ConsumerCoordinator - [Consumer clientId=consumer-1, groupId=skindow] Revoking previously assigned partitions []
16:43:15.472 [pool-2-thread-1] DEBUG org.apache.kafka.clients.consumer.internals.AbstractCoordinator - [Consumer clientId=consumer-1, groupId=skindow] Disabling heartbeat thread
16:43:15.472 [pool-2-thread-1] INFO org.apache.kafka.clients.consumer.internals.AbstractCoordinator - [Consumer clientId=consumer-1, groupId=skindow] (Re-)joining group
16:43:15.474 [pool-2-thread-1] DEBUG org.apache.kafka.clients.consumer.internals.ConsumerCoordinator - [Consumer clientId=consumer-1, groupId=skindow] Joining group with current subscription: [skindow-test]
16:43:15.479 [pool-2-thread-1] DEBUG org.apache.kafka.clients.consumer.internals.AbstractCoordinator - [Consumer clientId=consumer-1, groupId=skindow] Sending JoinGroup (JoinGroupRequestData(groupId='skindow', sessionTimeoutMs=10000, rebalanceTimeoutMs=300000, memberId='', groupInstanceId='null', protocolType='consumer', protocols=[JoinGroupRequestProtocol(name='range', metadata=[0, 0, 0, 0, 0, 1, 0, 12, 115, 107, 105, 110, 100, 111, 119, 45, 116, 101, 115, 116, 0, 0, 0, 0])])) to coordinator 192.168.50.67:9092 (id: 2147483647 rack: null)
16:43:15.471 [kafka-coordinator-heartbeat-thread | skindow] DEBUG org.apache.kafka.clients.consumer.internals.AbstractCoordinator - [Consumer clientId=consumer-1, groupId=skindow] Heartbeat thread started
16:43:15.481 [pool-2-thread-1] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name node-2147483647.bytes-sent
16:43:15.482 [pool-2-thread-1] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name node-2147483647.bytes-received
16:43:15.483 [pool-2-thread-1] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name node-2147483647.latency
16:43:15.484 [pool-2-thread-1] DEBUG org.apache.kafka.common.network.Selector - [Consumer clientId=consumer-1, groupId=skindow] Created socket with SO_RCVBUF = 65536, SO_SNDBUF = 131072, SO_TIMEOUT = 0 to node 2147483647
16:43:15.484 [pool-2-thread-1] DEBUG org.apache.kafka.clients.NetworkClient - [Consumer clientId=consumer-1, groupId=skindow] Completed connection to node 2147483647. Fetching API versions.
16:43:15.484 [pool-2-thread-1] DEBUG org.apache.kafka.clients.NetworkClient - [Consumer clientId=consumer-1, groupId=skindow] Initiating API versions fetch from node 2147483647.
16:43:15.486 [pool-2-thread-1] DEBUG org.apache.kafka.clients.NetworkClient - [Consumer clientId=consumer-1, groupId=skindow] Recorded API versions for node 2147483647: (Produce(0): 0 to 7 [usable: 7], Fetch(1): 0 to 11 [usable: 11], ListOffsets(2): 0 to 5 [usable: 5], Metadata(3): 0 to 8 [usable: 8], LeaderAndIsr(4): 0 to 2 [usable: 2], StopReplica(5): 0 to 1 [usable: 1], UpdateMetadata(6): 0 to 5 [usable: 5], ControlledShutdown(7): 0 to 2 [usable: 2], OffsetCommit(8): 0 to 7 [usable: 7], OffsetFetch(9): 0 to 5 [usable: 5], FindCoordinator(10): 0 to 2 [usable: 2], JoinGroup(11): 0 to 5 [usable: 5], Heartbeat(12): 0 to 3 [usable: 3], LeaveGroup(13): 0 to 2 [usable: 2], SyncGroup(14): 0 to 3 [usable: 3], DescribeGroups(15): 0 to 3 [usable: 3], ListGroups(16): 0 to 2 [usable: 2], SaslHandshake(17): 0 to 1 [usable: 1], ApiVersions(18): 0 to 2 [usable: 2], CreateTopics(19): 0 to 3 [usable: 3], DeleteTopics(20): 0 to 3 [usable: 3], DeleteRecords(21): 0 to 1 [usable: 1], InitProducerId(22): 0 to 1 [usable: 1], OffsetForLeaderEpoch(23): 0 to 3 [usable: 3], AddPartitionsToTxn(24): 0 to 1 [usable: 1], AddOffsetsToTxn(25): 0 to 1 [usable: 1], EndTxn(26): 0 to 1 [usable: 1], WriteTxnMarkers(27): 0 [usable: 0], TxnOffsetCommit(28): 0 to 2 [usable: 2], DescribeAcls(29): 0 to 1 [usable: 1], CreateAcls(30): 0 to 1 [usable: 1], DeleteAcls(31): 0 to 1 [usable: 1], DescribeConfigs(32): 0 to 2 [usable: 2], AlterConfigs(33): 0 to 1 [usable: 1], AlterReplicaLogDirs(34): 0 to 1 [usable: 1], DescribeLogDirs(35): 0 to 1 [usable: 1], SaslAuthenticate(36): 0 to 1 [usable: 1], CreatePartitions(37): 0 to 1 [usable: 1], CreateDelegationToken(38): 0 to 1 [usable: 1], RenewDelegationToken(39): 0 to 1 [usable: 1], ExpireDelegationToken(40): 0 to 1 [usable: 1], DescribeDelegationToken(41): 0 to 1 [usable: 1], DeleteGroups(42): 0 to 1 [usable: 1], ElectPreferredLeaders(43): 0 [usable: 0], IncrementalAlterConfigs(44): 0 [usable: 0])
16:43:15.489 [pool-2-thread-1] DEBUG org.apache.kafka.clients.consumer.internals.AbstractCoordinator - [Consumer clientId=consumer-1, groupId=skindow] Disabling heartbeat thread
16:43:15.489 [pool-2-thread-1] INFO org.apache.kafka.clients.consumer.internals.AbstractCoordinator - [Consumer clientId=consumer-1, groupId=skindow] (Re-)joining group
16:43:15.489 [pool-2-thread-1] DEBUG org.apache.kafka.clients.consumer.internals.ConsumerCoordinator - [Consumer clientId=consumer-1, groupId=skindow] Joining group with current subscription: [skindow-test]
16:43:15.489 [pool-2-thread-1] DEBUG org.apache.kafka.clients.consumer.internals.AbstractCoordinator - [Consumer clientId=consumer-1, groupId=skindow] Sending JoinGroup (JoinGroupRequestData(groupId='skindow', sessionTimeoutMs=10000, rebalanceTimeoutMs=300000, memberId='consumer-1-651e3acc-6375-459b-923e-3e2db7dd738f', groupInstanceId='null', protocolType='consumer', protocols=[JoinGroupRequestProtocol(name='range', metadata=[0, 0, 0, 0, 0, 1, 0, 12, 115, 107, 105, 110, 100, 111, 119, 45, 116, 101, 115, 116, 0, 0, 0, 0])])) to coordinator 192.168.50.67:9092 (id: 2147483647 rack: null)
16:43:15.493 [pool-2-thread-1] DEBUG org.apache.kafka.clients.consumer.internals.AbstractCoordinator - [Consumer clientId=consumer-1, groupId=skindow] Received successful JoinGroup response: JoinGroupResponseData(throttleTimeMs=0, errorCode=0, generationId=21, protocolName='range', leader='consumer-1-651e3acc-6375-459b-923e-3e2db7dd738f', memberId='consumer-1-651e3acc-6375-459b-923e-3e2db7dd738f', members=[JoinGroupResponseMember(memberId='consumer-1-651e3acc-6375-459b-923e-3e2db7dd738f', groupInstanceId='null', metadata=[0, 0, 0, 0, 0, 1, 0, 12, 115, 107, 105, 110, 100, 111, 119, 45, 116, 101, 115, 116, 0, 0, 0, 0])])
16:43:15.493 [pool-2-thread-1] DEBUG org.apache.kafka.clients.consumer.internals.ConsumerCoordinator - [Consumer clientId=consumer-1, groupId=skindow] Performing assignment using strategy range with subscriptions {consumer-1-651e3acc-6375-459b-923e-3e2db7dd738f=Subscription(topics=[skindow-test])}
16:43:15.495 [pool-2-thread-1] DEBUG org.apache.kafka.clients.consumer.internals.ConsumerCoordinator - [Consumer clientId=consumer-1, groupId=skindow] Finished assignment for group: {consumer-1-651e3acc-6375-459b-923e-3e2db7dd738f=Assignment(partitions=[skindow-test-0])}
16:43:15.496 [pool-2-thread-1] DEBUG org.apache.kafka.clients.consumer.internals.AbstractCoordinator - [Consumer clientId=consumer-1, groupId=skindow] Sending leader SyncGroup to coordinator 192.168.50.67:9092 (id: 2147483647 rack: null): SyncGroupRequestData(groupId='skindow', generationId=21, memberId='consumer-1-651e3acc-6375-459b-923e-3e2db7dd738f', groupInstanceId='null', assignments=[SyncGroupRequestAssignment(memberId='consumer-1-651e3acc-6375-459b-923e-3e2db7dd738f', assignment=[0, 0, 0, 0, 0, 1, 0, 12, 115, 107, 105, 110, 100, 111, 119, 45, 116, 101, 115, 116, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0])])
16:43:15.502 [pool-2-thread-1] INFO org.apache.kafka.clients.consumer.internals.AbstractCoordinator - [Consumer clientId=consumer-1, groupId=skindow] Successfully joined group with generation 21
16:43:15.502 [pool-2-thread-1] DEBUG org.apache.kafka.clients.consumer.internals.AbstractCoordinator - [Consumer clientId=consumer-1, groupId=skindow] Enabling heartbeat thread
16:43:15.508 [pool-2-thread-1] INFO org.apache.kafka.clients.consumer.internals.ConsumerCoordinator - [Consumer clientId=consumer-1, groupId=skindow] Setting newly assigned partitions: skindow-test-0
16:43:15.530 [pool-2-thread-1] DEBUG org.apache.kafka.clients.consumer.internals.ConsumerCoordinator - [Consumer clientId=consumer-1, groupId=skindow] Fetching committed offsets for partitions: [skindow-test-0]
16:43:15.536 [pool-2-thread-1] INFO org.apache.kafka.clients.consumer.internals.ConsumerCoordinator - [Consumer clientId=consumer-1, groupId=skindow] Setting offset for partition skindow-test-0 to the committed offset FetchPosition{offset=16, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=192.168.50.67:9092 (id: 0 rack: null), epoch=0}}
16:43:15.538 [pool-2-thread-1] DEBUG org.apache.kafka.clients.Metadata - [Consumer clientId=consumer-1, groupId=skindow] Not replacing existing epoch 0 with new epoch 0
16:43:15.545 [pool-2-thread-1] DEBUG org.apache.kafka.clients.NetworkClient - [Consumer clientId=consumer-1, groupId=skindow] Initiating connection to node 192.168.50.67:9092 (id: 0 rack: null) using address /192.168.50.67
16:43:15.546 [pool-2-thread-1] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name node-0.bytes-sent
16:43:15.547 [pool-2-thread-1] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name node-0.bytes-received
16:43:15.548 [pool-2-thread-1] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name node-0.latency
16:43:15.549 [pool-2-thread-1] DEBUG org.apache.kafka.common.network.Selector - [Consumer clientId=consumer-1, groupId=skindow] Created socket with SO_RCVBUF = 65536, SO_SNDBUF = 131072, SO_TIMEOUT = 0 to node 0
16:43:15.549 [pool-2-thread-1] DEBUG org.apache.kafka.clients.NetworkClient - [Consumer clientId=consumer-1, groupId=skindow] Completed connection to node 0. Fetching API versions.
16:43:15.549 [pool-2-thread-1] DEBUG org.apache.kafka.clients.NetworkClient - [Consumer clientId=consumer-1, groupId=skindow] Initiating API versions fetch from node 0.
16:43:15.552 [pool-2-thread-1] DEBUG org.apache.kafka.clients.NetworkClient - [Consumer clientId=consumer-1, groupId=skindow] Recorded API versions for node 0: (Produce(0): 0 to 7 [usable: 7], Fetch(1): 0 to 11 [usable: 11], ListOffsets(2): 0 to 5 [usable: 5], Metadata(3): 0 to 8 [usable: 8], LeaderAndIsr(4): 0 to 2 [usable: 2], StopReplica(5): 0 to 1 [usable: 1], UpdateMetadata(6): 0 to 5 [usable: 5], ControlledShutdown(7): 0 to 2 [usable: 2], OffsetCommit(8): 0 to 7 [usable: 7], OffsetFetch(9): 0 to 5 [usable: 5], FindCoordinator(10): 0 to 2 [usable: 2], JoinGroup(11): 0 to 5 [usable: 5], Heartbeat(12): 0 to 3 [usable: 3], LeaveGroup(13): 0 to 2 [usable: 2], SyncGroup(14): 0 to 3 [usable: 3], DescribeGroups(15): 0 to 3 [usable: 3], ListGroups(16): 0 to 2 [usable: 2], SaslHandshake(17): 0 to 1 [usable: 1], ApiVersions(18): 0 to 2 [usable: 2], CreateTopics(19): 0 to 3 [usable: 3], DeleteTopics(20): 0 to 3 [usable: 3], DeleteRecords(21): 0 to 1 [usable: 1], InitProducerId(22): 0 to 1 [usable: 1], OffsetForLeaderEpoch(23): 0 to 3 [usable: 3], AddPartitionsToTxn(24): 0 to 1 [usable: 1], AddOffsetsToTxn(25): 0 to 1 [usable: 1], EndTxn(26): 0 to 1 [usable: 1], WriteTxnMarkers(27): 0 [usable: 0], TxnOffsetCommit(28): 0 to 2 [usable: 2], DescribeAcls(29): 0 to 1 [usable: 1], CreateAcls(30): 0 to 1 [usable: 1], DeleteAcls(31): 0 to 1 [usable: 1], DescribeConfigs(32): 0 to 2 [usable: 2], AlterConfigs(33): 0 to 1 [usable: 1], AlterReplicaLogDirs(34): 0 to 1 [usable: 1], DescribeLogDirs(35): 0 to 1 [usable: 1], SaslAuthenticate(36): 0 to 1 [usable: 1], CreatePartitions(37): 0 to 1 [usable: 1], CreateDelegationToken(38): 0 to 1 [usable: 1], RenewDelegationToken(39): 0 to 1 [usable: 1], ExpireDelegationToken(40): 0 to 1 [usable: 1], DescribeDelegationToken(41): 0 to 1 [usable: 1], DeleteGroups(42): 0 to 1 [usable: 1], ElectPreferredLeaders(43): 0 [usable: 0], IncrementalAlterConfigs(44): 0 [usable: 0])
16:43:15.563 [pool-2-thread-1] DEBUG org.apache.kafka.clients.consumer.internals.OffsetsForLeaderEpochClient - [Consumer clientId=consumer-1, groupId=skindow] Handling OffsetsForLeaderEpoch response for skindow-test-0. Got offset 19 for epoch 0
16:43:15.570 [pool-2-thread-1] DEBUG org.apache.kafka.clients.consumer.internals.Fetcher - [Consumer clientId=consumer-1, groupId=skindow] Added READ_UNCOMMITTED fetch request for partition skindow-test-0 at position FetchPosition{offset=16, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=192.168.50.67:9092 (id: 0 rack: null), epoch=0}} to node 192.168.50.67:9092 (id: 0 rack: null)
16:43:15.570 [pool-2-thread-1] DEBUG org.apache.kafka.clients.FetchSessionHandler - [Consumer clientId=consumer-1, groupId=skindow] Built full fetch (sessionId=INVALID, epoch=INITIAL) for node 0 with 1 partition(s).
16:43:15.571 [pool-2-thread-1] DEBUG org.apache.kafka.clients.consumer.internals.Fetcher - [Consumer clientId=consumer-1, groupId=skindow] Sending READ_UNCOMMITTED FullFetchRequest(skindow-test-0) to broker 192.168.50.67:9092 (id: 0 rack: null)
16:43:15.579 [pool-2-thread-1] DEBUG org.apache.kafka.clients.FetchSessionHandler - [Consumer clientId=consumer-1, groupId=skindow] Node 0 sent a full fetch response that created a new incremental fetch session 884485664 with 1 response partition(s)
16:43:15.582 [pool-2-thread-1] DEBUG org.apache.kafka.clients.consumer.internals.Fetcher - [Consumer clientId=consumer-1, groupId=skindow] Fetch READ_UNCOMMITTED at offset 16 for partition skindow-test-0 returned fetch data (error=NONE, highWaterMark=19, lastStableOffset = 19, logStartOffset = 0, preferredReadReplica = absent, abortedTransactions = null, recordsSizeInBytes=342)
16:43:15.588 [pool-2-thread-1] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name topic.skindow-test.bytes-fetched
16:43:15.589 [pool-2-thread-1] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name topic.skindow-test.records-fetched
16:43:15.589 [pool-2-thread-1] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name skindow-test-0.records-lag
16:43:15.590 [pool-2-thread-1] DEBUG org.apache.kafka.common.metrics.Metrics - Added sensor with name skindow-test-0.records-lead
16:43:15.591 [pool-2-thread-1] DEBUG org.apache.kafka.clients.consumer.internals.Fetcher - [Consumer clientId=consumer-1, groupId=skindow] Added READ_UNCOMMITTED fetch request for partition skindow-test-0 at position FetchPosition{offset=19, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=192.168.50.67:9092 (id: 0 rack: null), epoch=0}} to node 192.168.50.67:9092 (id: 0 rack: null)
16:43:15.592 [pool-2-thread-1] DEBUG org.apache.kafka.clients.FetchSessionHandler - [Consumer clientId=consumer-1, groupId=skindow] Built incremental fetch (sessionId=884485664, epoch=1) for node 0. Added 0 partition(s), altered 1 partition(s), removed 0 partition(s) out of 1 partition(s)
16:43:15.592 [pool-2-thread-1] DEBUG org.apache.kafka.clients.consumer.internals.Fetcher - [Consumer clientId=consumer-1, groupId=skindow] Sending READ_UNCOMMITTED IncrementalFetchRequest(toSend=(skindow-test-0), toForget=(), implied=()) to broker 192.168.50.67:9092 (id: 0 rack: null)
16:43:15.595 [pool-2-thread-1] INFO com.skindow.kafka.KafkaDemoConsumer - ========= consuming =========
16:43:15.595 [pool-2-thread-1] INFO com.skindow.kafka.KafkaDemoConsumer - skindow-con consumer offset = 16, key = skindow-pro-key-8, value = skindow-pro Under production8
16:43:15.595 [pool-2-thread-1] INFO com.skindow.kafka.KafkaDemoConsumer - ========= consuming =========
16:43:15.595 [pool-2-thread-1] INFO com.skindow.kafka.KafkaDemoConsumer - skindow-con consumer offset = 17, key = skindow-pro-key-0, value = skindow-pro Under production0
16:43:15.595 [pool-2-thread-1] INFO com.skindow.kafka.KafkaDemoConsumer - ========= consuming =========
16:43:15.595 [pool-2-thread-1] INFO com.skindow.kafka.KafkaDemoConsumer - skindow-con consumer offset = 18, key = skindow-pro-key-1, value = skindow-pro Under production1
16:43:15.595 [pool-2-thread-1] INFO com.skindow.kafka.KafkaDemoConsumer - skindow-con started consuming topic skindow-test
16:43:16.204 [kafka-coordinator-heartbeat-thread | skindow] DEBUG org.apache.kafka.clients.FetchSessionHandler - [Consumer clientId=consumer-1, groupId=skindow] Node 0 sent an incremental fetch response for session 884485664 with 0 response partition(s), 1 implied partition(s)
16:43:18.521 [kafka-coordinator-heartbeat-thread | skindow] DEBUG org.apache.kafka.clients.consumer.internals.AbstractCoordinator - [Consumer clientId=consumer-1, groupId=skindow] Sending Heartbeat request to coordinator 192.168.50.67:9092 (id: 2147483647 rack: null)
16:43:18.754 [kafka-coordinator-heartbeat-thread | skindow] DEBUG org.apache.kafka.clients.consumer.internals.AbstractCoordinator - [Consumer clientId=consumer-1, groupId=skindow] Received successful Heartbeat response
Disconnected from the target VM, address: '127.0.0.1:50040', transport: 'socket'
16:43:20.424 [pool-1-thread-1] INFO com.skindow.kafka.KafkaDemoProducer - skindow-pro started producing to topic skindow-test
16:43:20.428 [kafka-producer-network-thread | producer-1] INFO com.skindow.kafka.KafkaDemoProducer - skindow-pro produced skindow-pro-key-2 at 2019-08-13 16:43:20
16:43:20.429 [kafka-producer-network-thread | producer-1] INFO com.skindow.kafka.KafkaDemoProducer - skindow-pro has produced 20 messages so far

Process finished with exit code 1

Project repository:
https://github.com/skindowSyc/firstProject.git (tag kafkaProAndConDemo)
