Kafka in Action: A Survey Questionnaire

1. Survey template query API:

Function: returns the currently active questionnaire template.

URL: /v1/template

Method: GET

Request parameters: none

Request example: none

Response fields:

| Field | Type | Description |
|---|---|---|
| requestId | string | Unique ID of this request |
| result | object | Response payload |

result: the questionnaire template, made up of multiple questions

| Field | Type | Description |
|---|---|---|
| templateId | string | Template ID |
| template | array | Template content (the list of questions) |

template: each element is one question

| Field | Type | Description |
|---|---|---|
| questionId | string | Question ID |
| question | string | Question text |
| options | array | Answer options |

Response example:
{
   "requestId": "",
   "result": {
       "templateId": "001",
       "template": [
           {
               "questionId": "1",
               "question": "What's the date today",
               "answer": "",
               "options": [
                   {"label": "The 1st", "value": "A"},
                   {"label": "The 2nd", "value": "B"},
                   {"label": "The 3rd", "value": "C"},
                   {"label": "The 4th", "value": "D"}]
           },
           {
               "questionId": "2",
               "question": "Your favorite color",
               "answer": "",
               "options": [
                   {"label": "Red", "value": "A"},
                   {"label": "Yellow", "value": "B"},
                   {"label": "Green", "value": "C"},
                   {"label": "Purple", "value": "D"}
               ]
           }
       ]
   }
}

2. Survey result reporting API (the original heading said "result query", but this endpoint submits answers):

URL: /v1/template/report

Method: POST

Request parameters:

| Parameter | Type | Required | Default | Description |
|---|---|---|---|---|
| templateId | string | - | - | Template ID |
| result | array | - | - | The survey answers |

result:

| Parameter | Type | Required | Default | Description |
|---|---|---|---|---|
| questionId | string | - | - | Question ID |
| question | string | - | - | Question text |
| answer | string | - | - | The selected answer |

Request example:

{
    "templateId": "001",
    "result": [
        {"questionId": "1",
        "question": "What's the date today",
        "answer": "A"},
        {"questionId": "2",
        "question": "Your favorite color",
        "answer": "B"}
    ]
}

Response:

| Field | Type | Description |
|---|---|---|
| requestId | string | Unique ID of this request |

Response example:

{
"requestId":""
}

3. Survey statistics API:

URL: /v1/template/result

Method: GET

Request parameters:

| Parameter | Type | Required | Default | Description |
|---|---|---|---|---|
| templateId | string | no | - | Template ID |

Request example:

{
    "templateId": "001"  // optional
}

Response:

| Field | Type | Description |
|---|---|---|
| requestId | string | Unique ID of this request |
| result | object | Response payload |

result:

| Field | Type | Description |
|---|---|---|
| templateId | string | Template ID |
| totalNumber | string | Total number of respondents |
| statistics | array | Per-question statistics |

statistics:

| Field | Type | Description |
|---|---|---|
| questionId | string | Question ID |
| question | string | Question text |
| answers | array | Per-option answer counts |

Response example:
{
   "requestId":"",
   "result":{ 
       "templateId": "001",
       "totalNumber": "102", 
       "statistics": [
           {
               "questionId": "1",
               "question": "今天几号", 
               "answers": [
                   {"label": "A", "value": 10},
                   {"label": "B", "value": 50},
                   {"label": "C", "value": 12},
                   {"label": "D", "value": 17}
               ]
           },
           {
               "questionId": "2", 
               "question": "你喜爱的颜色", 
               "answers": [
                   {"label": "A", "value": 12},
                   {"label": "B", "value": 52},
                   {"label": "C", "value": 17},
                   {"label": "D", "value": 17}
               ]
           }
       ]
   }
}

controller

WechatTemplateController
package com.imooc.jiangzh.kafka.wechat.controller;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.google.common.collect.Maps;
import com.imooc.jiangzh.kafka.wechat.common.BaseResponseVO;
import com.imooc.jiangzh.kafka.wechat.conf.WechatTemplateProperties;
import com.imooc.jiangzh.kafka.wechat.service.WechatTemplateService;
import com.imooc.jiangzh.kafka.wechat.utils.FileUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;

import java.util.Map;

@RestController
@RequestMapping(value = "/v1")
public class WechatTemplateController {

    @Autowired
    private WechatTemplateProperties properties;

    @Autowired
    private WechatTemplateService wechatTemplateService;

    // fetch the survey template (the questions)
    @RequestMapping(value = "/template", method = RequestMethod.GET)
    public BaseResponseVO getTemplate(){

        WechatTemplateProperties.WechatTemplate wechatTemplate = wechatTemplateService.getWechatTemplate();

        Map<String, Object> result = Maps.newHashMap();
        result.put("templateId",wechatTemplate.getTemplateId());
        result.put("template", FileUtils.readFile2JsonArray(wechatTemplate.getTemplateFilePath()));

        return BaseResponseVO.success(result);
    }

    // fetch the aggregated statistics
    @RequestMapping(value = "/template/result", method = RequestMethod.GET)
    public BaseResponseVO templateStatistics(
            @RequestParam(value = "templateId", required = false)String templateId){

        JSONObject statistics = wechatTemplateService.templateStatistics(templateId);

        return BaseResponseVO.success(statistics);
    }

    // report a filled-in survey
    @RequestMapping(value = "/template/report", method = RequestMethod.POST)
    public BaseResponseVO dataReported(
            @RequestBody String reportData){

        wechatTemplateService.templateReported(JSON.parseObject(reportData));

        return BaseResponseVO.success();
    }
}

Service

WechatTemplateServiceImpl
package com.imooc.jiangzh.kafka.wechat.service;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.imooc.jiangzh.kafka.wechat.conf.WechatTemplateProperties;
import com.imooc.jiangzh.kafka.wechat.utils.FileUtils;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.util.List;
import java.util.Optional;
import java.util.concurrent.Future;

@Slf4j
@Service
public class WechatTemplateServiceImpl implements WechatTemplateService{

    @Autowired
    private WechatTemplateProperties properties;

    @Autowired
    private Producer<String, String> producer;

    @Override // fetch the WeChat survey template - returning the first template whose active flag is true is enough
    public WechatTemplateProperties.WechatTemplate getWechatTemplate() {

        List<WechatTemplateProperties.WechatTemplate> templates = properties.getTemplates();

        // keep only the active template
        Optional<WechatTemplateProperties.WechatTemplate> wechatTemplate
                = templates.stream()
            .filter(WechatTemplateProperties.WechatTemplate::isActive) // active == true
            .findFirst();

        return wechatTemplate.orElse(null);
    }

    @Override // report survey answers
    public void templateReported(JSONObject reportInfo) {
        // the Kafka producer pushes the data to a Kafka topic
        log.info("templateReported : [{}]", reportInfo);
        String topicName = "jiangzh-topic";
        // build the Kafka record
        String templateId = reportInfo.getString("templateId");
        JSONArray reportData = reportInfo.getJSONArray("result"); // the answers

        // records sharing a templateId can later be routed to the same partition, which simplifies analysis
        // the value serializer is StringSerializer, so the JSON array must be sent as a string
        ProducerRecord<String,String> record =
                new ProducerRecord<>(topicName, templateId, reportData.toJSONString());

        /*
            1. The Kafka producer is thread safe - share one instance across threads. Creating one per
               thread causes heavy context switching and contention, hurting Kafka throughput.
            2. The record key is a very important piece:
                2.1 the key drives partition assignment, i.e. load balancing across partitions
                2.2 a well-designed key lets real-time tools such as Flink or Spark Streaming process data faster
            3. acks=all gives the strongest broker-side delivery guarantee Kafka offers (at-least-once,
               not exactly-once by itself); if you truly cannot afford to lose data, handle send failures yourself
         */
        try{
            producer.send(record);
        }catch (Exception e){
            // on failure, push the record onto a retry queue (Redis, ES, ...)
        }

    }

    @Override // fetch the survey statistics
    public JSONObject templateStatistics(String templateId) {
        // decide where the statistics come from
        if(properties.getTemplateResultType() == 0){ // 0 - read from a file
            // orElse(null) instead of get(): avoids NoSuchElementException when the file is missing
            return FileUtils.readFile2JsonObject(properties.getTemplateResultFilePath()).orElse(null);
        }else{
            // DB ..
        }
        return null;
    }
}
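
The statistics endpoint serves a pre-computed templateResult.json; the job that produces that file is not part of this post. As a minimal sketch of what such a job could look like (the topic name jiangzh-topic and the payload shape come from templateReported above; the class name, group id and everything else are assumptions, not course code), a plain consumer can tally the answers:

package com.imooc.jiangzh.kafka.wechat.service;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.time.Duration;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

// Hypothetical aggregation job - NOT part of the original course code
public class SurveyStatisticsJob {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "192.168.220.128:9092");
        props.setProperty("group.id", "survey-statistics"); // assumed group id
        props.setProperty("enable.auto.commit", "true");
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        // questionId -> (answer label -> count)
        Map<String, Map<String, Long>> counts = new HashMap<>();
        long totalNumber = 0;

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Arrays.asList("jiangzh-topic"));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, String> record : records) {
                    // the producer sends the "result" array as a JSON string, keyed by templateId
                    JSONArray answers = JSON.parseArray(record.value());
                    totalNumber++; // one record == one respondent
                    for (int i = 0; i < answers.size(); i++) {
                        JSONObject answer = answers.getJSONObject(i);
                        counts.computeIfAbsent(answer.getString("questionId"), k -> new HashMap<>())
                              .merge(answer.getString("answer"), 1L, Long::sum);
                    }
                }
                // a real job would periodically write counts and totalNumber into
                // templateResult.json (or a database) in the shape shown later in this post
            }
        }
    }
}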
WechatTemplateService
package com.imooc.jiangzh.kafka.wechat.service;


import com.alibaba.fastjson.JSONObject;
import com.imooc.jiangzh.kafka.wechat.conf.WechatTemplateProperties;

public interface WechatTemplateService {

    // fetch the WeChat survey template - the first one whose active flag is true
    WechatTemplateProperties.WechatTemplate getWechatTemplate();

    // report survey answers
    void templateReported(JSONObject reportInfo);

    // fetch the survey statistics
    JSONObject templateStatistics(String templateId);

}

Utils

FileUtils
package com.imooc.jiangzh.kafka.wechat.utils;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.util.Optional;
import lombok.Cleanup;
import lombok.extern.slf4j.Slf4j;

/**
 * @author : jiangzh
 * @description : file utility class
 **/
@Slf4j
public class FileUtils {

    // read the file at the given path and return its content as one string (newlines are dropped)
    public static String readFile(String filePath) throws IOException {
        @Cleanup // Lombok annotation: generates the close() call, so no explicit try/catch/close is needed
        BufferedReader reader = new BufferedReader(
            new FileReader(new File(filePath))
        );

        String lineStr = "";
        StringBuffer stringBuffer = new StringBuffer();
        while ((lineStr = reader.readLine()) != null) {
            stringBuffer.append(lineStr);
        }

        return stringBuffer.toString();
    }


    // parse the file content into a JSONObject
    public static Optional<JSONObject> readFile2JsonObject(String filePath){
        try {
            String fileContent = readFile(filePath); // readFile above
            log.info("readFile2Json fileContent: [{}]" , fileContent);
            // JSON.parseObject turns the string into a JSONObject
            return Optional.ofNullable(JSON.parseObject(fileContent)); // java.util.Optional
        } catch (IOException e) {
            log.error("readFile2JsonObject error", e);
        }
        return Optional.empty();
    }

    // parse the file content into a JSONArray
    public static Optional<JSONArray> readFile2JsonArray(String filePath){
        try {
            String fileContent = readFile(filePath);
            log.info("readFile2JsonArray fileContent: [{}]" , fileContent);
            return Optional.ofNullable(JSON.parseArray(fileContent));
        } catch (IOException e) {
            log.error("readFile2JsonArray error", e);
        }
        return Optional.empty();
    }

}
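
For illustration, a tiny (hypothetical) driver for these helpers, run against the template.json shown later in this post; the path is an assumption about your working directory:

package com.imooc.jiangzh.kafka.wechat.utils;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;

// Hypothetical demo class, not part of the original project
public class FileUtilsDemo {

    public static void main(String[] args) {
        // assumed path, relative to the project root - adjust to your layout
        JSONArray template = FileUtils.readFile2JsonArray("src/main/resources/template/template.json")
                .orElse(new JSONArray());
        for (int i = 0; i < template.size(); i++) {
            JSONObject question = template.getJSONObject(i);
            System.out.println(question.getString("questionId") + " : " + question.getString("question"));
        }
    }
}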

conf

KafkaConf
package com.imooc.jiangzh.kafka.wechat.conf;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.util.Properties;

@Configuration
public class KafkaConf {

    @Autowired
    private KafkaProperties kafkaProperties;

    @Bean
    public Producer<String, String> kafkaProducer(){
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaProperties.getBootstrapServers());
        properties.put(ProducerConfig.ACKS_CONFIG, kafkaProperties.getAcksConfig());
        properties.put(ProducerConfig.RETRIES_CONFIG,"0");
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG,"16384");
        properties.put(ProducerConfig.LINGER_MS_CONFIG,"1");
        properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG,"33554432");

        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,"org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,"org.apache.kafka.common.serialization.StringSerializer");
//        properties.put(ProducerConfig.PARTITIONER_CLASS_CONFIG,"com.imooc.jiangzh.kafka.producer.SamplePartition");

        // the main producer object
        Producer<String,String> producer = new KafkaProducer<>(properties);

        return producer;
    }

}
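
Note that the partitioner_class value bound into KafkaProperties below is never applied: the PARTITIONER_CLASS_CONFIG line above stays commented out, so the producer uses Kafka's default partitioner. If you want the custom SamplePartition from the producer section to take effect, a sketch of the extra line inside kafkaProducer() would be:

        // inside kafkaProducer(), before new KafkaProducer<>(properties):
        properties.put(ProducerConfig.PARTITIONER_CLASS_CONFIG,
                kafkaProperties.getPartitionerClass()); // e.g. com.imooc.jiangzh.kafka.producer.SamplePartition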
KafkaProperties
package com.imooc.jiangzh.kafka.wechat.conf;

import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Configuration;

@Data
@Configuration
@ConfigurationProperties(prefix = "wechat.kafka")//解析application.yml中的前缀内容
public class KafkaProperties {

    private String bootstrapServers;
    private String acksConfig;
    private String partitionerClass;

}
WechatTemplateProperties

Binds the template section of application.yml; it tells the app which question template to serve and where the aggregated survey results live.

package com.imooc.jiangzh.kafka.wechat.conf;

import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Configuration;

import java.util.List;

@Data
@Configuration
@ConfigurationProperties(prefix = "template")
public class WechatTemplateProperties {

    private List<WechatTemplate> templates; // the template list (see application.yml)
    private int templateResultType; // 0 - file, 1 - database, 2 - ES
    private String templateResultFilePath; // e.g. templateResult.json

    @Data // generates getters/setters
    public static class WechatTemplate{
        private String templateId; // template ID
        private String templateFilePath;
        private boolean active;
    }
}

common

BaseResponseVO
package com.imooc.jiangzh.kafka.wechat.common;

import java.util.UUID;
import lombok.Data;

/** Common response wrapper */
@Data
public class BaseResponseVO<M> {

    private String requestId;
    private M result;

    // only sets the requestId
    public static <M> BaseResponseVO<M> success(){
        BaseResponseVO<M> baseResponseVO = new BaseResponseVO<>();
        baseResponseVO.setRequestId(genRequestId());
        return baseResponseVO;
    }

    // sets the requestId and the result
    public static <M> BaseResponseVO<M> success(M result){
        BaseResponseVO<M> baseResponseVO = new BaseResponseVO<>();
        baseResponseVO.setRequestId(genRequestId());
        baseResponseVO.setResult(result);

        return baseResponseVO;
    }

    private static String genRequestId(){
        return UUID.randomUUID().toString();
    }
}
CorsFilter
package com.imooc.jiangzh.kafka.wechat.common;

import java.io.IOException;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.annotation.WebFilter;
import javax.servlet.http.HttpServletResponse;
import org.springframework.context.annotation.Configuration;

/**
 * @author : jiangzh
 * @description : CORS (cross-origin) filter
 **/
@WebFilter(filterName = "CorsFilter")
@Configuration
public class CorsFilter implements Filter {
    @Override
    public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain) throws IOException, ServletException {
        HttpServletResponse response = (HttpServletResponse) res;
        response.setHeader("Access-Control-Allow-Origin","*");
        response.setHeader("Access-Control-Allow-Credentials", "true");
        response.setHeader("Access-Control-Allow-Methods", "POST, GET, PATCH, DELETE, PUT");
        response.setHeader("Access-Control-Max-Age", "3600");
        response.setHeader("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept");
        chain.doFilter(req, res);
    }
}

main

package com.imooc.jiangzh.kafka;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

@SpringBootApplication
public class KafkaStudyApplication {

    public static void main(String[] args) {
        SpringApplication.run(KafkaStudyApplication.class, args);
    }

}
logback.xml
<configuration>
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>%highlight(%-5level) (%file:%line\)- %m%n</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <root level="info">
        <appender-ref ref="STDOUT" />
    </root>
</configuration>
application.yml
server:
  port: 443
  ssl:
    key-store: 3702162_wechat.coding-jiangzh.com.pfx
    key-store-password: 49Jk65Pi

template:
  templates:
    - {"templateId":"1",
    "templateFilePath":"F:/workSpace/mooc_workspace/kafka_study/src/main/resources/template/template.json",
    "active": true}
    - {"templateId":"2",
    "templateFilePath":"F:/workSpace/mooc_workspace/kafka_study/src/main/resources/template/template.json",
    "active": false}
  template-result-type: 0  # 0 - file, 1 - database, 2 - ES
  template-result-file-path: "F:/workSpace/mooc_workspace/kafka_study/src/main/resources/template/templateResult.json"

wechat:
  kafka:
    bootstrap_servers: "192.168.220.128:9092"
    acks_config: "all"
    partitioner_class: "com.imooc.jiangzh.kafka.producer.SamplePartition"

logging:
  config: classpath:logback.xml
template.json

The question template

[
    {
        "questionId": "1",
        "question": "What's the date today",
        "answer": "",
        "options": [
            {"label": "The 1st", "value": "A"},
            {"label": "The 2nd", "value": "B"},
            {"label": "The 3rd", "value": "C"},
            {"label": "The 4th", "value": "D"}
        ]},
    {
        "questionId": "2",
        "question": "Your favorite color",
        "answer": "",
        "options": [
            {"label": "Red", "value": "A"},
            {"label": "Yellow", "value": "B"},
            {"label": "Green", "value": "C"},
            {"label": "Purple", "value": "D"}
        ]}
]
templateResult.json

Survey results: totalNumber is the number of respondents; each value is how many people chose that option.

{
  "templateId": "001",
  "totalNumber": "102",
  "statistics": [
    {
      "questionId": "1",
      "question": "What's the date today",
      "answers": [
        {"label": "A", "value": 10},
        {"label": "B", "value": 50},
        {"label": "C", "value": 12},
        {"label": "D", "value": 17}
      ]
    },
    {
      "questionId": "2",
      "question": "Your favorite color",
      "answers": [
        {"label": "A", "value": 12},
        {"label": "B", "value": 52},
        {"label": "C", "value": 17},
        {"label": "D", "value": 17}
      ]
    }
  ]
}

pom.xml

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.2.5.RELEASE</version>
        <relativePath/> <!-- lookup parent from repository -->
    </parent>
    <groupId>com.imooc.jiangzh</groupId>
    <artifactId>kafka-study</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <name>kafka-study</name>
    <description>Kafka study project for the imooc course</description>

    <properties>
        <java.version>1.8</java.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>cn.hutool</groupId>
            <artifactId>hutool-all</artifactId>
            <version>4.1.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>2.4.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-streams</artifactId>
            <version>2.4.0</version>
        </dependency>
        <dependency>
            <!-- also install the Lombok plugin in your IDE -->
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <version>1.18.12</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>com.google.guava</groupId>
            <artifactId>guava</artifactId>
            <version>28.2-jre</version>
        </dependency>
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>1.2.68</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-maven-plugin</artifactId>
            </plugin>
        </plugins>
    </build>
</project>

admin

package com.imooc.jiangzh.kafka.admin;

import org.apache.kafka.clients.admin.*;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.config.ConfigResource;

import java.util.*;
import java.util.concurrent.ExecutionException;

public class AdminSample {

    public final static String TOPIC_NAME="jiangzh-topic";

    public static void main(String[] args) throws Exception {
//        AdminClient adminClient = AdminSample.adminClient();
//        System.out.println("adminClient : "+ adminClient);
        // create a topic
//        createTopic();
        // delete a topic
//        delTopics();
        // list topics
//        topicLists();
        // describe a topic
        describeTopics();
        // alter configs
//        alterConfig();
        // describe configs
//        describeConfig();
        // increase the partition count
//        incrPartitions(2);
    }

    /*
        Increase the partition count
     */
    public static void incrPartitions(int partitions) throws Exception{
        AdminClient adminClient = adminClient();
        Map<String, NewPartitions> partitionsMap = new HashMap<>();
        NewPartitions newPartitions = NewPartitions.increaseTo(partitions);
        partitionsMap.put(TOPIC_NAME, newPartitions);


        CreatePartitionsResult createPartitionsResult = adminClient.createPartitions(partitionsMap);
        createPartitionsResult.all().get();
    }

    /*
        Alter config entries
     */
    public static void alterConfig() throws Exception{
        AdminClient adminClient = adminClient();
//        Map<ConfigResource,Config> configMaps = new HashMap<>();
//
//        // build the two arguments
//        ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, TOPIC_NAME);
//        Config config = new Config(Arrays.asList(new ConfigEntry("preallocate","true")));
//        configMaps.put(configResource,config);
//        AlterConfigsResult alterConfigsResult = adminClient.alterConfigs(configMaps);

        /*
            the newer API, introduced in Kafka 2.3+
         */
        Map<ConfigResource,Collection<AlterConfigOp>> configMaps = new HashMap<>();
        // build the two arguments
        ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, TOPIC_NAME);
        AlterConfigOp alterConfigOp =
                new AlterConfigOp(new ConfigEntry("preallocate","false"),AlterConfigOp.OpType.SET);
        configMaps.put(configResource,Arrays.asList(alterConfigOp));

        AlterConfigsResult alterConfigsResult = adminClient.incrementalAlterConfigs(configMaps);
        alterConfigsResult.all().get();
    }

    /*
        Describe config entries. Sample output:
        ConfigResource(type=TOPIC, name='jiangzh-topic') ,
        Config(
            entries=[
             ConfigEntry(
               name=compression.type,
               value=producer,
               source=DEFAULT_CONFIG,
               isSensitive=false,
               isReadOnly=false,
               synonyms=[]),
             ConfigEntry(
                name=leader.replication.throttled.replicas,
                value=,
                source=DEFAULT_CONFIG,
                isSensitive=false,
                isReadOnly=false,
                synonyms=[]), ConfigEntry(name=message.downconversion.enable, value=true, source=DEFAULT_CONFIG, isSensitive=false, isReadOnly=false, synonyms=[]), ConfigEntry(name=min.insync.replicas, value=1, source=DEFAULT_CONFIG, isSensitive=false, isReadOnly=false, synonyms=[]), ConfigEntry(name=segment.jitter.ms, value=0, source=DEFAULT_CONFIG, isSensitive=false, isReadOnly=false, synonyms=[]), ConfigEntry(name=cleanup.policy, value=delete, source=DEFAULT_CONFIG, isSensitive=false, isReadOnly=false, synonyms=[]), ConfigEntry(name=flush.ms, value=9223372036854775807, source=DEFAULT_CONFIG, isSensitive=false, isReadOnly=false, synonyms=[]), ConfigEntry(name=follower.replication.throttled.replicas, value=, source=DEFAULT_CONFIG, isSensitive=false, isReadOnly=false, synonyms=[]), ConfigEntry(name=segment.bytes, value=1073741824, source=STATIC_BROKER_CONFIG, isSensitive=false, isReadOnly=false, synonyms=[]), ConfigEntry(name=retention.ms, value=604800000, source=DEFAULT_CONFIG, isSensitive=false, isReadOnly=false, synonyms=[]), ConfigEntry(name=flush.messages, value=9223372036854775807, source=DEFAULT_CONFIG, isSensitive=false, isReadOnly=false, synonyms=[]), ConfigEntry(name=message.format.version, value=2.4-IV1, source=DEFAULT_CONFIG, isSensitive=false, isReadOnly=false, synonyms=[]), ConfigEntry(name=file.delete.delay.ms, value=60000, source=DEFAULT_CONFIG, isSensitive=false, isReadOnly=false, synonyms=[]), ConfigEntry(name=max.compaction.lag.ms, value=9223372036854775807, source=DEFAULT_CONFIG, isSensitive=false, isReadOnly=false, synonyms=[]), ConfigEntry(name=max.message.bytes, value=1000012, source=DEFAULT_CONFIG, isSensitive=false, isReadOnly=false, synonyms=[]), ConfigEntry(name=min.compaction.lag.ms, value=0, source=DEFAULT_CONFIG, isSensitive=false, isReadOnly=false, synonyms=[]), ConfigEntry(name=message.timestamp.type, value=CreateTime, source=DEFAULT_CONFIG, isSensitive=false, isReadOnly=false, synonyms=[]),
             ConfigEntry(name=preallocate, value=false, source=DEFAULT_CONFIG, isSensitive=false, isReadOnly=false, synonyms=[]), ConfigEntry(name=min.cleanable.dirty.ratio, value=0.5, source=DEFAULT_CONFIG, isSensitive=false, isReadOnly=false, synonyms=[]), ConfigEntry(name=index.interval.bytes, value=4096, source=DEFAULT_CONFIG, isSensitive=false, isReadOnly=false, synonyms=[]), ConfigEntry(name=unclean.leader.election.enable, value=false, source=DEFAULT_CONFIG, isSensitive=false, isReadOnly=false, synonyms=[]), ConfigEntry(name=retention.bytes, value=-1, source=DEFAULT_CONFIG, isSensitive=false, isReadOnly=false, synonyms=[]), ConfigEntry(name=delete.retention.ms, value=86400000, source=DEFAULT_CONFIG, isSensitive=false, isReadOnly=false, synonyms=[]), ConfigEntry(name=segment.ms, value=604800000, source=DEFAULT_CONFIG, isSensitive=false, isReadOnly=false, synonyms=[]), ConfigEntry(name=message.timestamp.difference.max.ms, value=9223372036854775807, source=DEFAULT_CONFIG, isSensitive=false, isReadOnly=false, synonyms=[]), ConfigEntry(name=segment.index.bytes, value=10485760, source=DEFAULT_CONFIG, isSensitive=false, isReadOnly=false, synonyms=[])])
     */
    public static void describeConfig() throws Exception{
        AdminClient adminClient = adminClient();
        // TODO placeholder - broker-level configs are covered in the cluster section
//        ConfigResource configResource = new ConfigResource(ConfigResource.Type.BROKER, TOPIC_NAME);

        ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, TOPIC_NAME);
        DescribeConfigsResult describeConfigsResult = adminClient.describeConfigs(Arrays.asList(configResource));
        Map<ConfigResource, Config> configResourceConfigMap = describeConfigsResult.all().get();
        configResourceConfigMap.entrySet().stream().forEach((entry)->{
            System.out.println("configResource : "+entry.getKey()+" , Config : "+entry.getValue());
        });
    }

    /*
        Describe a topic. Sample output:
        name :jiangzh-topic ,
        desc: (name=jiangzh-topic,
            internal=false,
            partitions=
                (partition=0,
                 leader=192.168.220.128:9092
                 (id: 0 rack: null),
                 replicas=192.168.220.128:9092
                 (id: 0 rack: null),
                 isr=192.168.220.128:9092
                 (id: 0 rack: null)),
                 authorizedOperations=[])
     */
    public static void describeTopics() throws Exception {
        AdminClient adminClient = adminClient();
        DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Arrays.asList(TOPIC_NAME));
        Map<String, TopicDescription> stringTopicDescriptionMap = describeTopicsResult.all().get();
        Set<Map.Entry<String, TopicDescription>> entries = stringTopicDescriptionMap.entrySet();
        entries.stream().forEach((entry)->{
            System.out.println("name :"+entry.getKey()+" , desc: "+ entry.getValue());
        });
    }

    /*
        Delete a topic
     */
    public static void delTopics() throws Exception {
        AdminClient adminClient = adminClient();
        DeleteTopicsResult deleteTopicsResult = adminClient.deleteTopics(Arrays.asList(TOPIC_NAME));
        deleteTopicsResult.all().get();
    }

    /*
        List topics
     */
    public static void topicLists() throws Exception {
        AdminClient adminClient = adminClient();
        // whether to include internal topics
        ListTopicsOptions options = new ListTopicsOptions();
        options.listInternal(true);
//        ListTopicsResult listTopicsResult = adminClient.listTopics();
        ListTopicsResult listTopicsResult = adminClient.listTopics(options);
        Set<String> names = listTopicsResult.names().get();
        Collection<TopicListing> topicListings = listTopicsResult.listings().get();
        KafkaFuture<Map<String, TopicListing>> mapKafkaFuture = listTopicsResult.namesToListings();
        // print the names
        names.stream().forEach(System.out::println);
        // print the topic listings
        topicListings.stream().forEach((topicList)->{
            System.out.println(topicList);
        });
    }

    /*
        Create a topic
     */
    public static void createTopic() {
        AdminClient adminClient = adminClient();
        // replication factor
        Short rs = 1;
        NewTopic newTopic = new NewTopic(TOPIC_NAME, 1 , rs);
        CreateTopicsResult topics = adminClient.createTopics(Arrays.asList(newTopic));
        System.out.println("CreateTopicsResult : "+ topics);
    }

    /*
        Build the AdminClient
     */
    public static AdminClient adminClient(){
        Properties properties = new Properties();
        properties.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,"192.168.220.128:9092");

        AdminClient adminClient = AdminClient.create(properties);
        return adminClient;
    }

}
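
A caveat on the sample above: every method builds a fresh AdminClient and never closes it, so repeated calls leak connections. AdminClient implements AutoCloseable; a sketch of the safer pattern (a hypothetical variant of describeTopics, reusing the adminClient() helper above):

    public static void describeTopicsSafely() throws Exception {
        // try-with-resources: the AdminClient is closed even if a call throws
        try (AdminClient adminClient = adminClient()) {
            DescribeTopicsResult result = adminClient.describeTopics(Arrays.asList(TOPIC_NAME));
            result.all().get().forEach((name, desc) ->
                    System.out.println("name : " + name + " , desc: " + desc));
        }
    }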

producer

package com.imooc.jiangzh.kafka.producer;

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;

import java.util.Map;

public class SamplePartition implements Partitioner {
    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        /*
            keys look like: key-1, key-2, key-3 ...
            substring(4) strips the "key-" prefix, leaving the numeric suffix
         */
        String keyStr = key + "";
        String keyInt = keyStr.substring(4);
        System.out.println("keyStr : " + keyStr + " , keyInt : " + keyInt);

        int i = Integer.parseInt(keyInt);

        // even suffix -> partition 0, odd suffix -> partition 1
        return i % 2;
    }

    @Override
    public void close() {

    }

    @Override
    public void configure(Map<String, ?> configs) {

    }
}
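
To watch SamplePartition route records, a minimal producer sketch (assuming jiangzh-topic already has 2 partitions, e.g. via incrPartitions(2) from the admin section; the class name and the loop are illustrative, not course code):

package com.imooc.jiangzh.kafka.producer;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Properties;

// Hypothetical demo class, not part of the original project
public class SamplePartitionDemo {

    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.220.128:9092");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        // plug in the custom partitioner
        properties.put(ProducerConfig.PARTITIONER_CLASS_CONFIG,
                "com.imooc.jiangzh.kafka.producer.SamplePartition");

        try (Producer<String, String> producer = new KafkaProducer<>(properties)) {
            for (int i = 0; i < 10; i++) {
                // "key-0", "key-2", ... land on partition 0; "key-1", "key-3", ... on partition 1
                producer.send(new ProducerRecord<>("jiangzh-topic", "key-" + i, "value-" + i));
            }
        } // close() flushes outstanding sends
    }
}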

stream

package com.imooc.jiangzh.kafka.stream;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Produced;

import java.util.Arrays;
import java.util.Locale;
import java.util.Properties;

public class StreamSample {

    private static final String INPUT_TOPIC="jiangzh-stream-in";
    private static final String OUT_TOPIC="jiangzh-stream-out";

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG,"192.168.220.128:9092");
        props.put(StreamsConfig.APPLICATION_ID_CONFIG,"wordcount-app");
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        // how to build the stream topology
        final StreamsBuilder builder = new StreamsBuilder();
        // build the word-count topology
//        wordcountStream(builder);
        // build the foreach topology
        foreachStream(builder);

        final KafkaStreams streams = new KafkaStreams(builder.build(), props);

        streams.start();
        // close the streams application cleanly when the JVM shuts down
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
    }

    // how to define the processing: print every word
    static void foreachStream(final StreamsBuilder builder){
        KStream<String,String> source = builder.stream(INPUT_TOPIC);
        source
                .flatMapValues(value -> Arrays.asList(value.toLowerCase(Locale.getDefault()).split(" ")))
                .foreach((key,value)-> System.out.println(key + " : " + value));
    }

    // how to define the processing: word count
    static void wordcountStream(final StreamsBuilder builder){
        // KStream: an abstraction of an unbounded stream, continuously fed with new records from INPUT_TOPIC
        KStream<String,String> source = builder.stream(INPUT_TOPIC);
        // example input value: "Hello World imooc"
        // KTable is the table-like abstraction over the data set
        // the operators:
        final KTable<String, Long> count =
                source
                        // flatMapValues -> splits one record into several:
                        //   (key 1, value "Hello World") becomes (key 1, "hello"), (key 1, "world")
                        /*
                            key 1 , value Hello   -> hello 1  world 2
                            key 2 , value World
                            key 3 , value World
                         */
                        .flatMapValues(value -> Arrays.asList(value.toLowerCase(Locale.getDefault()).split(" ")))
                        // group by the word itself (the value)
                        .groupBy((key, value) -> value)
                        // count occurrences per word
                        .count();

        // write the counts to OUT_TOPIC
        count.toStream().to(OUT_TOPIC, Produced.with(Serdes.String(),Serdes.Long()));
    }

}
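
To inspect the word counts that wordcountStream writes to jiangzh-stream-out, the values must be read back with a Long deserializer, since they were produced with Serdes.Long(). A minimal sketch (class name and group id are assumptions):

package com.imooc.jiangzh.kafka.stream;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

// Hypothetical demo class, not part of the original project
public class StreamOutputReader {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "192.168.220.128:9092");
        props.setProperty("group.id", "wordcount-reader"); // assumed group id
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // counts were written with Serdes.Long(), so read them back as Long
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.LongDeserializer");

        try (KafkaConsumer<String, Long> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Arrays.asList("jiangzh-stream-out"));
            while (true) {
                ConsumerRecords<String, Long> records = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, Long> record : records) {
                    System.out.println(record.key() + " : " + record.value());
                }
            }
        }
    }
}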

consumer

ConsumerSample
package com.imooc.jiangzh.kafka.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.*;

public class ConsumerSample {
    private final static String TOPIC_NAME="jiangzh-topic";
    public static void main(String[] args) {
//        helloworld();
        // commit offsets manually
//        commitedOffset();
        // commit manually, per partition
//        commitedOffsetWithPartition();
        // assign specific partitions and commit offsets manually
//        commitedOffsetWithPartition2();
        // control the starting offset by hand, plus manual commits
//        controlOffset();
        // flow control
        controlPause();
    }


    /*
        You will see this auto-commit style in real projects, but it is not recommended
     */
    private static void helloworld(){
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "192.168.220.128:9092");
        props.setProperty("group.id", "test");
        props.setProperty("enable.auto.commit", "true");
        props.setProperty("auto.commit.interval.ms", "1000");
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String,String> consumer = new KafkaConsumer<>(props);
        // subscribe to one or more topics
        consumer.subscribe(Arrays.asList(TOPIC_NAME));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
            for (ConsumerRecord<String, String> record : records)
                System.out.printf("patition = %d , offset = %d, key = %s, value = %s%n",
                        record.partition(),record.offset(), record.key(), record.value());
        }
    }

    /*
        Commit offsets manually
     */
    private static void commitedOffset() {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "192.168.220.128:9092");
        props.setProperty("group.id", "test");
        props.setProperty("enable.auto.commit", "false");
        props.setProperty("auto.commit.interval.ms", "1000");
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // subscribe to one or more topics
        consumer.subscribe(Arrays.asList(TOPIC_NAME));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
            for (ConsumerRecord<String, String> record : records) {
                // e.g. persist the record to a database - it may succeed or fail
                // TODO record 2 db
                System.out.printf("partition = %d , offset = %d, key = %s, value = %s%n",
                        record.partition(), record.offset(), record.key(), record.value());
                // on failure, roll back and do NOT commit the offset
            }

            // on success, commit the offset manually
            consumer.commitAsync();
        }
    }


    /*
        Commit offsets manually, handling each partition separately
     */
    private static void commitedOffsetWithPartition() {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "192.168.220.128:9092");
        props.setProperty("group.id", "test");
        props.setProperty("enable.auto.commit", "false");
        props.setProperty("auto.commit.interval.ms", "1000");
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // subscribe to one or more topics
        consumer.subscribe(Arrays.asList(TOPIC_NAME));
        while (true) {
           ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
           // handle each partition separately
           for(TopicPartition partition : records.partitions()){
               List<ConsumerRecord<String, String>> pRecord = records.records(partition);
               for (ConsumerRecord<String, String> record : pRecord) {
                   System.out.printf("patition = %d , offset = %d, key = %s, value = %s%n",
                           record.partition(), record.offset(), record.key(), record.value());

               }
               long lastOffset = pRecord.get(pRecord.size() -1).offset();
               // build the offset map for this partition and commit it
               Map<TopicPartition, OffsetAndMetadata> offset = new HashMap<>();
               offset.put(partition,new OffsetAndMetadata(lastOffset+1));
               // commit the offset
               consumer.commitSync(offset);
               System.out.println("=============partition - "+ partition +" end================");
           }
        }
    }

    /*
        Commit offsets manually with explicitly assigned partitions (more fine-grained)
     */
    private static void commitedOffsetWithPartition2() {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "192.168.220.128:9092");
        props.setProperty("group.id", "test");
        props.setProperty("enable.auto.commit", "false");
        props.setProperty("auto.commit.interval.ms", "1000");
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        // jiangzh-topic has two partitions: 0 and 1
        TopicPartition p0 = new TopicPartition(TOPIC_NAME, 0);
        TopicPartition p1 = new TopicPartition(TOPIC_NAME, 1);

        // subscribe to whole topics
//        consumer.subscribe(Arrays.asList(TOPIC_NAME));

        // or assign a specific partition of a topic
        consumer.assign(Arrays.asList(p0));

        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
            // handle each partition separately
            for(TopicPartition partition : records.partitions()){
                List<ConsumerRecord<String, String>> pRecord = records.records(partition);
                for (ConsumerRecord<String, String> record : pRecord) {
                    System.out.printf("patition = %d , offset = %d, key = %s, value = %s%n",
                            record.partition(), record.offset(), record.key(), record.value());

                }
                long lastOffset = pRecord.get(pRecord.size() -1).offset();
                // build the offset map for this partition and commit it
                Map<TopicPartition, OffsetAndMetadata> offset = new HashMap<>();
                offset.put(partition,new OffsetAndMetadata(lastOffset+1));
                // commit the offset
                consumer.commitSync(offset);
                System.out.println("=============partition - "+ partition +" end================");
            }
        }
    }


    /*
        Control the starting offset by hand, plus manual commits
     */
    private static void controlOffset() {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "192.168.220.128:9092");
        props.setProperty("group.id", "test");
        props.setProperty("enable.auto.commit", "false");
        props.setProperty("auto.commit.interval.ms", "1000");
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        // jiangzh-topic - partition 0
        TopicPartition p0 = new TopicPartition(TOPIC_NAME, 0);

        // assign a specific partition of the topic
        consumer.assign(Arrays.asList(p0));

        while (true) {
            // set the starting offset by hand
            /*
                1. you control the starting offset yourself
                2. if the program fails, a batch may be consumed twice (at-least-once)
             */
            /*
                1. the first run usually starts from 0
                2. after consuming, say, 100 records, store offset 101 in Redis
                3. before every poll, read the latest offset from Redis
                4. consume from that position onwards
             */
            consumer.seek(p0, 700); // hard-coded for the demo; in practice this value would come from Redis as described above

            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
            // handle each partition separately
            for(TopicPartition partition : records.partitions()){
                List<ConsumerRecord<String, String>> pRecord = records.records(partition);
                for (ConsumerRecord<String, String> record : pRecord) {
                    System.err.printf("patition = %d , offset = %d, key = %s, value = %s%n",
                            record.partition(), record.offset(), record.key(), record.value());

                }
                long lastOffset = pRecord.get(pRecord.size() -1).offset();
                // build the offset map for this partition and commit it
                Map<TopicPartition, OffsetAndMetadata> offset = new HashMap<>();
                offset.put(partition,new OffsetAndMetadata(lastOffset+1));
                // commit the offset
                consumer.commitSync(offset);
                System.out.println("=============partition - "+ partition +" end================");
            }
        }
    }


    /*
        Flow control - rate limiting
     */
    private static void controlPause() {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "192.168.220.128:9092");
        props.setProperty("group.id", "test");
        props.setProperty("enable.auto.commit", "false");
        props.setProperty("auto.commit.interval.ms", "1000");
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        // jiangzh-topic has two partitions: 0 and 1
        TopicPartition p0 = new TopicPartition(TOPIC_NAME, 0);
        TopicPartition p1 = new TopicPartition(TOPIC_NAME, 1);

        // assign specific partitions of the topic
        consumer.assign(Arrays.asList(p0,p1));
        long totalNum = 40;
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
            // handle each partition separately
            for(TopicPartition partition : records.partitions()){
                List<ConsumerRecord<String, String>> pRecord = records.records(partition);
                long num = 0;
                for (ConsumerRecord<String, String> record : pRecord) {
                    System.out.printf("patition = %d , offset = %d, key = %s, value = %s%n",
                            record.partition(), record.offset(), record.key(), record.value());
                    /*
                        1. after receiving a record, take a token from the token bucket
                        2. got a token -> continue processing
                        3. no token -> pause() and wait for tokens
                        4. once the bucket holds enough tokens again -> resume() the consumer
                     */
                    num++;
                    // pause partition 0 once totalNum records have been read from it
                    if(record.partition() == 0){
                        if(num >= totalNum){
                            consumer.pause(Arrays.asList(p0));
                        }
                    }

                    // once partition 1 has read 40 records, resume partition 0
                    if(record.partition() == 1){
                        if(num == 40){
                            consumer.resume(Arrays.asList(p0));
                        }
                    }
                }

                long lastOffset = pRecord.get(pRecord.size() -1).offset();
                // build the offset map for this partition and commit it
                Map<TopicPartition, OffsetAndMetadata> offset = new HashMap<>();
                offset.put(partition,new OffsetAndMetadata(lastOffset+1));
                // commit the offset
                consumer.commitSync(offset);
                System.out.println("=============partition - "+ partition +" end================");
            }
        }
    }

}
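
The token-bucket scheme sketched in the controlPause comments maps nicely onto Guava's RateLimiter, which is already on the classpath via the guava dependency in the pom. A minimal sketch (class name, group id and the 10-records-per-second limit are assumptions); instead of pause()/resume(), this variant simply blocks until a token is available:

package com.imooc.jiangzh.kafka.consumer;

import com.google.common.util.concurrent.RateLimiter;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

// Hypothetical demo class, not part of the original project
public class RateLimitedConsumer {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "192.168.220.128:9092");
        props.setProperty("group.id", "test");
        props.setProperty("enable.auto.commit", "false");
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        TopicPartition p0 = new TopicPartition("jiangzh-topic", 0);
        RateLimiter limiter = RateLimiter.create(10.0); // 10 permits per second (assumed limit)

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.assign(Arrays.asList(p0));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, String> record : records) {
                    // block until a token is available - the simplest form of flow control
                    limiter.acquire();
                    System.out.printf("partition = %d, offset = %d, value = %s%n",
                            record.partition(), record.offset(), record.value());
                }
                consumer.commitSync();
            }
        }
    }
}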
