Receiving and Processing Data by Overriding an Interface's Abstract Method

This post documents a scenario from my own work: how to receive the data delivered through an interface by overriding its abstract method, and then apply your own business logic to it. Kafka serves as the data source here, but it is optional; the code includes comments showing how to run the same flow without it.

I. Implementing the interface layer

1. Create the interface class IManager

package com.cwp.jar.impl;

/**
 * Business-logic interface.
 *
 * @since:
 * @create: 2020-07-10 10:55
 */
public interface IManager {

    /**
     * Message handling: callers of the interface override this method.
     *
     * @param offset offset of the incoming message
     * @param param  message payload
     * @return 0 on success, any other value on failure
     */
    int process(long offset, String param);

}
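
For callers that prefer a named class over the anonymous implementation shown later, an IManager implementation can also live in its own class. A minimal sketch (the class name LoggingManager is illustrative, not part of the original project):

package com.cwp.jar.impl;

/**
 * Hypothetical IManager implementation: prints each message and reports success.
 */
public class LoggingManager implements IManager {

    @Override
    public int process(long offset, String param) {
        System.out.println("LoggingManager: offset = " + offset + ", param = " + param);
        return 0; // 0 signals success; any other value signals failure
    }
}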

2. Create the business-logic class ManagerService

package com.cwp.jar.service;

import com.cwp.jar.impl.IManager;
import com.cwp.jar.kafka.Consumer;

/**
 * Business-logic layer.
 *
 * @create: 2020-07-10 11:03
 * @since:
 */
public class ManagerService {

    private static ManagerService instance;

    private ManagerService() {
    }

    public static ManagerService getInstance() {
        if (instance == null) {
            instance = new ManagerService();
        }
        return instance;
    }

    /**
     * The entry point exposed to callers.
     *
     * @param manager the implementation passed in by the caller; its process
     *                method receives every message returned from Kafka, where
     *                the caller applies its own business logic
     */
    public void handle(IManager manager) {
        Consumer consumer = new Consumer(manager);
        consumer.init();

        // The block below shows the same flow WITHOUT the Kafka consumer:
        // int result;
        // for (long i = 0; i < 20; i++) {
        //     result = manager.process(i, "hello..." + i);
        //     if (result == 0) {
        //         System.out.println("Business processing finished...");
        //     } else {
        //         System.out.println("Processing failed...");
        //     }
        //     try {
        //         Thread.sleep(1000);
        //     } catch (InterruptedException e) {
        //         e.printStackTrace();
        //     }
        // }

    }

}
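
One caveat: the lazy getInstance() above is not thread-safe; two threads calling it at the same time could each construct their own instance. That is harmless in this single-threaded demo, but if the service might be fetched from several threads, the initialization-on-demand holder idiom is a simple fix. A sketch of that variant (the handle method stays exactly as above):

public class ManagerService {

    private ManagerService() {
    }

    // The JVM loads Holder (and creates INSTANCE) only on first access,
    // and class initialization is guaranteed to be thread-safe.
    private static class Holder {
        private static final ManagerService INSTANCE = new ManagerService();
    }

    public static ManagerService getInstance() {
        return Holder.INSTANCE;
    }
}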

3. Create the Kafka consumer class Consumer

package com.cwp.jar.kafka;

import com.cwp.jar.impl.IManager;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.TimeUnit;

/**
 * Kafka consumer.
 *
 * @create: 2020-06-05 11:42
 * @since:
 */
public class Consumer {

    private static KafkaConsumer<String, byte[]> consumer = null;

    private IManager iManager;

    public Consumer(IManager iManager) {
        this.iManager = iManager;
    }

    public void init() {
        // Create the consumer, retrying every 3 seconds if construction fails.
        while (consumer == null) {
            try {
                consumer = this.getKafkaConsumer();
            } catch (Exception e) {
                e.printStackTrace();
                try {
                    TimeUnit.SECONDS.sleep(3);
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    return;
                }
            }
        }
        consumer.subscribe(Arrays.asList("test")); // topic(s) to consume; several can be subscribed at once
        System.out.println("Subscribed to topic ===> " + Arrays.asList("test"));
        while (true) {
            try {
                ConsumerRecords<String, byte[]> records = consumer.poll(100); // poll with a 100 ms timeout
                for (ConsumerRecord<String, byte[]> record : records) {
                    byte[] data = record.value();
                    System.out.println("Received: topic = " + record.topic() + " \t partition = " + record.partition()
                        + "\t offset = " + record.offset() + "\t value = " + new String(data) + "\t key = " + record.key());

                    this.handleRecord(record); // business processing, delegated to the IManager callback

                    this.commitOffset(record); // commit the offset after handling the record
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }

    /**
     * Message handling: the business logic is delegated to the interface caller.
     */
    private void handleRecord(ConsumerRecord<String, byte[]> record) {
        int result = iManager.process(record.offset(), new String(record.value()));
        if (result == 0) {
            System.out.println("Business processing succeeded...");
        } else {
            System.out.println("Business processing failed...");
        }
    }

    /**
     * Commit the offset for the record's partition. Kafka expects the offset of
     * the NEXT message to consume, so record.offset() + 1 is committed here;
     * committing record.offset() itself would redeliver this record after a restart.
     */
    private void commitOffset(ConsumerRecord<String, byte[]> record) {
        Map<TopicPartition, OffsetAndMetadata> commitDataMap = new HashMap<>();
        TopicPartition topicPartition = new TopicPartition(record.topic(), record.partition());
        OffsetAndMetadata meta = new OffsetAndMetadata(record.offset() + 1);
        commitDataMap.put(topicPartition, meta);
        consumer.commitSync(commitDataMap);
        System.out.println("Offset committed... offset = " + record.offset());
    }

    /**
     * Create the Kafka consumer.
     */
    private KafkaConsumer<String, byte[]> getKafkaConsumer() {
        Properties props = new Properties();
        // Kafka broker address(es); not every broker in the cluster has to be listed
        props.put("bootstrap.servers", "192.168.126.128:9092");
        // consumer group id
        props.put("group.id", "test");
        // disable auto-commit: offsets are committed manually after each record is handled
        props.put("enable.auto.commit", "false");
        // auto-commit interval (ignored while auto-commit is disabled)
        props.put("auto.commit.interval.ms", "1000");
        // key deserializer class
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // value deserializer class
        props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");

        return new KafkaConsumer<>(props);
    }
}
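
Note that init() never returns, so it blocks the calling thread forever and offers no clean way to stop. A common pattern with the Kafka consumer API is to run the poll loop on a dedicated thread and call consumer.wakeup() from a shutdown hook; poll() then throws WakeupException, letting the loop close the consumer. A minimal sketch of how the loop in init() could be adapted (illustrative, not part of the original code):

// Registered once, e.g. at the top of init():
Runtime.getRuntime().addShutdownHook(new Thread(() -> consumer.wakeup()));

try {
    while (true) {
        ConsumerRecords<String, byte[]> records = consumer.poll(100);
        for (ConsumerRecord<String, byte[]> record : records) {
            this.handleRecord(record);
            this.commitOffset(record);
        }
    }
} catch (org.apache.kafka.common.errors.WakeupException e) {
    // Expected during shutdown; fall through and close the consumer.
} finally {
    consumer.close();
}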

II. Implementing the calling layer

1. Create the test class Main

package com.cwp.jar.init;

import com.cwp.jar.impl.IManager;
import com.cwp.jar.service.ManagerService;

/**
 * Business class that actually invokes the interface.
 *
 * @create: 2020-07-10 11:18
 * @since:
 */
public class Main {

    public static void main(String[] args) {

        ManagerService managerService = ManagerService.getInstance();

        managerService.handle(new IManager() {
            @Override
            public int process(long offset, String param) {
                System.out.println("Processing... offset = " + offset + " param = " + param);
                return 0;
            }
        });

    }
}

At this point the code is complete: start the main method and you will receive the data delivered through the interface.
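
Incidentally, because IManager declares a single abstract method, callers on Java 8+ can pass a lambda instead of the anonymous class; the following is equivalent to the Main class above:

ManagerService.getInstance().handle((offset, param) -> {
    System.out.println("Processing... offset = " + offset + " param = " + param);
    return 0;
});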

Finally, the pom dependencies and the Kafka producer code are attached for reference.

        <!-- Kafka message queue -->
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>0.9.0.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.11</artifactId>
            <version>0.9.0.1</version>
            <exclusions>
                <exclusion>
                    <groupId>org.slf4j</groupId>
                    <artifactId>slf4j-log4j12</artifactId>
                </exclusion>
            </exclusions>
        </dependency>
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
            <version>1.7.5</version>
        </dependency>
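
Since slf4j-log4j12 is on the classpath, log4j will print a "no appenders" warning unless it finds a configuration file. A minimal example, assuming it is saved as src/main/resources/log4j.properties:

log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1} - %m%n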

The Kafka producer class Producer

package com.cwp.kafka;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Properties;
import java.util.concurrent.TimeUnit;

/**
 * Kafka producer.
 *
 * @create: 2020-06-05 11:45
 * @since:
 */
public class Producer {

    private static KafkaProducer<String, byte[]> producer = null;

    public static void main(String[] args) {
        new Thread(() -> {
            int index = 0;
            while (true) {
                send("test", 0, String.valueOf(index), ("hello " + index).getBytes());
                index++;
                try {
                    TimeUnit.SECONDS.sleep(1);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        }).start();
    }

    private static void send(String topic, int partition, String key, byte[] packet) {
        // Create the producer lazily, retrying every second if construction fails.
        while (producer == null) {
            try {
                producer = getKafkaProducer();
            } catch (Exception e) {
                e.printStackTrace();
                try {
                    TimeUnit.SECONDS.sleep(1);
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    return;
                }
            }
        }
        // Asynchronous send; the callback reports either the record metadata or an error.
        producer.send(new ProducerRecord<>(topic, partition, key, packet), (metadata, exception) -> {
            if (metadata != null) {
                System.out.println("Sent: topic = " + metadata.topic() + "\t partition = " + metadata.partition()
                    + "\t offset ==> " + metadata.offset() + "\t size = " + packet.length
                    + "\t value = " + new String(packet));
            }
            if (exception != null) {
                exception.printStackTrace();
            }
        });
    }

    private static KafkaProducer<String, byte[]> getKafkaProducer() {
        Properties props = new Properties();
        // Kafka broker host and port
        props.put("bootstrap.servers", "192.168.126.128:9092");
        // wait for acknowledgement from all replicas
        props.put("acks", "all");
        // maximum number of send retries
        props.put("retries", 0);
        // batch size in bytes
        props.put("batch.size", 20971520);
        // maximum size of a single request
        props.put("max.request.size", 2097152);
        // how long to wait before sending a batch
        props.put("linger.ms", 1);
        // size of the send buffer
        props.put("buffer.memory", 33554432);
        // key serializer class
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // value serializer class
        props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
        return new KafkaProducer<>(props);
    }

}
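
One last note on the producer: send() is asynchronous, so records still sitting in the client buffer are lost if the JVM exits before they are transmitted. This demo loops forever and never exits normally, but in real code the producer should be flushed and closed on shutdown. A sketch of one way to do it (not part of the original code):

// Deliver any buffered records before the JVM exits.
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
    if (producer != null) {
        producer.flush(); // block until all buffered records are sent
        producer.close();
    }
}));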

 
