Java: Pushing Kafka Messages over WebSocket

1 pom.xml

<dependencies>
        <!-- WebSocket dependency -->
        <dependency>
            <groupId>javax</groupId>
            <artifactId>javaee-api</artifactId>
            <version>7.0</version>
        </dependency>
        <!-- Kafka dependencies -->
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.9.2</artifactId>
            <version>0.8.1.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>RELEASE</version>
        </dependency>
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-nop</artifactId>
            <version>1.7.2</version>
        </dependency>
    </dependencies>

2 WebSocket

  • WebSocket server endpoint
package com.kafkaweb;

import java.io.IOException;
import java.util.concurrent.CopyOnWriteArraySet;

import javax.websocket.*;
import javax.websocket.server.ServerEndpoint;

@ServerEndpoint("/websocket")
public class WebSocketTest {
    private static int onlineCount = 0;
    public static CopyOnWriteArraySet<WebSocketTest> webSocketSet = new CopyOnWriteArraySet<WebSocketTest>();
    private Session session;
    @OnOpen
    public void onOpen(Session session){
        this.session = session;
        webSocketSet.add(this);     // add to the set of open sessions
        addOnlineCount();           // increment the online count
        System.out.println("New connection opened. Current online count: " + getOnlineCount());
    }

    @OnClose
    public void onClose(){
        webSocketSet.remove(this);  // remove from the set of open sessions
        subOnlineCount();           // decrement the online count
        System.out.println("A connection closed. Current online count: " + getOnlineCount());
    }

    @OnMessage
    public void onMessage(String message, Session session) {
        System.out.println("来自客户端的消息:" + message);
        //群发消息
        for(WebSocketTest item: webSocketSet){
            try {
                item.sendMessage(message);
            } catch (IOException e) {
                e.printStackTrace();
                continue;
            }
        }
    }

    @OnError
    public void onError(Session session, Throwable error){
        System.out.println("发生错误");
        error.printStackTrace();
    }

    
    public void sendMessage(String message) throws IOException{
        this.session.getBasicRemote().sendText(message);
    }

    public static synchronized int getOnlineCount() {
        return onlineCount;
    }

    public static synchronized void addOnlineCount() {
        WebSocketTest.onlineCount++;
    }
    public static synchronized void subOnlineCount() {
        WebSocketTest.onlineCount--;
    }
}
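
To sanity-check the endpoint without a browser, a small Java client can be written against the same javax.websocket API. This is a minimal sketch: the class name WebSocketClientTest, the port 8080 and the context path kafkaweb are assumptions that must be adapted to your Tomcat deployment, and running it standalone also requires a WebSocket client implementation (for example Tyrus) on the classpath, since javaee-api only provides the API.

package com.kafkaweb;

import java.net.URI;
import javax.websocket.ClientEndpoint;
import javax.websocket.ContainerProvider;
import javax.websocket.OnMessage;
import javax.websocket.Session;
import javax.websocket.WebSocketContainer;

// Minimal test client for the /websocket endpoint (sketch; URL and class name are assumptions).
@ClientEndpoint
public class WebSocketClientTest {

    @OnMessage
    public void onMessage(String message) {
        System.out.println("Received from server: " + message);
    }

    public static void main(String[] args) throws Exception {
        WebSocketContainer container = ContainerProvider.getWebSocketContainer();
        // Adjust host, port and context path to match your Tomcat deployment.
        Session session = container.connectToServer(WebSocketClientTest.class,
                new URI("ws://localhost:8080/kafkaweb/websocket"));
        session.getBasicRemote().sendText("hello from test client");
        Thread.sleep(10_000);   // keep the connection open briefly to receive pushed messages
        session.close();
    }
}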

3 Producer

package com.kafkaweb;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

import java.util.Properties;

public class ProducerKafka {
    private final Producer<String, String> producer;
    public final static String TOPIC = "kafkaTopicWebSocket";

    private ProducerKafka(){
        Properties props = new Properties();
        // Kafka broker list (host:port)
        props.put("metadata.broker.list", "127.0.0.1:9092");

        // value serializer class
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        // key serializer class
        props.put("key.serializer.class", "kafka.serializer.StringEncoder");

        props.put("request.required.acks","-1");
        producer = new Producer<String, String>(new ProducerConfig(props));
    }

    void produce() {
        int messageNo = 100;
        final int COUNT = 10000000;

        while (messageNo < COUNT) {
            String key = String.valueOf(messageNo);
            String data = "hello kafka message " + key;
            producer.send(new KeyedMessage<String, String>(TOPIC, key ,data));
            System.out.println(data);
            messageNo ++;
        }
    }
    //http://www.open-open.com/lib/view/open1412991579999.html
    public static void main( String[] args ) {
        ProducerKafka sendMsg = new ProducerKafka();
        sendMsg.produce();
    }
}
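
Since pom.xml also pulls in kafka-clients, the producer could equally be written against the newer org.apache.kafka.clients.producer API. The following is a minimal sketch under that assumption; NewApiProducerKafka is an illustrative class name, not part of the original project, and the broker address mirrors the one used above.

package com.kafkaweb;

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

// Sketch of the same producer using the new kafka-clients API (assumed alternative).
public class NewApiProducerKafka {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "127.0.0.1:9092");
        props.put("acks", "all");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        // try-with-resources closes the producer and flushes buffered records
        try (Producer<String, String> producer = new KafkaProducer<>(props)) {
            for (int i = 100; i < 110; i++) {
                String key = String.valueOf(i);
                String data = "hello kafka message " + key;
                producer.send(new ProducerRecord<>(ProducerKafka.TOPIC, key, data));
                System.out.println(data);
            }
        }
    }
}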

4 Consumer

package com.kafkaweb;

import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.serializer.StringDecoder;
import kafka.utils.VerifiableProperties;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.io.IOException;

import static com.kafkaweb.WebSocketTest.webSocketSet;

public class ConsumerKafka extends Thread {
    // high-level consumer connector
    private final ConsumerConnector consumer;

    public ConsumerKafka(){
        Properties props = new Properties();
        props.put("zookeeper.connect", "127.0.0.1:2181");
        props.put("group.id", "jd-group");
        props.put("zookeeper.session.timeout.ms", "4000");
        props.put("zookeeper.sync.time.ms", "200");
        props.put("auto.commit.interval.ms", "1000");
        props.put("auto.offset.reset", "smallest");
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        // build the consumer configuration
        ConsumerConfig config = new ConsumerConfig(props);
        // create the consumer connector
        consumer = kafka.consumer.Consumer.createJavaConsumerConnector(config);

    }

    @Override
    public void run(){
        Map<String, Integer> topicCountMap = new HashMap<>();
        topicCountMap.put(ProducerKafka.TOPIC, new Integer(1));
        StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
        StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

        Map<String, List<KafkaStream<String, String>>> consumerMap =
                consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);
        KafkaStream<String, String> stream = consumerMap.get(ProducerKafka.TOPIC).get(0);

        // read each message once and push it to every open WebSocket session
        ConsumerIterator<String, String> it = stream.iterator();
        while (it.hasNext()) {
            String message = it.next().message();
            System.out.println(message);
            for (WebSocketTest webSocket : webSocketSet) {
                try {
                    webSocket.sendMessage(message);
                } catch (IOException e) {
                    System.out.println(e.getMessage());
                }
            }
        }
    }

    // For local testing only; when deployed in Tomcat, the thread must be started
    // another way (e.g. from a ServletContextListener, see the sketch in section 5).
    public static void main(String[] args){
        ConsumerKafka consumerKafka = new ConsumerKafka();
        consumerKafka.start();
    }
}
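
The kafka-clients dependency also provides the newer consumer API. Below is a minimal sketch of the same consume-and-push loop with KafkaConsumer, assuming a recent kafka-clients version (poll(Duration) requires 2.0+) and a broker on 127.0.0.1:9092; NewApiConsumerKafka is an illustrative name and not part of the original project.

package com.kafkaweb;

import java.io.IOException;
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import static com.kafkaweb.WebSocketTest.webSocketSet;

// Sketch of the consume-and-push loop using the new kafka-clients consumer API (assumed alternative).
public class NewApiConsumerKafka extends Thread {

    @Override
    public void run() {
        Properties props = new Properties();
        props.put("bootstrap.servers", "127.0.0.1:9092");
        props.put("group.id", "jd-group");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList(ProducerKafka.TOPIC));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println(record.value());
                    // push each record to every open WebSocket session
                    for (WebSocketTest webSocket : webSocketSet) {
                        try {
                            webSocket.sendMessage(record.value());
                        } catch (IOException e) {
                            System.out.println(e.getMessage());
                        }
                    }
                }
            }
        }
    }
}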

5 Startup sequence

Flow: start → start ZooKeeper → start Kafka → start consumer → start Tomcat → start producer → end
  • Start ZooKeeper
bin/zookeeper-server-start.sh config/zookeeper.properties
  • Start Kafka
bin/kafka-server-start.sh config/server.properties
  • Start the consumer
    Run the consumer thread in the project (RunThread)
  • Start Tomcat
    Configure and start Tomcat; a listener sketch for starting the consumer thread inside Tomcat follows this list
    Tomcat setup reference: Deploying a Java web app with Maven
  • Start the producer
    Run ProducerKafka in the project
    Producer reference: Deploying Kafka with Maven
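
Because Tomcat never calls a main method, one common way to start the consumer thread at deployment time is a ServletContextListener, relying on the servlet annotations already provided by the javaee-api dependency above. This is only a sketch; ConsumerStartupListener is a hypothetical class name, and the original project may wire the thread up differently.

package com.kafkaweb;

import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
import javax.servlet.annotation.WebListener;

// Starts the Kafka consumer thread when the web application is deployed in Tomcat
// (sketch; not necessarily how the original project starts the thread).
@WebListener
public class ConsumerStartupListener implements ServletContextListener {

    private ConsumerKafka consumerKafka;

    @Override
    public void contextInitialized(ServletContextEvent sce) {
        consumerKafka = new ConsumerKafka();
        consumerKafka.setDaemon(true);   // do not block Tomcat shutdown
        consumerKafka.start();
    }

    @Override
    public void contextDestroyed(ServletContextEvent sce) {
        // the daemon thread stops with the JVM; add explicit shutdown logic here if needed
    }
}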

Source code: https://github.com/xindaqi/java.git


References
[1] https://juejin.im/post/5b20e2e16fb9a01e2c698c51
[2] https://www.cnblogs.com/xdp-gacl/p/5193279.html
[3] https://blog.csdn.net/weixin_38175358/article/details/85105730
[4] https://blog.csdn.net/Xin_101/article/details/97813718
[5] https://blog.csdn.net/Xin_101/article/details/97938371
