Maven 依赖(pom.xml):

<dependency>
    <groupId>javax</groupId>
    <artifactId>javaee-api</artifactId>
    <version>7.0</version>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.9.2</artifactId>
    <version>0.8.1.1</version>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>RELEASE</version>
</dependency>
2. webSocket服务端实现
1 //此处定义接口的uri
2 @ServerEndpoint("/wbSocket")
3 public class WebSocket {
4 private Session session;
5 public static CopyOnWriteArraySet wbSockets = new CopyOnWriteArraySet(); //此处定义静态变量,以在其他方法中获取到所有连接
6
7 /**
8 * 建立连接。
9 * 建立连接时入参为session
10 */
11 @OnOpen
12 public void onOpen(Session session){
13 this.session = session;
14 wbSockets.add(this); //将此对象存入集合中以在之后广播用,如果要实现一对一订阅,则类型对应为Map。由于这里广播就可以了随意用Set
15 System.out.println("New session insert,sessionId is "+ session.getId());
16 }
17 /**
18 * 关闭连接
19 */
20 @OnClose
21 public void onClose(){
22 wbSockets.remove(this);//将socket对象从集合中移除,以便广播时不发送次连接。如果不移除会报错(需要测试)
23 System.out.println("A session insert,sessionId is "+ session.getId());
24 }
25 /**
26 * 接收前端传过来的数据。
27 * 虽然在实现推送逻辑中并不需要接收前端数据,但是作为一个webSocket的教程或叫备忘,还是将接收数据的逻辑加上了。
28 */
29 @OnMessage
30 public void onMessage(String message ,Session session){
31 System.out.println(message + "from " + session.getId());
32 }
33
34 public void sendMessage(String message) throws IOException {
35 this.session.getBasicRemote().sendText(message);
36 }
37 }
3. kafka消费者实现
1 public class ConsumerKafka extends Thread {
2
3 private KafkaConsumer consumer;
4 private String topic = "kafkaTopic";
5
6 public ConsumerKafka(){
7
8 }
9
10 @Override
11 public void run(){
12 //加载kafka消费者参数
13 Properties props = new Properties();
14 props.put("bootstrap.servers", "localhost:9092");
15 props.put("group.id", "ytna");
16 props.put("enable.auto.commit", "true");
17 props.put("auto.commit.interval.ms", "1000");
18 props.put("session.timeout.ms", "15000");
19 props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
20 props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
21 //创建消费者对象
22 consumer = new KafkaConsumer(props);
23 consumer.subscribe(Arrays.asList(this.topic));
24 //死循环,持续消费kafka
25 while (true){
26 try {
27 //消费数据,并设置超时时间
28 ConsumerRecords records = consumer.poll(100);
29 //Consumer message
30 for (ConsumerRecord record : records) {
31 //Send message to every client
32 for (WebSocket webSocket :wbSockets){
33 webSocket.sendMessage(record.value());
34 }
35 }
36 }catch (IOException e){
37 System.out.println(e.getMessage());
38 continue;
39 }
40 }
41 }
42
43 public void close() {
44 try {
45 consumer.close();
46 } catch (Exception e) {
47 System.out.println(e.getMessage());
48 }
49 }
50
51 //供测试用,若通过tomcat启动需通过其他方法启动线程
52 public static void main(String[] args){
53 ConsumerKafka consumerKafka = new ConsumerKafka();
54 consumerKafka.start();
55 }
56 }
P.S. 需要注意的是 WebSocket 对 Tomcat 版本是有要求的,笔者使用的是 7.0.78。
四. 前端简单实现
1
2
3
4
5
WebSocket client6
var socket;

// Bail out early if the browser has no WebSocket support at all.
if (typeof WebSocket === "undefined") {
    alert("This explorer don't support WebSocket")
}

/**
 * Opens a WebSocket connection to the server endpoint and wires up the
 * four lifecycle callbacks (open / message / close / error).
 */
function connect() {
    // Connect to the WebSocket server endpoint defined by @ServerEndpoint.
    socket = new WebSocket("ws://127.0.0.1:8080/wbSocket");
    socket.onopen = function () {
        alert("WebSocket is open");
    };
    // Fix: the handler receives a MessageEvent; the payload is in .data.
    // Alerting the event object itself would show "[object MessageEvent]".
    socket.onmessage = function (msg) {
        alert("Message is " + msg.data);
    };
    socket.onclose = function () {
        alert("WebSocket is closed");
    };
    socket.onerror = function (e) {
        alert("Error is " + e);
    };
}

/** Closes the current connection. */
function close() {
    socket.close();
}

/** Sends a sample text message to the server. */
function sendMsg() {
    socket.send("This is a client message ");
}
40
41
42
43 connect
44 close
45 sendMsg
46
47
五. 结语
以上基本可以实现将kafka数据实时推送到前端。这是笔者第一篇笔记,不足之处请指出、谅解。
源码:https://github.com/youtNa/webSocketkafka