Utility class
Converts objects to byte arrays and back; used when transporting data.
package com.yq.utils;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;

public class BeanUtil {

    /**
     * @Description Convert an object to a byte array
     * @param obj the Object to serialize
     * @return byte[] the serialized bytes
     * @author SUNBIN
     * @date 2017-09-08
     */
    public static byte[] ObjectToBytes(Object obj) {
        byte[] bytes = null;
        ByteArrayOutputStream bo = null;
        ObjectOutputStream oo = null;
        try {
            bo = new ByteArrayOutputStream();
            oo = new ObjectOutputStream(bo);
            oo.writeObject(obj);
            oo.flush(); // flush before reading the buffer, otherwise the array may be incomplete
            bytes = bo.toByteArray();
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            try {
                if (oo != null) {
                    oo.close();
                }
                if (bo != null) {
                    bo.close();
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        return bytes;
    }

    /**
     * @Description Convert a byte array back to an object
     * @param bytes the serialized bytes
     * @return Object the deserialized Object
     * @author SUNBIN
     * @date 2017-09-08
     */
    public static Object BytesToObject(byte[] bytes) {
        Object obj = null;
        ByteArrayInputStream bi = null;
        ObjectInputStream oi = null;
        try {
            bi = new ByteArrayInputStream(bytes);
            oi = new ObjectInputStream(bi);
            obj = oi.readObject();
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            try {
                if (oi != null) {
                    oi.close();
                }
                if (bi != null) {
                    bi.close();
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        return obj;
    }
}
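A quick round-trip check of BeanUtil (a minimal sketch; the BeanUtilDemo class and the sample string are my additions for illustration, not part of the original code):

package com.yq.utils;

public class BeanUtilDemo {
    public static void main(String[] args) {
        String original = "hello kafka";
        // serialize, then deserialize, and verify we get the same value back
        byte[] bytes = BeanUtil.ObjectToBytes(original);
        String restored = (String) BeanUtil.BytesToObject(bytes);
        System.out.println(original.equals(restored)); // prints: true
    }
}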
Message entity class
The entity must implement both the Serializable interface and the Encoder&lt;MsgEntity&gt; interface, and it must provide the constructor MsgEntity(VerifiableProperties verifiableProperties); otherwise Kafka reports an error and the program will not start.
package com.yq.dto;

import java.io.Serializable;

import com.yq.utils.BeanUtil;

import kafka.serializer.Encoder;
import kafka.utils.VerifiableProperties;

/**
 * @Description Kafka message entity carrying an image URL
 */
public class MsgEntity implements Serializable, Encoder<MsgEntity> {

    private static final long serialVersionUID = 4948961384423258719L;

    /** image id */
    private int id;
    /** image url */
    private String url;

    /**
     * Constructor
     * @param id  image id
     * @param url image url
     */
    public MsgEntity(int id, String url) {
        this.id = id;
        this.url = url;
    }

    /**
     * Constructor required by Kafka
     * @param verifiableProperties
     */
    public MsgEntity(VerifiableProperties verifiableProperties) {}

    /**
     * Encoder implementation: converts the entity to a byte array
     */
    @Override
    public byte[] toBytes(MsgEntity entity) {
        return BeanUtil.ObjectToBytes(entity);
    }

    public String getUrl() {
        return url;
    }

    public void setUrl(String url) {
        this.url = url;
    }

    public int getId() {
        return id;
    }

    public void setId(int id) {
        this.id = id;
    }
}
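MsgEntity only covers the encoding side. The old Scala client also defines a kafka.serializer.Decoder interface for the consumer; the consumer below deserializes by hand with BeanUtil instead, but a typed decoder would look roughly like this (MsgEntityDecoder is my addition, not part of the original code):

package com.yq.dto;

import com.yq.utils.BeanUtil;

import kafka.serializer.Decoder;
import kafka.utils.VerifiableProperties;

/** Counterpart to MsgEntity's Encoder: turns raw bytes back into a MsgEntity. */
public class MsgEntityDecoder implements Decoder<MsgEntity> {

    // Kafka instantiates decoders reflectively through this constructor signature
    public MsgEntityDecoder(VerifiableProperties verifiableProperties) {}

    @Override
    public MsgEntity fromBytes(byte[] bytes) {
        return (MsgEntity) BeanUtil.BytesToObject(bytes);
    }
}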
Producer
Set the producer's "serializer.class" property to MsgEntity.class.getName().
package com.kafka;

import java.util.Properties;
import java.util.concurrent.TimeUnit;

import com.yq.dto.MsgEntity;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

public class ProducerDemo extends Thread {

    // the topic to send to
    private String topic;

    // constructor
    public ProducerDemo(String topic) {
        this.topic = topic;
    }

    @SuppressWarnings({ "deprecation", "unchecked", "rawtypes" })
    public void run() {
        // create a producer
        Producer producer = createProducer();
        int i = 0;
        while (true) {
            // send one MsgEntity every 5 seconds
            producer.send(new KeyedMessage<Integer, Object>(this.topic,
                    new MsgEntity(i, "www.baidu.com?id=" + i)));
            System.out.println("Producer sent message: " + i);
            try {
                TimeUnit.SECONDS.sleep(5);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            i++;
        }
    }

    @SuppressWarnings({ "deprecation", "rawtypes" })
    private Producer createProducer() {
        Properties prop = new Properties();
        // ZooKeeper quorum
        prop.put("zookeeper.connect", "node1.sunny.cn:2181,node2.sunny.cn:2181,node3.sunny.cn:2181");
        // Message serializer; supply your own class by implementing kafka.serializer.Encoder.
        // By default the key and value use the same serializer, but "key.serializer.class"
        // can specify a separate one for the key.
        prop.put("serializer.class", MsgEntity.class.getName());
        // broker list
        prop.put("metadata.broker.list", "node1.sunny.cn:9092,node2.sunny.cn:9092,node3.sunny.cn:9092");
        // Tells the broker when to send an acknowledgement back to the producer:
        //  0 - the producer never waits for an acknowledgement and keeps sending
        //  1 - the producer waits until the leader replica has received the message
        // -1 - the producer waits until all in-sync replicas have received the message
        prop.put("request.required.acks", "1");
        prop.put("producer.type", "async");
        prop.put("batch.num.messages", "5");
        return new Producer(new ProducerConfig(prop));
    }

    public static void main(String[] args) {
        new ProducerDemo("demo").start();
    }
}
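As the comment in createProducer() notes, the key can get its own serializer through "key.serializer.class". A minimal sketch of that variant with a String key (the "img-" key format is an assumption for illustration; StringEncoder ships with the old client):

// Inside createProducer(), alongside the other properties:
prop.put("key.serializer.class", "kafka.serializer.StringEncoder");

// Inside run(), sending with an explicit String key:
producer.send(new KeyedMessage<String, MsgEntity>(this.topic,
        "img-" + i,                                  // key, serialized by StringEncoder
        new MsgEntity(i, "www.baidu.com?id=" + i))); // value, serialized by MsgEntity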
Consumer
package com.kafka;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import com.yq.dto.MsgEntity;
import com.yq.utils.BeanUtil;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

public class ConsumerDemoOffsetA extends Thread {

    // the topic to consume
    private String topic;

    // constructor
    public ConsumerDemoOffsetA(String topic) {
        this.topic = topic;
    }

    @SuppressWarnings("deprecation")
    public void run() {
        try {
            // build the consumer
            ConsumerConnector consumer = createConsumer();
            // map of topic name -> number of streams to open for that topic
            Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
            // open one stream for this topic
            topicCountMap.put(this.topic, 1);
            // topic name -> list of streams carrying the received data
            Map<String, List<KafkaStream<byte[], byte[]>>> messageStreams =
                    consumer.createMessageStreams(topicCountMap);
            // take the single stream we asked for
            KafkaStream<byte[], byte[]> stream = messageStreams.get(this.topic).get(0);
            ConsumerIterator<byte[], byte[]> iterator = stream.iterator();
            while (iterator.hasNext()) {
                // read each message exactly once, then deserialize it
                byte[] data = iterator.next().message();
                MsgEntity entity = (MsgEntity) BeanUtil.BytesToObject(data);
                consumer.commitOffsets();
                System.out.println("ConsumerA received: " + entity.getUrl());
            }
        } catch (Exception e) {
            System.out.println(e.getMessage());
        }
    }

    // build the actual Consumer
    @SuppressWarnings("deprecation")
    private ConsumerConnector createConsumer() {
        Properties prop = new Properties();
        // ZooKeeper quorum
        prop.put("zookeeper.connect", "node1.sunny.cn:2181,node2.sunny.cn:2181,node3.sunny.cn:2181");
        // the consumer group; each group receives a message only once
        prop.put("group.id", "group1");
        // smallest or largest (the default). Decides where to start when this groupId
        // has no offset stored in ZK (a new groupId, or ZK data was wiped):
        // largest  - start from the latest offset, i.e. only new messages
        // smallest - start from the smallest offset, i.e. consume the topic from the beginning
        prop.put("auto.offset.reset", "smallest");
        return Consumer.createJavaConsumerConnector(new ConsumerConfig(prop));
    }

    public static void main(String[] args) {
        new ConsumerDemoOffsetA("demo").start();
    }
}
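If the hypothetical MsgEntityDecoder sketched earlier is available, the old consumer API can also hand back typed streams, removing the manual BeanUtil call from run(). A sketch under that assumption (kafka.serializer.DefaultDecoder keeps the key as raw bytes):

// Replacement for the stream handling in run(), assuming MsgEntityDecoder exists:
Map<String, List<KafkaStream<byte[], MsgEntity>>> typedStreams =
        consumer.createMessageStreams(topicCountMap,
                new kafka.serializer.DefaultDecoder(null), // key stays byte[]
                new MsgEntityDecoder(null));               // value decoded to MsgEntity
KafkaStream<byte[], MsgEntity> typedStream = typedStreams.get(this.topic).get(0);
for (ConsumerIterator<byte[], MsgEntity> it = typedStream.iterator(); it.hasNext();) {
    MsgEntity entity = it.next().message(); // already a MsgEntity, no cast needed
    System.out.println("ConsumerA received: " + entity.getUrl());
}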