Kafka: Native Java Usage

No framework involved; this uses the plain Kafka client API.

KafkaProducer: the main design question is whether the sender should wait for an acknowledgement. There are three modes: 1) fire-and-forget (just send the message); 2) synchronous send, blocking until the broker returns the result metadata; 3) asynchronous send, which needs a callback class implementing Callback.
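The three call shapes side by side (a quick sketch; record and callback stand in for the objects built in the class below):

	producer.send(record);                              // 1) fire-and-forget
	RecordMetadata meta = producer.send(record).get();  // 2) synchronous: block on the Future
	producer.send(record, callback);                    // 3) asynchronous with a Callback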

package com.hangzhou.kafka;

import java.util.Properties;
import java.util.concurrent.Future;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

/**
 * Producer
 */

public class MyProducer {

	private static KafkaProducer<String, String> producer;

	static {
		Properties properties = new Properties();

		properties.put("bootstrap", "127.0.0.1:9092");
		//key 和 value序列化
		properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
		properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
		//配置自定义的分配器
		properties.put("partitioner.class", "com.hangzhou.kafka.CustomPartitioner");
		
		producer = new KafkaProducer<>(properties);
	}
	// 1) fire-and-forget: just send
	private static void sendMessageForgetResult() {
		// the topic is a String; key and value types must match the configured serializers
		ProducerRecord<String, String> record = new ProducerRecord<>("imooc-kafka-study", "name", "ForgetResult");
		producer.send(record); // returns Future<RecordMetadata>; failures can surface asynchronously
	}
	
	// 2) synchronous send: block on the Future for the result metadata
	private static void sendMessageSync() throws Exception {

		ProducerRecord<String, String> record = new ProducerRecord<>("imooc-kafka-study", "name", "sync");

		RecordMetadata result = producer.send(record).get(); // block until the broker responds

		System.out.println(result.topic());     // topic name

		System.out.println(result.partition()); // partition number, starting at 0

		System.out.println(result.offset());    // offset within the partition, starting at 0
	}
	
	// 3) asynchronous send: pass a callback implementing Callback
	private static void sendMessageCallback() {

		ProducerRecord<String, String> record = new ProducerRecord<>("imooc-kafka-study", "name", "callback");

		producer.send(record, new MyProducerCallback());

	}
	
	
	// the asynchronous callback class: implements Callback
	private static class MyProducerCallback implements Callback {
		
		@Override
		public void onCompletion(RecordMetadata metadata, Exception exception) {
			
			if (exception != null) {
				exception.printStackTrace();
				return;
			}
			System.out.println(metadata.topic());     // topic name

			System.out.println(metadata.partition()); // partition number, starting at 0

			System.out.println(metadata.offset());    // offset within the partition, starting at 0
			
			System.out.println("Coming in MyProducerCallback");
			
		}
		
	}
	
	
	public static void main(String[] args) throws Exception {

		sendMessageForgetResult();
		sendMessageSync();
		sendMessageCallback();
		// close once, after all sends; closing inside each method would break the later calls
		producer.close();
	}
	

}
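Since send() returns a Future<RecordMetadata>, a failed synchronous send surfaces as an ExecutionException wrapping the real cause. A minimal sketch of handling it (a standalone fragment, not part of the class above):

	try {
		RecordMetadata meta = producer.send(record).get();
		System.out.println("stored at offset " + meta.offset());
	} catch (java.util.concurrent.ExecutionException e) {
		// the broker rejected the record (e.g. unknown topic, timeout)
		e.getCause().printStackTrace();
	} catch (InterruptedException e) {
		Thread.currentThread().interrupt(); // restore the interrupt flag
	}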

Another point worth covering: the partition assigner (a custom Partitioner).

package com.hangzhou.kafka;

import java.util.List;
import java.util.Map;

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.network.InvalidReceiveException;
import org.apache.kafka.common.utils.Utils;


/**
 * Custom partition assigner
 * @author Administrator
 *
 */

public class CustomPartitioner implements Partitioner {
	
	// keyBytes: the serialized key bytes; cluster: current Kafka cluster metadata
	@Override
	public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
		
		List<PartitionInfo> partitionInfos = cluster.partitionsForTopic(topic);
		
		int size = partitionInfos.size();
		// the key must be present and must be a String
		if (null == keyBytes || !(key instanceof String)) {
			throw new InvalidReceiveException("kafka message must have key");
		}
		// only one partition: nothing to choose
		if (size == 1) {
			return 0;
		}
		// pin key "name" to the last partition
		if (key.equals("name")) {
			return size - 1;
		}
		// hash the key bytes and take the remainder over the remaining partitions (0..size-2);
		// Utils.toPositive avoids the Math.abs(Integer.MIN_VALUE) overflow pitfall
		return Utils.toPositive(Utils.murmur2(keyBytes)) % (size - 1);
	}
	
	@Override
	public void configure(Map<String, ?> configs) {
		
	}

	

	@Override
	public void close() {
		
	}
	
	

}
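With this partitioner on, say, a four-partition topic, every record keyed "name" lands on the last partition (3), and all other keys hash into partitions 0..2. A small illustration (assuming the same topic as the producer above):

	// key "name" -> partition size - 1 (here: 3)
	ProducerRecord<String, String> pinned = new ProducerRecord<>("imooc-kafka-study", "name", "v1");
	// any other key -> murmur2(keyBytes) % (size - 1), i.e. one of partitions 0..2
	ProducerRecord<String, String> hashed = new ProducerRecord<>("imooc-kafka-study", "other-key", "v2");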

Consumer: the key question here is offset committing.

package com.hangzhou.kafka;
/**
 * Consumer: consumes messages. KafkaConsumer is not thread-safe, so use one consumer per thread.
 * @author Administrator
 *
 */

import java.util.Collections;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.apache.kafka.common.TopicPartition;

public class MyConsumer {

	private static KafkaConsumer<String, String> consumer;

	private static Properties properties;

	static {
		properties = new Properties();

		properties.put("bootstrap", "127.0.0.1:9092");
		properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		// the consumer group id
		properties.put("group.id", "kafkaStudy");
	}

	// 1. Consume with automatic offset commits. To reduce duplicate consumption and avoid message loss, the manual-commit variants below are introduced.
	private static void generalConsumeMessageAutoCommit() {
		// enable automatic offset committing
		properties.put("enable.auto.commit", true);
		consumer = new KafkaConsumer<String, String>(properties);
		// subscribe to the topic(s) to consume
		consumer.subscribe(Collections.singleton("imooc-kafka-study"));

		while (true) {
			boolean flag = true;
			// pull a batch of records from Kafka
			ConsumerRecords<String, String> records = consumer.poll(100); // poll timeout in ms
			// print each message
			for (ConsumerRecord<String, String> record : records) {
				System.out.println(String.format("topic = %s,partition = %s,key = %s ,value = %s", record.topic(),
						record.partition(), record.key(), record.value()));
				// stop once the sentinel message "done" arrives
				if (record.value().equals("done")) {
					flag = false;
				}
			}
			// exit the poll loop
			if (!flag) {
				break;
			}

		}

		consumer.close();
	}

	// manual synchronous offset commit
	private static void generalConsumerMessageSyncCommit() {
		// disable automatic offset committing
		properties.put("enable.auto.commit", false);
		consumer = new KafkaConsumer<String, String>(properties);
		// subscribe to the topic(s); subscribe takes a collection, so several topics are possible
		consumer.subscribe(Collections.singleton("imooc-kafka-study"));

		while (true) {
			boolean flag = true;

			ConsumerRecords<String, String> records = consumer.poll(100);

			for (ConsumerRecord<String, String> record : records) {
				System.out.println(String.format("topic = %s,partition = %s,key = %s ,value = %s", record.topic(),
						record.partition(), record.key(), record.value()));
				// stop once the sentinel message "done" arrives
				if (record.value().equals("done")) {
					flag = false;
				}
			}
			// synchronous commit: blocks, and retries on recoverable errors
			consumer.commitSync();

			if (!flag) {
				break;
			}

		}

		consumer.close();
	}

	// manual asynchronous commit
	private static void generalConsumeMessageAsyncCommit() {
		// disable automatic offset committing
		properties.put("enable.auto.commit", false);
		consumer = new KafkaConsumer<String, String>(properties);
		// subscribe to the topic(s)
		consumer.subscribe(Collections.singleton("imooc-kafka-study"));

		while (true) {
			boolean flag = true;

			ConsumerRecords<String, String> records = consumer.poll(100);

			for (ConsumerRecord<String, String> record : records) {
				System.out.println(String.format("topic = %s,partition = %s,key = %s ,value = %s", record.topic(),
						record.partition(), record.key(), record.value()));
				// stop once the sentinel message "done" arrives
				if (record.value().equals("done")) {
					flag = false;
				}
			}
			// asynchronous commit: does not block. Drawback: a failed commit is not retried,
			// because a retry could overwrite a newer commit. E.g. if commit A (offset 2000)
			// fails and commit B (offset 3000) succeeds, retrying A would rewind the offset.
			consumer.commitAsync();

			if (!flag) {
				break;
			}

		}

		consumer.close();
	}

	// manual asynchronous commit, with a commit callback
	private static void generalConsumeMessageAsyncCommitWithCallback() {

		// disable automatic offset committing
		properties.put("enable.auto.commit", false);
		consumer = new KafkaConsumer<String, String>(properties);
		// subscribe to the topic(s)
		consumer.subscribe(Collections.singleton("imooc-kafka-study"));

		while (true) {
			boolean flag = true;

			ConsumerRecords<String, String> records = consumer.poll(100);

			for (ConsumerRecord<String, String> record : records) {
				System.out.println(String.format("topic = %s,partition = %s,key = %s ,value = %s", record.topic(),
						record.partition(), record.key(), record.value()));
				// stop once the sentinel message "done" arrives
				if (record.value().equals("done")) {
					flag = false;
				}
			}
			// commit asynchronously, with a callback to observe the commit result
			consumer.commitAsync(new OffsetCommitCallback() {

				// anonymous inner class implementing the callback
				@Override
				public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
					// log commit failures
					if (exception != null) {
						System.out.println("commit failed for offsets:" + exception.getMessage());
					}

					// to retry a failed commit safely, compare a monotonically
					// increasing sequence number here (see the sketch after this class)
				}
			});

			if (!flag) {
				break;
			}

		}

		consumer.close();
	}

	// mixed async + sync commit: commit asynchronously on the hot path, and on shutdown make one final synchronous commit so the last offsets are persisted
	private static void mixSyncAndAsyncCommit() {

		// disable automatic offset committing
		properties.put("enable.auto.commit", false);
		consumer = new KafkaConsumer<String, String>(properties);
		// subscribe to the topic(s)
		consumer.subscribe(Collections.singleton("imooc-kafka-study"));

		try {
			while (true) {
				
				boolean flag = true;
				
				ConsumerRecords<String, String> records = consumer.poll(100);
				// consume the batch
				for (ConsumerRecord<String, String> record : records) {
					System.out.println(String.format("topic = %s,partition = %s,key = %s ,value = %s", record.topic(),
							record.partition(), record.key(), record.value()));
					// stop once the sentinel message "done" arrives
					if (record.value().equals("done")) {
						flag = false;
					}
				}
				// fast, non-blocking commit on the hot path
				consumer.commitAsync();

				if (!flag) {
					break;
				}

			}
		} catch (Exception e) {
			System.out.println("commit async error:" + e.getMessage());
		} finally {
			try {
				// one final blocking commit before closing, so the last offsets stick
				consumer.commitSync();
			} finally {
				consumer.close();
			}
		}

	}

	public static void main(String[] args) {
		// pick one of the variants above to run, e.g. generalConsumeMessageAutoCommit();
	}

}
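The callback comment above mentions comparing sequence numbers before retrying. A minimal sketch of that pattern (the commitSeq field and commitWithSafeRetry method are hypothetical names, not part of the course code): record a monotonically increasing counter when each commit is issued, and retry a failed commit only if no newer commit has been issued since, so a retry can never rewind a later successful commit.

	private static final java.util.concurrent.atomic.AtomicInteger commitSeq =
			new java.util.concurrent.atomic.AtomicInteger(0);

	private static void commitWithSafeRetry(final KafkaConsumer<String, String> consumer) {
		final int seq = commitSeq.incrementAndGet();
		consumer.commitAsync(new OffsetCommitCallback() {
			@Override
			public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
				// retry only if this is still the newest commit attempt
				if (exception != null && seq == commitSeq.get()) {
					consumer.commitSync(offsets); // a blocking retry is safe on the consumer thread
				}
			}
		});
	}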

 
