Implementing a Producer and Consumer with Kafka's Java API (produce and consume)

  1. Create a Maven project and write the pom file

    <dependencies>
       <!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka -->
    		<dependency>
    			<groupId>org.apache.kafka</groupId>
    			<artifactId>kafka_2.12</artifactId>
    			<version>2.6.0</version>
    		</dependency>
    
    		<dependency>
    			<groupId>org.apache.kafka</groupId>
    			<artifactId>kafka-clients</artifactId>
    			<version>2.6.0</version>
    		</dependency>
    
    		<!-- https://mvnrepository.com/artifact/org.apache.flink/flink-clients -->
    		<dependency>
    			<groupId>org.apache.flink</groupId>
    			<artifactId>flink-clients_2.12</artifactId>
    			<version>1.11.1</version>
    		</dependency>
    
    		<!-- https://mvnrepository.com/artifact/org.apache.flink/flink-streaming-java -->
    		<dependency>
    			<groupId>org.apache.flink</groupId>
    			<artifactId>flink-streaming-java_2.12</artifactId>
    			<version>1.11.1</version>
    		</dependency>
    
    
    		<!-- https://mvnrepository.com/artifact/org.apache.flink/flink-java -->
    		<dependency>
    			<groupId>org.apache.flink</groupId>
    			<artifactId>flink-java</artifactId>
    			<version>1.11.1</version>
    		</dependency>
    
    		<!-- https://mvnrepository.com/artifact/org.apache.flink/flink-connector-kafka -->
    		<dependency>
    			<groupId>org.apache.flink</groupId>
    			<artifactId>flink-connector-kafka_2.12</artifactId>
    			<version>1.11.1</version>
    		</dependency>
    
    		<dependency>
    			<groupId>commons-logging</groupId>
    			<artifactId>commons-logging</artifactId>
    			<version>1.1.1</version>
    		</dependency>
    
    		<dependency>
    			<groupId>commons-cli</groupId>
    			<artifactId>commons-cli</artifactId>
    			<version>1.4</version>
    		</dependency>
    
    		<dependency>
    			<groupId>org.slf4j</groupId>
    			<artifactId>slf4j-api</artifactId>
    			<version>1.8.0-beta0</version>
    		</dependency>
    		<dependency>
    			<groupId>org.slf4j</groupId>
    			<artifactId>slf4j-log4j12</artifactId>
    			<version>1.8.0-beta0</version>
    		</dependency>
    
    		<dependency>
    			<groupId>log4j</groupId>
    			<artifactId>log4j</artifactId>
    			<version>1.2.17</version>
    		</dependency>
      </dependencies>
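
    The slf4j-log4j12 binding above routes SLF4J logging to log4j, which looks for a log4j.properties file on the classpath (typically src/main/resources). A minimal console-only configuration sketch, assuming no file appenders are needed:

        log4j.rootLogger=INFO, stdout
        log4j.appender.stdout=org.apache.log4j.ConsoleAppender
        log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
        log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c - %m%n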
    
  2. Write the kafkaProducer (producer) class

    package com.huyue.kafka;
    
    import java.util.Properties;
    
    import org.apache.kafka.clients.producer.Callback;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.clients.producer.RecordMetadata;
    import org.apache.log4j.Logger;
    
    /** 
     * @author Hu.Yue
     *
     */
    public class kafkaProducer {
    
    	private static Logger logger = Logger.getLogger(kafkaProducer.class);
    	private KafkaProducer<String, String> producer;
    	private Properties properties;
    	
    	
    	
    	public kafkaProducer() {
    		properties = new Properties();
    		
    		// Kafka broker address (host:port)
    		properties.put("bootstrap.servers", "localhost:9092");
    		// security settings: the SASL mechanism must match the JAAS login module
    		// (ScramLoginModule pairs with SCRAM-SHA-256, PlainLoginModule with PLAIN)
    		properties.put("sasl.jaas.config", "org.apache.kafka.common.security.scram.ScramLoginModule required username='xxx' password='xxx';");
    		properties.put("security.protocol", "SASL_PLAINTEXT");
    		properties.put("sasl.mechanism", "SCRAM-SHA-256");
    		// serializers for record keys and values
    		properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    		properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    		// tuning: wait for all in-sync replicas, retry transient failures,
    		// 64 KB batches, 1 ms linger, 32 MB send buffer, 10 MB max request size
    		properties.put("acks", "all");
    		properties.put("retries", 3);
    		properties.put("batch.size", 65536);
    		properties.put("linger.ms", 1);
    		properties.put("buffer.memory", 33554432);
    		properties.put("max.request.size", 10485760);
    
    		producer = new KafkaProducer<String, String>(properties);
    	}
    	
    	
    	
    	/**  
    	* @Author: Hu.Yue
    	* @Title: sendRecorder  
    	* @Description: send data using the default partitioner
    	* @param key record key
    	* @param value record value
    	* @return void 
    	*/ 
    	public void sendRecorder(String key, String value) {
    		ProducerRecord<String, String> record = new ProducerRecord<String, String>("ict-topic",key, value);
    		producer.send(record);
    	}
    	
    	
    	/**  
    	* @Author: Hu.Yue
    	* @Title: sendRecordWithCallBack  
    	* @Description: send data and log the resulting partition/offset via a callback
    	* @param key record key
    	* @param value record value
    	* @return void 
    	*/ 
    	public void sendRecordWithCallBack(String key, String value) {
    		ProducerRecord<String, String> record = new ProducerRecord<String, String>("ict-topic", key, value);
    		
    		producer.send(record, new Callback() {
    			
    			public void onCompletion(RecordMetadata metadata, Exception exception) {
    				if (exception == null) {
    					logger.info("stored in partition: " + metadata.partition() + ", offset: " + metadata.offset() + ", ts: " + metadata.timestamp());
    				} else {
    					exception.printStackTrace();
    				}
    			}
    		});
    	}
    	
    	
    	
    	/**  
    	* @Author: Hu.Yue
    	* @Title: close  
    	* @Description: flush pending records and close the producer connection
    	* @return void 
    	*/ 
    	public void close() {
    		producer.flush();
    		producer.close();
    	}
    	
    	public static void main(String[] args) {
    		kafkaProducer client = new kafkaProducer();
    		
    		String value = "hello world123";
    		client.sendRecorder("huyue", value);
    		
    		client.close();
    	}
    }
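
    The string keys used above are easy to mistype; the client library also exposes them as constants on ProducerConfig. A short sketch of the same core configuration written that way (security and tuning settings omitted for brevity), assuming the same broker and topic:

        import java.util.Properties;
        
        import org.apache.kafka.clients.producer.KafkaProducer;
        import org.apache.kafka.clients.producer.ProducerConfig;
        import org.apache.kafka.clients.producer.ProducerRecord;
        import org.apache.kafka.common.serialization.StringSerializer;
        
        public class ProducerConfigExample {
        	public static void main(String[] args) {
        		Properties props = new Properties();
        		// the same settings as above, expressed through ProducerConfig constants
        		props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        		props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        		props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        		props.put(ProducerConfig.ACKS_CONFIG, "all");
        		props.put(ProducerConfig.RETRIES_CONFIG, 3);
        		
        		// try-with-resources closes the producer, which flushes pending records
        		try (KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props)) {
        			producer.send(new ProducerRecord<String, String>("ict-topic", "huyue", "hello world"));
        		}
        	}
        }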
    
  3. Write the kafkaConsumer (consumer) class

    package com.huyue.kafka;
    
    import java.time.Duration;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Properties;
    
    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    
    /**
     * @author Hu.Yue
     *
     */
    public class kafkaConsumer {
    
    	private Properties properties = new Properties();
    	private KafkaConsumer<String, String> consumer;
    	Log logger = LogFactory.getLog(kafkaConsumer.class);
    	
    	public kafkaConsumer() {
    		
    		// Kafka broker address (host:port)
    		properties.setProperty("bootstrap.servers", "localhost:9092");
    		// security settings: the SASL mechanism must match the JAAS login module
    		properties.put("sasl.jaas.config", "org.apache.kafka.common.security.scram.ScramLoginModule required username='xxx' password='xxx';");
    		properties.put("security.protocol", "SASL_PLAINTEXT");
    		properties.put("sasl.mechanism", "SCRAM-SHA-256");
    		// deserializers for record keys and values
    		properties.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    		properties.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    		// consumer group and offset behavior
    		properties.setProperty("group.id", "java_group1");
    		properties.setProperty("auto.offset.reset", "earliest");
    		// commit offsets automatically in the background
    		properties.setProperty("enable.auto.commit", "true");
    		consumer = new KafkaConsumer<String, String>(properties);
    	}
    	
    	
    	
    	/**  
    	* @Author: Hu.Yue
    	* @Title: subscribeTopic  
    	* @Description: subscribe to the topic and poll records in a loop
    	* @return void 
    	*/ 
    	public void subscribeTopic() {
    		List<String> topicList = new ArrayList<String>();
    		topicList.add("ict-topic");
    		consumer.subscribe(topicList);
    		
    		while (true) {
    			// poll(long) is deprecated since Kafka 2.0; pass a Duration timeout instead
    			ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
    			for (ConsumerRecord<String, String> record : records) {
    				System.out.println(record.key());
    				System.out.println(record.value());
    			}
    		}
    	}
    	
    	public static void main(String[] args) {
    		kafkaConsumer client = new kafkaConsumer();
    		client.subscribeTopic();
    	}
    	
    }
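
    With enable.auto.commit set to true, offsets are committed in the background on the poll cadence, so a record can be marked consumed before processing finishes. When that matters, a manual-commit variant is a common alternative. A minimal sketch, assuming the same topic and group as above (SASL settings omitted):

        import java.time.Duration;
        import java.util.Collections;
        import java.util.Properties;
        
        import org.apache.kafka.clients.consumer.ConsumerConfig;
        import org.apache.kafka.clients.consumer.ConsumerRecord;
        import org.apache.kafka.clients.consumer.ConsumerRecords;
        import org.apache.kafka.clients.consumer.KafkaConsumer;
        
        public class ManualCommitConsumer {
        	public static void main(String[] args) {
        		Properties props = new Properties();
        		props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        		props.put(ConsumerConfig.GROUP_ID_CONFIG, "java_group1");
        		props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
        				"org.apache.kafka.common.serialization.StringDeserializer");
        		props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
        				"org.apache.kafka.common.serialization.StringDeserializer");
        		// disable auto-commit so offsets advance only after processing succeeds
        		props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        		props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        
        		try (KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props)) {
        			consumer.subscribe(Collections.singletonList("ict-topic"));
        			while (true) {
        				ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
        				for (ConsumerRecord<String, String> record : records) {
        					System.out.println(record.key() + " -> " + record.value());
        				}
        				// commit synchronously once the whole batch has been processed
        				consumer.commitSync();
        			}
        		}
        	}
        }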
    
    