kafka入门程序

原创 2017年05月26日 14:14:00

一. 依赖jar包

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>com.zrj.points</groupId>
  <artifactId>kafka-consumer</artifactId>
  <version>0.0.1-SNAPSHOT</version>
  <packaging>war</packaging>
  <name>points-consumer</name>
  <properties>
		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
		<spring.version>4.3.3.RELEASE</spring.version>
  </properties>
  <dependencies>
		<dependency>
		  <groupId>org.slf4j</groupId>
		  <artifactId>slf4j-api</artifactId>
		  <version>1.7.5</version>
		</dependency>
		<dependency>
		  <groupId>org.slf4j</groupId>
		  <artifactId>slf4j-log4j12</artifactId>
		  <version>1.7.5</version>
		</dependency>
		<!-- Scala runtime must match the kafka artifact's scala suffix (kafka_2.9.2). -->
		<dependency>
		  <groupId>org.scala-lang</groupId>
		  <artifactId>scala-library</artifactId>
		  <version>2.9.2</version>
		</dependency>
		<dependency>
		  <groupId>com.101tec</groupId>
		  <artifactId>zkclient</artifactId>
		  <version>0.3</version>
		</dependency>
		<!-- Legacy 0.8.x client; the code below uses the old kafka.javaapi.* API. -->
		<dependency>
		  <groupId>org.apache.kafka</groupId>
		  <artifactId>kafka_2.9.2</artifactId>
		  <version>0.8.1.1</version>
		</dependency>
		<dependency>
		  <groupId>org.apache.zookeeper</groupId>
		  <artifactId>zookeeper</artifactId>
		  <version>3.4.5</version>
		</dependency>
		<dependency>
		  <groupId>com.yammer.metrics</groupId>
		  <artifactId>metrics-core</artifactId>
		  <version>2.2.0</version>
		</dependency>
		<dependency>
		  <groupId>commons-logging</groupId>
		  <artifactId>commons-logging</artifactId>
		  <version>1.2</version>
		</dependency>
		<dependency>
		  <groupId>log4j</groupId>
		  <artifactId>log4j</artifactId>
		  <version>1.2.17</version>
		</dependency>
		<dependency>
		  <groupId>commons-lang</groupId>
		  <artifactId>commons-lang</artifactId>
		  <version>2.6</version>
		</dependency>
  </dependencies>
  <build>
		<!-- Fixed typo: was "kafka-consmer". -->
		<finalName>kafka-consumer</finalName>
		<plugins>
			<plugin>
				<artifactId>maven-compiler-plugin</artifactId>
				<configuration>
					<source>1.7</source>
					<target>1.7</target>
					<encoding>UTF-8</encoding>
				</configuration>
			</plugin>

			<plugin>
				<groupId>org.eclipse.jetty</groupId>
				<artifactId>jetty-maven-plugin</artifactId>
				<version>9.2.11.v20150529</version>
				<configuration>
					<jvmArgs>-Xms456m -Xmx456m -XX:MaxNewSize=456m
						-XX:MaxPermSize=1024m</jvmArgs>
					<scanIntervalSeconds>10</scanIntervalSeconds>
					<webApp>
						<contextPath>/</contextPath>
					</webApp>
					<httpConnector>
						<port>9090</port>
					</httpConnector>
				</configuration>
			</plugin>
		</plugins>
	</build>
</project>



二. 生产者

package com.qianbao.kafka;

import java.util.Date;
import java.util.Properties;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

public class KafkaProducer {

	/** Old scala-client producer (kafka.javaapi.producer) with String key and value. */
	private final Producer<String, String> producer;

	private KafkaProducer() {
		Properties props = new Properties();

		// Broker list, e.g. "host1:9092,host2:9092". The old producer API uses
		// metadata.broker.list (not bootstrap.servers, and not ZooKeeper).
		props.put("metadata.broker.list", Utils.KAFKA_BROKER_LIST);

		// Value serializer.
		props.put("serializer.class", "kafka.serializer.StringEncoder");
		// Key serializer.
		props.put("key.serializer.class", "kafka.serializer.StringEncoder");
		// acks=-1: wait until all in-sync replicas acknowledge each message.
		props.put("request.required.acks", "-1");
		// Custom partitioner that routes each message by its key.
		props.put("partitioner.class", "com.qianbao.kafka.RoutePartition");

		producer = new Producer<String, String>(new ProducerConfig(props));
	}

	/**
	 * Sends the messages numbered [messageNo, count) to the configured topic,
	 * using the message number as the key.
	 */
	void produce(int messageNo, int count) {
		while (messageNo < count) {
			String key = String.valueOf(messageNo);
			String data = "hello kafka message " + key;
			producer.send(new KeyedMessage<String, String>(Utils.KAFKA_TOPIC_NAME, key, data));
			messageNo++;
		}
	}

	/** Releases the network connections held by the producer. */
	void close() {
		producer.close();
	}

	public static void main(String[] args) {
		int messageNo = 1000;
		int count = 1010;

		long startTime = System.currentTimeMillis();
		KafkaProducer kafkaProducer = new KafkaProducer();
		try {
			kafkaProducer.produce(messageNo, count);
		} finally {
			// Original leaked the producer: it was never closed.
			kafkaProducer.close();
		}
		long endTime = System.currentTimeMillis();
		System.out.println("spend time:" + (endTime - startTime));
	}
}

三. 消费者

package com.qianbao.kafka;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.serializer.StringDecoder;
import kafka.utils.VerifiableProperties;

public class KafkaConsumer {
	/** Old high-level (ZooKeeper based) consumer connector. */
	private final ConsumerConnector consumer;
	/** One reader thread per partition stream. */
	private ExecutorService threadPool;

	private KafkaConsumer() {
		Properties props = new Properties();
		// ZooKeeper connect string, e.g. "host:2181".
		props.put("zookeeper.connect", Utils.KAFKA_ZOOKEEPER);

		// Consumers sharing a group id divide the topic's partitions between them.
		props.put("group.id", Utils.KAFKA_CONSUMER_GROUP_ID);
		// ZooKeeper session timeout.
		props.put("zookeeper.session.timeout.ms", "40000");
		props.put("zookeeper.sync.time.ms", "200");
		// How often consumed offsets are auto-committed back to ZooKeeper.
		props.put("auto.commit.interval.ms", "1000");
		// Start from the earliest offset when the group has no stored offset.
		props.put("auto.offset.reset", "smallest");
		// NOTE: the original also set "serializer.class", which is a producer-only
		// setting; consumers get their decoders via createMessageStreams below.

		ConsumerConfig config = new ConsumerConfig(props);

		consumer = kafka.consumer.Consumer.createJavaConsumerConnector(config);
	}

	/**
	 * Opens one stream per partition of the topic and starts a reader thread
	 * for each stream.
	 */
	void consume() {
		Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
		topicCountMap.put(Utils.KAFKA_TOPIC_NAME, Utils.KAFKA_TOPIC_PARTITION_NUM);

		StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
		StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

		Map<String, List<KafkaStream<String, String>>> consumerMap = consumer.createMessageStreams(topicCountMap,
				keyDecoder, valueDecoder);
		List<KafkaStream<String, String>> partitions = consumerMap.get(Utils.KAFKA_TOPIC_NAME);
		threadPool = Executors.newFixedThreadPool(Utils.KAFKA_TOPIC_PARTITION_NUM);
		int partitionNum = 0;
		for (KafkaStream<String, String> partition : partitions) {
			threadPool.execute(new MessageReader(partition, partitionNum));
			partitionNum++;
		}
	}

	/** Prints every message received on one partition stream. */
	class MessageReader implements Runnable {
		private final KafkaStream<String, String> partition;
		private final int partitionNum;

		MessageReader(KafkaStream<String, String> partition, int partitionNum) {
			this.partition = partition;
			this.partitionNum = partitionNum;
		}

		public void run() {
			ConsumerIterator<String, String> it = partition.iterator();
			while (true) {
				if (it.hasNext()) {
					String oneLineLog = it.next().message();
					System.out.println(partitionNum + " partition: " + oneLineLog);
				} else {
					try {
						Thread.sleep(3000);
					} catch (InterruptedException e) {
						// Original swallowed the interrupt, making the thread
						// unstoppable. Restore the flag and exit the reader.
						Thread.currentThread().interrupt();
						return;
					}
				}
			}
		}

	}

	public static void main(String[] args) {
		new KafkaConsumer().consume();
	}
}

四. 自定义分区

package com.qianbao.kafka;

import kafka.producer.Partitioner;
import kafka.utils.VerifiableProperties;

public class RoutePartition implements Partitioner {

	public RoutePartition(VerifiableProperties props) {
		// No configurable properties; the old producer API requires this constructor.
	}

	/**
	 * Routes a message to a partition by hashing its key, so equal keys always
	 * land on the same partition and load spreads across all partitions.
	 * (The generated stub returned a constant 1, which sent ALL traffic to a
	 * single partition and left the others idle.)
	 */
	public int partition(Object key, int numPartitions) {
		if (key == null) {
			return 0;
		}
		// Mask the sign bit instead of Math.abs: abs(Integer.MIN_VALUE) is negative.
		return (key.hashCode() & 0x7fffffff) % numPartitions;
	}

}

五. 工具类(静态常量类)

package com.qianbao.kafka;

import kafka.producer.Partitioner;
import kafka.utils.VerifiableProperties;

/**
 * Static configuration constants shared by the producer, consumer and
 * partitioner. (The original listing for this section was a copy-paste
 * duplicate of RoutePartition; reconstructed here from the five constants
 * the other listings reference. Adjust the values to your environment.)
 */
public class Utils {

	/** Comma-separated broker list used by the producer (metadata.broker.list). */
	public static final String KAFKA_BROKER_LIST = "127.0.0.1:9092";

	/** ZooKeeper connect string used by the high-level consumer. */
	public static final String KAFKA_ZOOKEEPER = "127.0.0.1:2181";

	/** Topic that both the producer and the consumer use. */
	public static final String KAFKA_TOPIC_NAME = "test-topic";

	/** Consumer group id. */
	public static final String KAFKA_CONSUMER_GROUP_ID = "test-group";

	/** Partition count of the topic; the consumer starts one thread per partition. */
	public static final int KAFKA_TOPIC_PARTITION_NUM = 3;

	private Utils() {
		// Constant holder — not instantiable.
	}

}


相关文章推荐

Kafka简单入门程序

基本都是从官网那学来的....抄? 官网 : http://kafka.apache.org/documentation.html#quickstart 一.kafka安装测试 首先下载...

KAFKA从入门到放弃

  • 2017年11月15日 16:40
  • 488KB
  • 下载

kafka入门实战

  • 2017年11月10日 21:55
  • 114KB
  • 下载

Kafka.net使用编程入门(一)

最近研究分布式消息队列,分享下! 首先zookeeper  和 kafka 压缩包 解压 并配置好! 我本机zookeeper环境配置如下: D:\Worksoftware\ApacheZook...
  • WuLex
  • WuLex
  • 2016年06月29日 22:17
  • 3323

kafka入门资料

  • 2017年02月07日 15:25
  • 139KB
  • 下载

log4j写入kafka测试程序

  • 2016年11月11日 17:12
  • 48.43MB
  • 下载

kafka入门综合概要介绍

~ kafka 设计初衷为统一信息收集平台 分布式收集,统一处理 可以实时处理反馈信息支持大数据高容错 分布式、分区的、多副本的、多订阅者的日志系统(分布式MQ系统)消息的发布(p...

kafka-0.10.1.0 全集(API,程序win+linux)

  • 2016年10月26日 10:26
  • 3.76MB
  • 下载

:Hadoop、NoSQL、分布式、lucene、solr、nutch kafka入门:简介、使用场景、设计原理、主要配置及集群搭

问题导读: 1.zookeeper在kafka的作用是什么? 2.kafka中几乎不允许对消息进行“随机读写”的原因是什么? 3.kafka集群consumer和producer状态信息是如何保...

ZooKeeper kafka入门:简介、使用场景、设计原理、主要配置

问题导读: 1.zookeeper在kafka的作用是什么? 2.kafka中几乎不允许对消息进行“随机读写”的原因是什么? 3.kafka集群consumer和producer状态...
内容举报
返回顶部
收藏助手
不良信息举报
您举报文章:kafka入门程序
举报原因:
原因补充:

(最多只允许输入30个字)