kafka入门程序

原创 2017年05月26日 14:14:00

一. 依赖jar包

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>com.zrj.points</groupId>
  <artifactId>kafka-consumer</artifactId>
  <version>0.0.1-SNAPSHOT</version>
  <packaging>war</packaging>
  <name>points-consumer</name>
  <properties>
		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
		<spring.version>4.3.3.RELEASE</spring.version>
  </properties>
  <dependencies>
		<!-- Logging facade + log4j binding used by the Kafka client. -->
		<dependency>
		  <groupId>org.slf4j</groupId>
		  <artifactId>slf4j-api</artifactId>
		  <version>1.7.5</version>
		</dependency>
		<dependency>
		  <groupId>org.slf4j</groupId>
		  <artifactId>slf4j-log4j12</artifactId>
		  <version>1.7.5</version>
		</dependency>
		<!-- kafka_2.9.2 is a Scala artifact; the Scala runtime must match (2.9.2). -->
		<dependency>
		  <groupId>org.scala-lang</groupId>
		  <artifactId>scala-library</artifactId>
		  <version>2.9.2</version>
		</dependency>
		<!-- ZooKeeper client required by the Kafka 0.8 high-level consumer. -->
		<dependency>
		  <groupId>com.101tec</groupId>
		  <artifactId>zkclient</artifactId>
		  <version>0.3</version>
		</dependency>
		<dependency>
		  <groupId>org.apache.kafka</groupId>
		  <artifactId>kafka_2.9.2</artifactId>
		  <version>0.8.1.1</version>
		</dependency>
		<dependency>
		  <groupId>org.apache.zookeeper</groupId>
		  <artifactId>zookeeper</artifactId>
		  <version>3.4.5</version>
		</dependency>
		<!-- Runtime dependency of kafka 0.8 (metrics reporting). -->
		<dependency>
		  <groupId>com.yammer.metrics</groupId>
		  <artifactId>metrics-core</artifactId>
		  <version>2.2.0</version>
		</dependency>
		<dependency>
		  <groupId>commons-logging</groupId>
		  <artifactId>commons-logging</artifactId>
		  <version>1.2</version>
		</dependency>
		<dependency>
		  <groupId>log4j</groupId>
		  <artifactId>log4j</artifactId>
		  <version>1.2.17</version>
		</dependency>
		<dependency>
		  <groupId>commons-lang</groupId>
		  <artifactId>commons-lang</artifactId>
		  <version>2.6</version>
		</dependency>
  </dependencies>
  <build>
		<!-- BUGFIX: was misspelled "kafka-consmer". -->
		<finalName>kafka-consumer</finalName>
		<plugins>
			<plugin>
				<artifactId>maven-compiler-plugin</artifactId>
				<configuration>
					<source>1.7</source>
					<target>1.7</target>
					<encoding>UTF-8</encoding>
				</configuration>
			</plugin>

			<!-- Embedded Jetty for local testing: mvn jetty:run, serves on port 9090. -->
			<plugin>
				<groupId>org.eclipse.jetty</groupId>
				<artifactId>jetty-maven-plugin</artifactId>
				<version>9.2.11.v20150529</version>
				<configuration>
					<jvmArgs>-Xms456m -Xmx456m -XX:MaxNewSize=456m
						-XX:MaxPermSize=1024m</jvmArgs>
					<scanIntervalSeconds>10</scanIntervalSeconds>
					<webApp>
						<contextPath>/</contextPath>
					</webApp>
					<httpConnector>
						<port>9090</port>
					</httpConnector>
				</configuration>
			</plugin>
		</plugins>
	</build>
</project>



二:生产者

package com.qianbao.kafka;

import java.util.Date;
import java.util.Properties;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

public class KafkaProducer {

	/** Underlying Kafka 0.8 producer; keys and values are plain strings. */
	private final Producer<String, String> producer;

	private KafkaProducer() {
		Properties props = new Properties();

		// Bootstrap broker list ("host:port,host:port") — the 0.8 producer
		// config key; the producer does NOT talk to ZooKeeper.
		props.put("metadata.broker.list", Utils.KAFKA_BROKER_LIST);

		// Value serializer class.
		props.put("serializer.class", "kafka.serializer.StringEncoder");
		// Key serializer class.
		props.put("key.serializer.class", "kafka.serializer.StringEncoder");
		// -1: wait until all in-sync replicas have acknowledged each message.
		props.put("request.required.acks", "-1");
		// Custom partitioner that maps the message key to a partition.
		props.put("partitioner.class", "com.qianbao.kafka.RoutePartition");

		producer = new Producer<String, String>(new ProducerConfig(props));
	}

	/**
	 * Sends messages numbered {@code messageNo} (inclusive) to {@code count}
	 * (exclusive) to the configured topic, keyed by the message number.
	 */
	void produce(int messageNo, int count) {
		while (messageNo < count) {
			String key = String.valueOf(messageNo);
			String data = "hello kafka message " + key;
			producer.send(new KeyedMessage<String, String>(Utils.KAFKA_TOPIC_NAME, key, data));
			messageNo++;
		}
	}

	/** Releases the producer's network connections and background threads. */
	void close() {
		producer.close();
	}

	public static void main(String[] args) {
		int messageNo = 1000;
		int count = 1010;

		long startTime = System.currentTimeMillis();
		KafkaProducer kafkaProducer = new KafkaProducer();
		try {
			kafkaProducer.produce(messageNo, count);
		} finally {
			// BUGFIX: the producer was never closed, leaking sockets and threads.
			kafkaProducer.close();
		}
		long endTime = System.currentTimeMillis();
		System.out.println("spend time:" + (endTime - startTime));
	}
}

三:消费者

package com.qianbao.kafka;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.serializer.StringDecoder;
import kafka.utils.VerifiableProperties;

public class KafkaConsumer {
	/** High-level (ZooKeeper-coordinated) consumer connector, Kafka 0.8 API. */
	private final ConsumerConnector consumer;
	/** One worker thread per partition stream. */
	private ExecutorService threadPool;

	private KafkaConsumer() {
		Properties props = new Properties();
		// ZooKeeper ensemble the high-level consumer coordinates through.
		props.put("zookeeper.connect", Utils.KAFKA_ZOOKEEPER);
		// Consumer group id; consumers sharing one id split the partitions.
		props.put("group.id", Utils.KAFKA_CONSUMER_GROUP_ID);
		// ZooKeeper session timeout (was mislabeled as a connect timeout).
		props.put("zookeeper.session.timeout.ms", "40000");
		props.put("zookeeper.sync.time.ms", "200");
		// Offsets are auto-committed once per second.
		props.put("auto.commit.interval.ms", "1000");
		// Start from the earliest available offset when no offset is stored.
		props.put("auto.offset.reset", "smallest");
		// BUGFIX: removed "serializer.class" — it is a producer-side setting with
		// no effect on a consumer; decoding is done by the StringDecoders below.

		ConsumerConfig config = new ConsumerConfig(props);

		consumer = kafka.consumer.Consumer.createJavaConsumerConnector(config);
	}

	/**
	 * Opens one stream per partition of the topic and hands each stream to a
	 * dedicated worker thread. The workers run until the process is killed.
	 */
	void consume() {
		Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
		topicCountMap.put(Utils.KAFKA_TOPIC_NAME, Utils.KAFKA_TOPIC_PARTITION_NUM);

		StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
		StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

		Map<String, List<KafkaStream<String, String>>> consumerMap = consumer.createMessageStreams(topicCountMap,
				keyDecoder, valueDecoder);
		List<KafkaStream<String, String>> partitions = consumerMap.get(Utils.KAFKA_TOPIC_NAME);
		threadPool = Executors.newFixedThreadPool(Utils.KAFKA_TOPIC_PARTITION_NUM);
		int partitionNum = 0;
		for (KafkaStream<String, String> partition : partitions) {
			threadPool.execute(new MessageReader(partition, partitionNum));
			partitionNum++;
		}
	}

	/** Drains a single partition stream and prints every message it receives. */
	class MessageReader implements Runnable {
		private KafkaStream<String, String> partition;
		private int partitionNum;

		MessageReader(KafkaStream<String, String> partition, int partitionNum) {
			this.partition = partition;
			this.partitionNum = partitionNum;
		}

		public void run() {
			ConsumerIterator<String, String> it = partition.iterator();
			// BUGFIX: hasNext() blocks until a message arrives (consumer.timeout.ms
			// defaults to -1), so the old else { sleep(3000) } branch was dead code
			// and the busy-wait has been removed.
			while (it.hasNext()) {
				String oneLineLog = it.next().message();
				System.out.println(partitionNum + " partition: " + oneLineLog);
			}
		}

	}

	public static void main(String[] args) {
		new KafkaConsumer().consume();
	}
}

四:自定义分区

package com.qianbao.kafka;

import kafka.producer.Partitioner;
import kafka.utils.VerifiableProperties;

public class RoutePartition implements Partitioner {

	/** Kafka instantiates the partitioner reflectively with the producer config. */
	public RoutePartition(VerifiableProperties props) {
	}

	/**
	 * Maps a message key to a partition index in [0, numPartitions).
	 *
	 * BUGFIX: the original always returned 1, which ignores the key entirely
	 * and is out of range for a topic with a single partition (only index 0
	 * is valid there).
	 *
	 * @param key           the message key (may be null)
	 * @param numPartitions number of partitions of the target topic
	 * @return a stable partition index derived from the key's hash
	 */
	public int partition(Object key, int numPartitions) {
		if (key == null || numPartitions <= 0) {
			return 0;
		}
		// Mask the sign bit so a negative hashCode still yields a valid index.
		return (key.hashCode() & 0x7fffffff) % numPartitions;
	}

}

五:工具类(静态常量类)

package com.qianbao.kafka;

import kafka.producer.Partitioner;
import kafka.utils.VerifiableProperties;

// NOTE(review): the article announces "五:工具类(静态常量类)" (the Utils constants
// class) here but pasted the RoutePartition listing a second time by mistake.
// The class below is reconstructed from the five constants the producer and
// consumer listings actually reference; the VALUES are placeholders — replace
// them with your real environment settings.
public class Utils {

	/** Comma-separated broker list for the producer ("host:port,host:port"). */
	public static final String KAFKA_BROKER_LIST = "localhost:9092";

	/** ZooKeeper connect string used by the high-level consumer. */
	public static final String KAFKA_ZOOKEEPER = "localhost:2181";

	/** Topic that both the producer and the consumer operate on. */
	public static final String KAFKA_TOPIC_NAME = "test-topic";

	/** Consumer group id. */
	public static final String KAFKA_CONSUMER_GROUP_ID = "test-group";

	/** Partition count of the topic; also the consumer thread-pool size. */
	public static final int KAFKA_TOPIC_PARTITION_NUM = 3;

	private Utils() {
		// static constants only — no instances
	}
}


Kafka简单入门程序

基本都是从官网那学来的....抄? 官网 : http://kafka.apache.org/documentation.html#quickstart 一.kafka安装测试 首先下载...
  • u011221078
  • u011221078
  • 2016-07-21 14:43:30
  • 1950

kafka学习之路(一)——入门

kafka学习之路(一)——入门Kafka学习之路...一、入门..1、      简介2、      主题(Topics)、日志(Logs)3、      分布式(Distribution)4、  ...
  • tanggao1314
  • tanggao1314
  • 2016-07-16 23:32:26
  • 5125

Kafka入门实例

摘要:本文主要讲了Kafka的一个简单入门实例
  • Evankaka
  • Evankaka
  • 2016-10-12 20:45:50
  • 9003

Kafka从入门到实践

一、基本概念介绍Kafka是一个分布式的、可分区的、可复制的消息系统。它提供了普通消息系统的功能,但具有自己独特的设计。这个独特的设计是什么样的呢?首先让我们看几个基本的消息系统术语: Kafka将消...
  • rxt2012kc
  • rxt2012kc
  • 2017-05-02 15:47:00
  • 612

Kafka入门与实践 PDF

  • 2018年02月05日 08:29
  • 49B
  • 下载

Kafka教程(一)Kafka入门教程

Message Queue 消息传送系统提供传送服务。消息传送依赖于大量支持组件,这些组件负责处理连接服务、消息的路由和传送、持久性、安全性以及日志记录。消息服务器可以使用一个或多个代理实例。 JMS...
  • yuan_xw
  • yuan_xw
  • 2016-04-21 17:29:57
  • 10465

kafka入门与实践 高清、完成、带书签版

  • 2018年02月11日 15:05
  • 55B
  • 下载

Kafka入门经典教程

问题导读 1.Kafka独特设计在什么地方? 2.Kafka如何搭建及创建topic、发送消息、消费消息? 3.如何书写Kafka程序? 4.数据传输的事务定义有哪三种? 5.Kafka...
  • HMSIWTV
  • HMSIWTV
  • 2015-07-19 23:36:21
  • 90588

Kafka入门与实践

  • 2018年02月08日 11:29
  • 172.17MB
  • 下载
收藏助手
不良信息举报
您举报文章:kafka入门程序
举报原因:
原因补充:

(最多只允许输入30个字)