1. Kafka operations

1. Kafka shell commands

1.1 The kafka-topics command

- List all topics on the cluster

kafka-topics --zookeeper zk01:2181/kafka --list

- Create a topic (specifying partitions and replicas)

kafka-topics --zookeeper zk01:2181/kafka --create --topic test --partitions 1 --replication-factor 1

- Describe a specific topic

kafka-topics --zookeeper zk01:2181/kafka --describe --topic test

- Change the number of partitions (the count can only be increased, never decreased)

kafka-topics --zookeeper zk01:2181/kafka --alter --topic utopic --partitions 15

- Delete a topic

kafka-topics --zookeeper zk01:2181/kafka --delete --topic test

Deletion only takes effect if delete.topic.enable=true is set in server.properties; otherwise the topic is merely marked for deletion (or the brokers must be restarted).
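
The relevant line in each broker's server.properties:

delete.topic.enable=true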

 

1.2 The kafka-console-producer command

- Send messages (typed in manually, one line at a time)

kafka-console-producer --broker-list kafka01:9092 --topic test

- Send messages through a pipe (e.g. continuously feeding lines from a log file)

tail -F /root/log.log | kafka-console-producer --broker-list kafka01:9092 --topic test

 

1.3 The kafka-console-consumer command

- Consume messages

kafka-console-consumer --zookeeper zk01:2181/kafka --topic test --from-beginning

 

1.4 The kafka-run-class / kafka-consumer-offset-checker commands (checking consumer progress)

- View a consumer group's current offsets

kafka-run-class kafka.tools.ConsumerOffsetChecker --zookeeper zk01:2181/kafka --group testgroup

Narrowed down to a specific topic:

kafka-run-class kafka.tools.ConsumerOffsetChecker --zookeeper zk01:2181/kafka --group testgroup --topic test

Or:

kafka-consumer-offset-checker --zookeeper zk01:2181/kafka --group testgroup --topic test
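
Note: ConsumerOffsetChecker is deprecated as of Kafka 0.9; newer releases provide the kafka-consumer-groups tool for the same purpose.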

 

2. Operating Kafka through the Java API

Maven dependency:

<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.10</artifactId>
    <version>0.9.0.0</version>
    <exclusions>
        <exclusion>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
        </exclusion>
    </exclusions>
</dependency>

 

 

 

Producer API

import kafka.javaapi.producer.Producer; // note: it must be the javaapi package
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

import java.util.Properties;

/**
 * @Company 任子行网络技术股份有限公司
 * @CreateTime 2017/8/21 13:59
 */
public class KafkaProducer {
    public static void main(String[] args) {
        Properties param = new Properties();
        param.put("metadata.broker.list", "rzx168:9092,rzx169:9092,rzx177:9092");
        // -1 = wait for all in-sync replicas to acknowledge each message
        param.put("request.required.acks", "-1");
        param.put("batch.num.messages", "200");
        // the default partitioner; a custom one can be plugged in here (see below)
        param.put("partitioner.class", "kafka.producer.DefaultPartitioner");

        Producer<byte[], byte[]> producer = new Producer<>(new ProducerConfig(param));

        for (int i = 0; i < 10000; i++) {
            producer.send(new KeyedMessage<byte[], byte[]>("testWc1", ("message number { " + i + " }").getBytes()));
        }
        producer.close(); // flush and release the producer's resources
    }
}

 

Custom partitioner

import kafka.producer.DefaultPartitioner;
import kafka.utils.VerifiableProperties;

/**
 * @Company 任子行网络技术股份有限公司
 * @CreateTime 2017/8/21 14:47
 */
public class KafkaPartitioner extends DefaultPartitioner {
    public KafkaPartitioner(VerifiableProperties props) {
        super(props);
    }

    @Override
    public int partition(Object key, int numPartitions) {
        // hash-based routing would be: return Math.abs(key.hashCode() % numPartitions);
        // here every message is pinned to partition 2
        return 2;
    }
}
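
The old producer only consults the partitioner when a message carries a key. Below is a minimal sketch (not from the original post) of wiring the custom partitioner in; the demo class name, key, and message values are illustrative:

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

import java.util.Properties;

// Hypothetical demo class, not part of the original post
public class PartitionedProducerDemo {
    public static void main(String[] args) {
        Properties param = new Properties();
        param.put("metadata.broker.list", "rzx168:9092,rzx169:9092,rzx177:9092");
        // StringEncoder serializes both key and value
        // (key.serializer.class falls back to serializer.class when unset)
        param.put("serializer.class", "kafka.serializer.StringEncoder");
        // fully-qualified class name of the custom partitioner above
        param.put("partitioner.class", "KafkaPartitioner");

        Producer<String, String> producer = new Producer<>(new ProducerConfig(param));
        // the key ("user-42") is the Object handed to partition(Object key, int numPartitions)
        producer.send(new KeyedMessage<String, String>("testWc1", "user-42", "hello"));
        producer.close();
    }
}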

 

Consumer API

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/**
 * @Company 任子行网络技术股份有限公司
 * @CreateTime 2017/8/21 14:17
 */
public class KafkaConsumer {
    public static void main(String[] args) {
        String topic = "testWc1";

        Properties param = new Properties();
        param.put("group.id", "test_wl_10");
        param.put("zookeeper.connect", "rzx168:2181,rzx169:2181,rzx177:2181/kafka");
        param.put("auto.offset.reset", "largest");
        param.put("auto.commit.interval.ms", "1000");
        param.put("partition.assignment.strategy", "roundrobin");

        ConsumerConfig config = new ConsumerConfig(param);
        ConsumerConnector consumerConn = Consumer.createJavaConsumerConnector(config);

        // ask the connector for two streams of this topic
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, 2);

        Map<String, List<KafkaStream<byte[], byte[]>>> topicStreamsMap = consumerConn.createMessageStreams(topicCountMap);
        // fetch all streams created for the topic
        List<KafkaStream<byte[], byte[]>> everyPartition = topicStreamsMap.get(topic);
        // a fixed thread pool; 9 threads is more than enough for the 2 streams requested above
        ExecutorService executor = Executors.newFixedThreadPool(9);
        // one consumer thread per stream
        for (int i = 0; i < everyPartition.size(); i++) {
            ConsumerIterator<byte[], byte[]> iterator = everyPartition.get(i).iterator();
            executor.execute(new KafkaSimpleConsumer("consumer " + (i + 1), iterator));
        }
    }

    static class KafkaSimpleConsumer implements Runnable {
        String title;
        ConsumerIterator<byte[], byte[]> iterator;

        KafkaSimpleConsumer(String title, ConsumerIterator<byte[], byte[]> iterator) {
            this.title = title;
            this.iterator = iterator;
        }

        @Override
        public void run() {
            while (iterator.hasNext()) {
                MessageAndMetadata<byte[], byte[]> msgMetadata = iterator.next();
                String topic = msgMetadata.topic();
                int partition = msgMetadata.partition();
                byte[] message = msgMetadata.message();

                System.out.println("topic:" + topic + " | partition:" + partition + " | message:" + new String(message));
            }
        }
    }
}
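
Each KafkaStream must be drained by its own thread, because ConsumerIterator.hasNext() blocks until a message arrives; the value in topicCountMap controls how many streams (and therefore how many consumer threads) the connector creates for the topic.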

 

 

Caveat:

If you connect to Kafka multiple times from the same class with the same group.id, you must shut down the previous connection each time; otherwise the newly created threads will not be assigned any partitions to consume.

ConsumerConnector connector = Consumer.createJavaConsumerConnector(config);
Map<String, List<KafkaStream<byte[], byte[]>>> messageStreams = connector.createMessageStreams(topicAndPartitionMap);
ExecutorService threadPool = Executors.newFixedThreadPool(threadPoolSize);

// always shut these three down before creating them again
connector.shutdown();
messageStreams.clear();
threadPool.shutdownNow();

 

If the broker sets auto.create.topics.enable=true (the default) in server.properties, a topic is created automatically the first time a producer sends data to it.
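
For reference, the relevant server.properties entries (the partition and replication defaults shown here are illustrative):

auto.create.topics.enable=true
# defaults applied to auto-created topics
num.partitions=1
default.replication.factor=1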

 

 
