Kafka安装与使用

Kafka Download URL

vim config/server.properties
broker.id=0
advertised.listeners=PLAINTEXT://localhost:9092
log.dirs=/home/kafka/logs
num.partitions=2
zookeeper.connect=localhost:2181

zkServer.sh start
ll /home/kafka/logs/
./bin/kafka-server-start.sh ./config/server.properties &
zkCli.sh
ls /
[cluster, controller, brokers, zookeeper, admin, isr_change_notification, p, controller_epoch, consumers, config]
kafka-server-stop.sh
zkServer.sh stop

kafka管理器kafka-manager部署安装

kafka web console安装

Apache Kafka 入门 - Kafka-manager的基本配置和运行

unzip kafka-manager-1.3.0.4.zip
vim application.conf
kafka-manager.zkhosts="localhost:2181"
nohup bin/kafka-manager -Dconfig.file=conf/application.conf &
http://localhost:9000/
kafka-topics.sh --zookeeper localhost:2181 --list
kafka-console-producer.sh --broker-list 59.110.138.145:9092 --topic test
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>cn.itcast.kafka</groupId>
  <artifactId>kafka</artifactId>
  <version>0.0.1-SNAPSHOT</version>
  <packaging>war</packaging>

  <properties>
    <junit.version>4.12</junit.version>
  </properties>

  <dependencies>
    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <!-- Use the version declared in <properties>; it was previously hard-coded
           to 4.9, silently ignoring the junit.version property above. -->
      <version>${junit.version}</version>
      <!-- Test-only dependency: keep JUnit out of the packaged war. -->
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka_2.10</artifactId>
      <version>0.8.2.1</version>
    </dependency>
  </dependencies>

</project>
import java.util.Properties;
import java.util.concurrent.TimeUnit;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;
import kafka.serializer.StringEncoder;

/**
 * Demo producer: publishes ten "hello kafka" string messages to the {@code test}
 * topic, one per second, using the legacy Scala-based producer API
 * (kafka.javaapi.producer), then closes the producer.
 */
public class KafkaProducer {

	/** Topic the demo messages are published to. */
	public static final String topic = "test";

	public static void main(String[] args) throws InterruptedException {
		Properties config = new Properties();
		// ZooKeeper connection (read by the 0.8.x client stack).
		config.put("zookeeper.connect", "localhost:2181");
		// Encode message payloads as strings.
		config.put("serializer.class", StringEncoder.class.getName());
		// Bootstrap broker list for metadata discovery.
		config.put("metadata.broker.list", "localhost:9092");
		// Wait for leader acknowledgement on each send.
		config.put("request.required.acks", "1");

		Producer<Integer, String> producer =
				new Producer<Integer, String>(new ProducerConfig(config));

		int sent = 0;
		while (sent < 10) {
			String payload = "hello kafka" + sent;
			producer.send(new KeyedMessage<Integer, String>(topic, payload));
			System.out.println("send message: " + payload);
			// Pace the demo output at one message per second.
			TimeUnit.SECONDS.sleep(1);
			sent++;
		}
		producer.close();
	}

}
kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.serializer.StringDecoder;
import kafka.utils.VerifiableProperties;

/**
 * Demo consumer: connects to the legacy high-level consumer API
 * (kafka.javaapi.consumer), opens a single stream on the {@code test} topic,
 * and prints every message payload it receives, blocking indefinitely.
 */
public class kafkaConsumer {

    /** Topic this demo consumes from. */
    public static final String topic = "test";

    public static void main(String[] args) {
        Properties config = new Properties();
        // ZooKeeper ensemble used by the legacy high-level consumer.
        config.put("zookeeper.connect", "localhost:2181");
        // Consumer group id; consumers sharing a group id split the topic's partitions.
        config.put("group.id", "group1");
        // ZooKeeper session timeout.
        config.put("zookeeper.session.timeout.ms", "4000");
        config.put("zookeeper.sync.time.ms", "200");
        config.put("auto.commit.interval.ms", "1000");
        // With no committed offset, start from the earliest available message.
        config.put("auto.offset.reset", "smallest");
        // NOTE(review): serializer.class is a producer-side setting; it appears
        // unused on this consumer path but is kept for exact config parity.
        config.put("serializer.class", "kafka.serializer.StringEncoder");

        ConsumerConnector connector =
                kafka.consumer.Consumer.createJavaConsumerConnector(new ConsumerConfig(config));

        // Request exactly one stream for the topic.
        Map<String, Integer> streamCounts = new HashMap<String, Integer>();
        streamCounts.put(topic, Integer.valueOf(1));

        StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
        StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

        Map<String, List<KafkaStream<String, String>>> streamsByTopic =
                connector.createMessageStreams(streamCounts, keyDecoder, valueDecoder);

        // Take the single stream we requested and iterate it forever.
        ConsumerIterator<String, String> messages =
                streamsByTopic.get(topic).get(0).iterator();
        while (messages.hasNext()) {
            System.out.println(messages.next().message());
        }
    }

}

 

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值