kafka及jafka命令及生产者代码实现

kafka:

进入kafka目录,进入bin

列出所有topic

 ./kafka-topics.sh --list --zookeeper x1:2181,x2:2181,x3:2181


 
 创建topic
 ./kafka-topics.sh --zookeeper  x1:2181,x2:2181,x3:2181 --create --topic test --partitions 20 --replication-factor 1


发送消息
 ./kafka-console-producer.sh --broker-list  x1:9092,x2:9092,x3:9092 --topic test

消费消息
 ./kafka-console-consumer.sh --zookeeper  x1:2181,x2:2181,x3:2181 --topic test --from-beginning

 
 删除topic(必须在server.properties里配置delete.topic.enable=true)
 ./kafka-topics.sh --zookeeper x1:2181,x2:2181,x3:2181 --delete --topic test

生产者代码如下:

pom配置文件

<!-- Legacy Kafka producer client: Scala-2.9.1 build of broker 0.8.1.1.
     Provides the old kafka.javaapi.producer.Producer API used below.
     jmxri / jms / jmxtools are excluded because these log4j-era
     transitive dependencies are not resolvable from Maven Central. -->
<dependency>
	  <groupId>org.apache.kafka</groupId>
	  <artifactId>kafka_2.9.1</artifactId>
	  <version>0.8.1.1</version>
	  <scope>compile</scope>
	  <exclusions>
	    <exclusion>
	      <artifactId>jmxri</artifactId>
	      <groupId>com.sun.jmx</groupId>
	    </exclusion>
	    <exclusion>
	      <artifactId>jms</artifactId>
	      <groupId>javax.jms</groupId>
	    </exclusion>
	    <exclusion>
	      <artifactId>jmxtools</artifactId>
	      <groupId>com.sun.jdmk</groupId>
	    </exclusion>
	  </exclusions>
	</dependency>
代码:

import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

/**
 */
public class DaceKafkaProducer implements DaceProducer
{
//    private final Producer<String, String> producer;

    private Producer<String, String> producer;
    public DaceKafkaProducer(){
        Properties props = new Properties();
        //此处配置的是kafka的端口
//        props.put("metadata.broker.list", "x1:9092,x2:9092,x3:9092");
//        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "n1:9092,n2:9092,n3:9092");
          props.put("zk_connect", "n1:2181,n2:2181,n3:2181");
        props.put(ProducerConfig.BLOCK_ON_BUFFER_FULL_CONFIG, true);
//        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 10);
//        //配置value的序列化类
//        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
//        //配置key的序列化类
//        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
//        props.put("request.required.acks","1");
        props.put("log.retention.minutes", 1);
// 
//        producer = new KafkaProducer<String, String>(props);
        
        props.put("metadata.broker.list", "x1:9092,x2:9092,x3:9092");
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        props.put("log.retention.minutes", 1);
        props.put("request.required.acks", "1");
        ProducerConfig config = new ProducerConfig(props);
        
        producer = new Producer<String, String>(config);
    }
 
    void produce(String topic, String tableName, List<String> datas) {
    	List<KeyedMessage<String, String>> messages = new ArrayList<KeyedMessage<String,String>>();
    	for (String data:datas) {
    		messages.add(new KeyedMessage<String, String>(topic, tableName, data));
    	}
    	producer.send(messages);
    }
    
    public static void main(String[] args) {
//		DaceKafkaProducer producer = new DaceKafkaProducer();
//		for (int i = 0; i < 15; i++) {
//			producer.produce("replay", "test", "testlength" + i);
//		}
	}


jafka:

生产消息

 ./producer-console.sh --broker-list 0:x1:9092,1:x2:9092,2:x3:9092 --topic demo


 ./producer-console.sh --zookeeper x2:2181/jafka --topic demo


消费消息

./consumer-console.sh --zookeeper x2:2181/jafka --topic demo --from-beginning


pom配置:

		<!-- Sohu's jafka broker/client library; provides
		     com.sohu.jafka.producer.* used by DaceJfkaProducer below. -->
		<dependency>
			<groupId>com.sohu.jafka</groupId>
			<artifactId>jafka</artifactId>
			<version>1.2.2</version>
		</dependency>

代码:

import java.util.ArrayList;
import java.util.List;
import java.util.Properties;





import com.sohu.jafka.producer.Producer;
import com.sohu.jafka.producer.ProducerConfig;
import com.sohu.jafka.producer.ProducerData;
import com.sohu.jafka.producer.serializer.StringEncoder;

/**
 * Jafka producer that discovers brokers through ZooKeeper and publishes
 * plain-string messages.
 */
public class DaceJfkaProducer implements DaceProducer{

	private final Producer<String, String> producer;

	public DaceJfkaProducer() {
		Properties config = new Properties();
		// Broker discovery via the ZooKeeper chroot used by the jafka cluster.
		config.put("zk.connect", "n2:2181/jafka");
		config.put("log.retention.minutes", 1);
		// Messages are encoded as strings.
		config.put("serializer.class", StringEncoder.class.getName());
		this.producer = new Producer<String, String>(new ProducerConfig(config));
	}

	/**
	 * Publishes the given payloads to {@code topic} as a single batch.
	 */
	public void produce(String topic, List<String> data) {
		ProducerData<String, String> batch =
				new ProducerData<String, String>(topic, data);
		producer.send(batch);
	}

	public static void main(String[] args) {
		List<String> payloads = new ArrayList<String>();
		int n = 0;
		while (n < 10) {
			payloads.add("test1" + n);
			n++;
		}
		new DaceJfkaProducer().produce("demo", payloads);
	}
}

kafka流程如下:



  • 0
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值