Kafka is a distributed message queue, a piece of message middleware, and a publish-subscribe messaging system.
After installing Kafka, edit the server.properties file; the relevant settings are as follows:
vi ./server.properties
# Unique ID of this broker in the cluster
broker.id=0
# Address that clients use to connect to this broker
advertised.listeners=PLAINTEXT://192.168.153.141:9092
# Directory where Kafka stores its log segments
log.dirs=/opt/bigdata/kafka211/kafka-logs
# ZooKeeper connection string
zookeeper.connect=192.168.153.141:2181
# Allow topics to be physically deleted
delete.topic.enable=true
Core concepts: broker, producer, consumer, topic.
Start in the foreground: bin/kafka-server-start.sh config/server.properties (if Kafka's environment variables are configured, you can run kafka-server-start.sh config/server.properties directly)
Start in the background: kafka-server-start.sh -daemon ./config/server.properties
// Create a topic
[root@lijia1 kafka211]# kafka-topics.sh --create --zookeeper 192.168.153.141:2181 --topic kb07demo --partitions 3 --replication-factor 1
// List the topics currently in Kafka
[root@lijia1 kafka211]# kafka-topics.sh --zookeeper 192.168.153.141:2181 --list
// Show the details of a topic
[root@lijia1 kafka211]# kafka-topics.sh --zookeeper 192.168.153.141:2181 --describe --topic kb07demo
// Delete a topic
[root@lijia1 kafka211]# kafka-topics.sh --zookeeper 127.0.0.1:2181 --delete --topic kb05
// Start a console producer to generate data
[root@lijia1 kafka211]# kafka-console-producer.sh --topic kb07demo --broker-list 192.168.153.141:9092
// Start a console consumer, reading from the beginning
[root@lijia1 ~]# kafka-console-consumer.sh --bootstrap-server 192.168.153.141:9092 --topic kb07demo --from-beginning
// Check each partition's latest offset for a topic (output format: topic:partition:offset)
[root@lijia1 config]# kafka-run-class.sh kafka.tools.GetOffsetShell --broker-list hadoop31:9092 --topic kb07demo --time -1 --offsets 1
kb07demo:0:1
kb07demo:1:2
kb07demo:2:1
Kafka API (Java)
Producer API:
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

Properties prop = new Properties();
// point the producer at a broker
prop.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop131:9092");
// serializers for the key and value
prop.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
prop.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
// acks=1: the partition leader acknowledges the write without waiting for replicas
prop.put(ProducerConfig.ACKS_CONFIG, "1");
// create a KafkaProducer object
KafkaProducer<String, String> producer = new KafkaProducer<>(prop);
// produce data
for (int i = 1; i < 10; i++) {
    // create a ProducerRecord to hold the data; the arguments are (topic, value)
    ProducerRecord<String, String> producerRecord = new ProducerRecord<>("test2020", "hello scala" + i);
    // call send() to dispatch the record
    producer.send(producerRecord);
    try {
        Thread.sleep(100);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
}
// close the producer
producer.close();
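The send() above is asynchronous and fire-and-forget. As a minimal sketch against the same producer API (nothing changed except the added callback), you can pass a Callback to see whether each record actually reached the broker:

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.RecordMetadata;

producer.send(producerRecord, new Callback() {
    @Override
    public void onCompletion(RecordMetadata metadata, Exception e) {
        if (e != null) {
            // the send failed after any retries
            e.printStackTrace();
        } else {
            // metadata reports where the record landed
            System.out.printf("topic:%s,partition:%d,offset:%d%n",
                    metadata.topic(), metadata.partition(), metadata.offset());
        }
    }
});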
Consumer API:
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

Properties prop = new Properties();
prop.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop131:9092");
// deserializers for the key and value
prop.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
prop.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
// session timeout used by the group coordinator to detect dead consumers
prop.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
// disable auto-commit, so offsets must be committed manually
prop.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
// auto-commit interval (only takes effect when auto-commit is enabled)
prop.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
// earliest: start from the beginning when no committed offset exists
// latest:   start from the newest offset
// none:     throw an exception if no committed offset exists
prop.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
prop.put(ConsumerConfig.GROUP_ID_CONFIG, "GROUP_1");
// create a Kafka consumer
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(prop);
// subscribe the consumer to the topic
consumer.subscribe(Collections.singletonList("kb07demo"));
while (true) {
    // fetch a batch of records from the topic
    ConsumerRecords<String, String> records = consumer.poll(100);
    for (ConsumerRecord<String, String> record : records) {
        System.out.printf("offset:%d,key:%s,value:%s", record.offset(), record.key(), record.value());
        System.out.println();
    }
}
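Because enable.auto.commit is false above, this loop never records its progress; after a restart the group falls back to auto.offset.reset and rereads from the beginning. A minimal sketch of committing manually, the same loop with one added call:

while (true) {
    ConsumerRecords<String, String> records = consumer.poll(100);
    for (ConsumerRecord<String, String> record : records) {
        System.out.printf("offset:%d,key:%s,value:%s%n", record.offset(), record.key(), record.value());
    }
    // synchronously commit the offsets of the batch just processed
    consumer.commitSync();
}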
// three threads reading data from the topic, one consumer per thread
for (int i = 0; i < 3; i++) {
    new Thread(new Runnable() {
        @Override
        public void run() {
            // each thread gets its own consumer instance
            KafkaConsumer<String, String> consumer = new KafkaConsumer<>(prop);
            consumer.subscribe(Collections.singletonList("test2020"));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(100);
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset:%d,key:%s,value:%s,", record.offset(), record.key(), record.value());
                    System.out.println(Thread.currentThread().getName());
                }
            }
        }
    }).start();
}
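Since all three consumers share group.id GROUP_1, Kafka divides the topic's partitions among them; if test2020 has three partitions (like the kb07demo topic created earlier), each thread ends up owning exactly one. One consumer per thread is the required pattern here, because KafkaConsumer itself is not thread-safe.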