Producer
package com.msb.bigdata.spark.streaming

import java.util.Properties
import java.util.concurrent.Future

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord, RecordMetadata}
import org.apache.kafka.common.serialization.StringSerializer

object KafkaProducer {

  def main(args: Array[String]): Unit = {
    val pros = new Properties()
    // Broker addresses and key/value serializers
    pros.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.100.123:9092,192.168.100.123:9093,192.168.100.123:9094")
    pros.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer])
    pros.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer])

    val producer = new KafkaProducer[String, String](pros)

    // Produce a small batch every second: 4 actions for each of 2 items,
    // all written to the "test" topic.
    while (true) {
      for (i <- 1 to 4; j <- 1 to 2) {
        val record = new ProducerRecord[String, String]("test", s"item$j", s"action$i")
        // send() is asynchronous; get() blocks until the broker acknowledges the record
        val future: Future[RecordMetadata] = producer.send(record)
        val metadata: RecordMetadata = future.get()
        val partition: Int = metadata.partition()
        val offset: Long = metadata.offset()
        println(s"[key:item$j, value:action$i, partition:$partition, offset:$offset]")
      }
      Thread.sleep(1000)
    }
    // Not reached in this demo because the loop above never exits;
    // a real producer should close() to flush any buffered records.
    producer.close()
  }
}
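Blocking on get() after every send() serializes the writes, which is convenient for printing metadata but slow. If throughput matters, the same loop can hand send() a callback instead; the following is a minimal sketch, assuming the same producer, topic, and key/value scheme as above:

import org.apache.kafka.clients.producer.Callback

// Asynchronous variant: send() returns immediately and the callback fires
// once the broker acknowledges the record (or the send fails).
for (i <- 1 to 4; j <- 1 to 2) {
  val record = new ProducerRecord[String, String]("test", s"item$j", s"action$i")
  producer.send(record, new Callback {
    override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit = {
      if (exception != null) exception.printStackTrace()
      else println(s"[key:item$j, value:action$i, partition:${metadata.partition()}, offset:${metadata.offset()}]")
    }
  })
}
producer.flush() // wait for all outstanding sends to be acknowledged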
Consumer
package com.zjw.spark.streaming

import java.util
import java.util.Properties
import java.util.regex.Pattern

import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRebalanceListener, ConsumerRecord, ConsumerRecords, KafkaConsumer, OffsetAndMetadata}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer

object KafkaConsumer {

  def main(args: Array[String]): Unit = {
    val pros = new Properties()
    pros.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.100.123:9092,192.168.100.123:9093,192.168.100.123:9094")
    pros.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer])
    pros.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer])
    // Start from the latest offset when the group has no committed offset,
    // and commit offsets manually instead of relying on auto-commit.
    pros.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest")
    pros.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
    pros.put(ConsumerConfig.GROUP_ID_CONFIG, "bula5")

    val consumer = new KafkaConsumer[String, String](pros)

    // Subscribe to every topic matching the pattern and log rebalance events.
    consumer.subscribe(Pattern.compile("test"), new ConsumerRebalanceListener {
      override def onPartitionsRevoked(partitions: util.Collection[TopicPartition]): Unit = {
        println("onPartitionsRevoked.........")
        val iter = partitions.iterator()
        while (iter.hasNext) {
          println(iter.next())
        }
      }

      override def onPartitionsAssigned(partitions: util.Collection[TopicPartition]): Unit = {
        println("onPartitionsAssigned.........")
        val iter = partitions.iterator()
        while (iter.hasNext) {
          println(iter.next())
        }
        // Demo only: jump partition 1 of "test" to offset 220 after every rebalance.
        consumer.seek(new TopicPartition("test", 1), 220)
      }
    })

    val offMap = new util.HashMap[TopicPartition, OffsetAndMetadata]()
    var record: ConsumerRecord[String, String] = null

    while (true) {
      val records: ConsumerRecords[String, String] = consumer.poll(100)
      if (!records.isEmpty) {
        println(s"----${records.count()}----")
        offMap.clear()
        val iter = records.iterator()
        while (iter.hasNext) {
          record = iter.next()
          val topic: String = record.topic()
          val partition: Int = record.partition()
          val offset: Long = record.offset()
          val key: String = record.key()
          val value: String = record.value()
          println(s"[key:$key, value:$value, topic:$topic, partition:$partition, offset:$offset]")
          // Remember, per partition, the next offset to consume (last processed + 1).
          offMap.put(new TopicPartition(topic, partition), new OffsetAndMetadata(offset + 1))
        }
        // Commit the offsets collected for every partition seen in this batch,
        // not just the partition of the last record.
        consumer.commitSync(offMap)
      }
    }
  }
}
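The loop above gathers all offsets from one poll() into a single map and commits them together. Another common pattern is to commit partition by partition, so each partition's offset is committed right after its records are handled. The following is a sketch of that variant, assuming the same consumer instance and one poll() snapshot; it only changes how the batch is iterated and committed:

import java.util.Collections

val batch: ConsumerRecords[String, String] = consumer.poll(100)
val parts = batch.partitions().iterator()
while (parts.hasNext) {
  val tp: TopicPartition = parts.next()
  // Records belonging to this partition only
  val it = batch.records(tp).iterator()
  var lastOffset = -1L
  while (it.hasNext) {
    val r = it.next()
    println(s"[key:${r.key()}, value:${r.value()}, partition:${r.partition()}, offset:${r.offset()}]")
    lastOffset = r.offset()
  }
  if (lastOffset >= 0) {
    // Commit the next offset to read, for this partition alone
    consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(lastOffset + 1)))
  }
}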