MyProducer
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import java.util.Properties;
import java.util.Scanner;
public class MyProducer {
public static void main(String[] args) {
// Properties properties = new Properties();
// properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,"192.168.111.131:9092");
// properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
// properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,StringSerializer.class);
/**
* acks=0: after sending, the producer does not wait for any response from the broker,
*         so there is no guarantee the data actually reached the broker
* acks=1: after sending, the producer only needs acknowledgment from the leader among
*         the replicas (leader, follower); Kafka's default here, data may be lost
* acks=-1 (all): the producer waits for all replicas; longest response time but safest,
*         no data loss, although messages may be duplicated (at-least-once)
*/
// properties.put(ProducerConfig.ACKS_CONFIG,"0");
// KafkaProducer<String, String> producer = new KafkaProducer<>(properties);
// Scanner scanner = new Scanner(System.in);
// String tag="1";
// while (tag.equals("1")){
// System.out.println("请输入要发送到kafka的内容:");
// String content=scanner.nextLine();
// ProducerRecord<String, String> record = new ProducerRecord<>("mydemo2", content);
// producer.send(record);
// System.out.println("是否退出? 0:退出 1:继续发送 请输入");
// tag=scanner.nextLine();
// }
//Multi-threaded producer: launch 1000 threads, each sending 100,000 messages
long start=System.currentTimeMillis();
for (int i=0;i<1000;i++){
new Thread(
new Runnable() {
@Override
public void run() {
Properties properties = new Properties();
properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,"192.168.111.131:9092");
properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,StringSerializer.class);
properties.put(ProducerConfig.ACKS_CONFIG,"-1");
KafkaProducer<String, String> producer = new KafkaProducer<>(properties);
for (int j=0;j<100000;j++){
String name = Thread.currentThread().getName();
String sendMsg="currentThread name is " + name + " send msg count " + j;
ProducerRecord<String, String> record = new ProducerRecord<>("mydemo2",sendMsg);
producer.send(record);
System.out.println(sendMsg);
//pause briefly every 1000 messages so the broker is not flooded
if(j%1000==0){
try {
Thread.sleep(100);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
//close the producer so buffered records are flushed before the thread exits
producer.close();
}
}
).start();
}
System.out.println("time: "+(System.currentTimeMillis()-start));
System.out.println("geme over");
}
}
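
Because producer.send() is asynchronous, the acks setting above only helps if the send result is actually checked; otherwise failed sends disappear silently. The sketch below is illustrative only: it reuses the broker address 192.168.111.131:9092 and topic mydemo2 from the listing, while the class name CallbackProducerSketch and the message contents are made up. It attaches a Callback to each send and flushes before closing so buffered records are delivered.

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;
import java.util.Properties;

public class CallbackProducerSketch {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.111.131:9092"); // broker address taken from the listing above
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        properties.put(ProducerConfig.ACKS_CONFIG, "-1"); // wait for all in-sync replicas
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(properties)) {
            for (int i = 0; i < 10; i++) {
                ProducerRecord<String, String> record = new ProducerRecord<>("mydemo2", "msg " + i);
                // the callback fires once the broker acknowledges the record (or the send fails)
                producer.send(record, new Callback() {
                    @Override
                    public void onCompletion(RecordMetadata metadata, Exception exception) {
                        if (exception != null) {
                            exception.printStackTrace();
                        } else {
                            System.out.println("sent to partition " + metadata.partition() + " at offset " + metadata.offset());
                        }
                    }
                });
            }
            producer.flush(); // deliver anything still buffered before close()
        }
    }
}
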
MyConsumer
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.util.Collections;
import java.util.Properties;
public class MyConsumer {
public static void main(String[] args) {
Properties prop = new Properties();
prop.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"192.168.111.131:9092");
prop.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
prop.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,StringDeserializer.class);
//disable auto commit; offsets must then be committed manually (commitSync/commitAsync)
prop.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG,"false");
//auto.commit.interval.ms only takes effect when auto commit is enabled
prop.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG,"10000");
//earliest: if a committed offset exists for a partition, resume consuming after that offset;
//          if no committed offset is found, start from the first message in the partition
//latest:   if a committed offset exists for a partition, resume consuming after that offset;
//          if no committed offset is found, consume only newly produced messages
//none:     if a committed offset exists for a partition, resume consuming after that offset;
//          if no committed offset is found, throw an exception
prop.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,"earliest");
//Each independent consumer (or each re-run that should re-read the topic) needs its own group.id;
//consumers sharing a group.id split the topic's partitions between them
prop.put(ConsumerConfig.GROUP_ID_CONFIG,"group_1");
/**
* A consumer group containing a single consumer
*/
// KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(prop);
// kafkaConsumer.subscribe(Collections.singleton("mydemo"));
//
// while(true){
// ConsumerRecords<String, String> records = kafkaConsumer.poll(100);
// for(ConsumerRecord<String,String>record:records){
// System.out.println(record.offset()+" "+record.key()+" "+record.value());
// System.out.println();
//
// }
System.out.println("-------------------------------------");
// //如果自动提交关闭 ENABLE_AUTO_COMMIT_CONFIG设置为FALSE
// //手动提交方法
kafkaConsumer.commitAsync();
// }
for (int i = 0; i < 12 ; i++) {
new Thread(new Runnable() {
@Override
public void run() {
KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(prop);
kafkaConsumer.subscribe(Collections.singleton("mydemo"));
while (true) {
ConsumerRecords<String, String> records = kafkaConsumer.poll(100);
String content=Thread.currentThread().getName()+" has no data";
for (ConsumerRecord<String, String> record : records) {
System.out.println(Thread.currentThread().getName() + " "+record.offset() + " "
+ record.key() + " " + record.value());
}
if (records.isEmpty()){
System.out.println(content);
}
}
}
}).start();
}
}
}
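
Since ENABLE_AUTO_COMMIT_CONFIG is set to false above but the threads never call commitSync() or commitAsync(), the group's offsets are never stored, and with auto.offset.reset=earliest every restart re-reads the topic from the beginning. Below is a minimal single-consumer sketch of manual committing; the class name ManualCommitConsumerSketch and the group.id group_manual_commit are made up, everything else reuses the configuration from the listing, including the same poll(long) overload (deprecated in newer clients in favor of poll(Duration)).

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.util.Collections;
import java.util.Properties;

public class ManualCommitConsumerSketch {
    public static void main(String[] args) {
        Properties prop = new Properties();
        prop.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.111.131:9092"); // broker address taken from the listing above
        prop.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        prop.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        prop.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); // offsets are committed manually below
        prop.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        prop.put(ConsumerConfig.GROUP_ID_CONFIG, "group_manual_commit"); // made-up group id
        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(prop);
        kafkaConsumer.subscribe(Collections.singleton("mydemo"));
        while (true) {
            ConsumerRecords<String, String> records = kafkaConsumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record.offset() + " " + record.key() + " " + record.value());
            }
            if (!records.isEmpty()) {
                // commit only after the whole batch has been processed;
                // commitSync blocks until the broker confirms the commit
                kafkaConsumer.commitSync();
            }
        }
    }
}
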