import org.apache.kafka.clients.producer.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Properties;
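
/**
 * Minimal Kafka producer demo: lazily creates a shared KafkaProducer and
 * sends 100 string messages to the "test" topic.
 */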
public class Producer {

    private static final Logger logger = LoggerFactory.getLogger(Producer.class);

    // volatile is required for the double-checked locking below to be thread-safe
    private static volatile KafkaProducer<String, String> kafkaProducer;

    public static void main(String[] args) {
        KafkaProducer<String, String> producer = getKafkaProducer();
        for (int i = 0; i < 100; i++) {
            // Fire-and-forget send; call get() on the returned Future to make it synchronous
            producer.send(new ProducerRecord<>("test", Integer.toString(i)));
            // Asynchronous send with a callback:
            /*
            producer.send(new ProducerRecord<>("test", Integer.toString(i + 100)), (metadata, exception) -> {
                if (exception != null) {
                    exception.printStackTrace();
                }
            });
            */
        }
        producer.close();
    }

    /**
     * Get the Kafka producer (lazily initialized, shared singleton).
     *
     * @return the shared KafkaProducer instance
     */
    public static KafkaProducer<String, String> getKafkaProducer() {
        if (kafkaProducer == null) {
            synchronized (Producer.class) {
                try {
                    if (kafkaProducer == null) {
                        kafkaProducer = initKafkaProducer();
                    }
                } catch (Exception e) {
                    logger.error("Failed to create the Kafka producer", e);
                }
            }
        }
        return kafkaProducer;
    }
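    /**
     * Sketch (not part of the original flow): a genuinely synchronous send.
     * send() by itself is asynchronous; blocking on the returned Future waits
     * for the broker acknowledgement (per the acks setting) or surfaces the error.
     */
    private static RecordMetadata sendSync(KafkaProducer<String, String> producer,
                                           String topic, String value) throws Exception {
        // Future.get() blocks until the record is acknowledged or the send fails
        return producer.send(new ProducerRecord<>(topic, value)).get();
    }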
    /**
     * Initialize the Kafka producer.
     *
     * @return a new KafkaProducer configured for string keys and values
     */
    private static KafkaProducer<String, String> initKafkaProducer() {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "master:9092");
        // acks: 0 = no acknowledgement, 1 = leader only, -1/all = leader plus all in-sync replicas
        props.put(ProducerConfig.ACKS_CONFIG, "1");
        // number of retries allowed on send failure
        props.put(ProducerConfig.RETRIES_CONFIG, 0);
        // maximum batch size in bytes
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 4096);
        // wait up to 1 ms to batch records before sending
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        // total memory (bytes) available for buffering unsent records
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 40960);
        // custom partitioner: spread records across partitions round-robin
        props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, RoundRobinPartitioner.class);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        return new KafkaProducer<>(props);
    }
}