废话不多说,先上代码:
public class KafkaClientWrapper {
private Producer<String, String> producer = getProducer();
private static final Random rand =new Random();
private Producer<String, String> getProducer() {
Properties properties = new Properties();
properties.put("metadata.broker.list",Conf.KAFKA_METADATA_BROKER_LIST);
properties.put("serializer.class",Conf.KAFKA_SERIALIZER_CLASS);
properties.put("request.required.acks",Conf.KAFKA_REQUEST_REQUIRED_ACKS);
properties.put("partitioner.class",Conf.KAFKA_CLIENT_PRODUCER);
producer = new Producer<>(new ProducerConfig(properties));
return producer;
}
public void sendEvent(String message,String key) {
this.produce(Conf.HUATUO_EVENTS,message,key);
}
public void sendFlinkEvent(String message,String key) {
this.produce(Conf.HUATUO_EVENTS_FLINK,message, key);
}
// public void sendGauge(String message,String key) {
// this.produce(Conf.HUATUO_GAUGES,message, key);
// }
public void sendGauge(List<HashMap<String,Object>> message, String key) {
this.produceList(Conf.HUATUO_GAUGES,message, key);
}
public void sendTimer(List<HashMap<String,Object>> message,String key) {
this.produceList(Conf.HUATUO_TIMERS,message, key);
}
public boolean produceList(String topic, List<HashMap<String,Object>> msgs,String key) {
if(SysUtil.isNull(key)){
key = UUID.randomUUID().toString();
}
List<KeyedMessage<String, String>> list = new ArrayList<>();
for (HashMap<String,Object> msg:msgs){
list.add(new KeyedMessage<>(topic, key, JSONObject.fromObject(msg).toString()));
}
try {
producer.send(list);
} catch (Exception e) {
LogUtil.commError(e,"produce list msg failed");
return false;
}
return true;
}
public boolean produce(String topic, String msg,String key) {
if(SysUtil.isNull(key)){
key = UUID.randomUUID().toString();
}
try {
producer.send(new KeyedMessage<>(topic, key, msg));
} catch (Exception e) {
LogUtil.commError(e,"produce msg failed");
return false;
}
return true;
}
public void stop() {
LogUtil.commInfo("invoking kafka stop()!");
closeProducerSafely(producer);
}
private void closeProducerSafely(Producer<String,String> producer) {
if (producer != null) {
try {
producer.close();
} catch(Exception e){
LogUtil.commError(e,"Error when closing producer");
}
}
}
}
与默认配置相比,这里多加了一项自定义分区器配置:properties.put("partitioner.class",Conf.KAFKA_CLIENT_PRODUCER);
原来未指定key时取随机UUID、消息随机落到各分区;现在改由自定义分区器根据key的hashCode值做负载均衡,使相同key的消息落到同一分区。
该配置指向的类如下:
/**
 * Key-based partitioner: routes a message to partition
 * {@code |key.hashCode()| % numPartitions}, so all messages carrying the same
 * key land on the same partition (instead of Kafka's default random choice).
 */
public class CidPartitioner implements Partitioner {

    /**
     * Required by Kafka, which instantiates the partitioner reflectively and
     * passes the producer's properties; the body is intentionally empty, but
     * the constructor itself must exist.
     */
    @SuppressWarnings("unused")
    public CidPartitioner(VerifiableProperties props) {
    }

    /**
     * @param key           the message key; may be null when no key was supplied
     * @param numPartitions total partitions of the target topic (must be > 0)
     * @return a partition index in [0, numPartitions)
     */
    @Override
    public int partition(Object key, int numPartitions) {
        if (key == null) {
            // The old catch-block called key.hashCode() again and re-threw the
            // same NPE; fall back to partition 0 for keyless messages instead.
            return 0;
        }
        // Widen to long before Math.abs so Integer.MIN_VALUE cannot overflow.
        return (int) (Math.abs((long) key.hashCode()) % numPartitions);
    }
}
produceList方法将多条消息组装成一个列表后一次性批量发送,比在循环中逐条调用send效率更高。