接上: kafka 简易发送/接收框架 代码之2
MsgTopic 消息主题常量:
package org.test;
/**
* 消息主题常量,
* 消息分类常量,
* 消息分类字典,
* 可根据情况增加/修改
*
* @author guishuanglin 2019-09-5
*/
public class MsgTopic {
    //==================== Common fields of the message body =======================
    /** Project name */
    public static final String MSG_PROJECT = "project";

    //==================== Message topic constants =======================
    /** System data sharing: published to subsystems after system data changes. */
    public static final String TOPIC_SYS_ARCH = "sys-arch";
    /** Unsolicited data upload: some device data is reported periodically. */
    public static final String TOPIC_DATA_UP = "data-up";
    /** Active command sending: switch commands, read-out commands, etc. */
    public static final String TOPIC_COMMAND_SEND = "command-send";
    /** Command result replies (reply data or command execution results). */
    public static final String TOPIC_COMMAND_RECE = "command-rece";

    //==================== Command sub-topic constants =======================
    /** [read] online state */
    public static final String COMMAND_R_STATE = "r-state";
    /** [read] switch command */
    public static final String COMMAND_R_SWITCH = "r-switch";
    /** [write] switch command */
    public static final String COMMAND_W_SWITCH = "w-switch";
    /** [read] temperature data */
    public static final String COMMAND_R_TEMPERATURE = "r-temperature";
    /** [write] temperature data. Fixed: was "r-temperature", identical to the read command. */
    public static final String COMMAND_W_TEMPERATURE = "w-temperature";

    //==================== Upload sub-topic constants =======================
    /** Alarm data upload */
    public static final String DATA_UP_ALARM = "alarm";
    /** Power data upload */
    public static final String DATA_UP_POWER = "power";
}
MsgUtils 消息内部工具, 为了不引用其它项目的工具特copy了一些工具独立使用:
package org.test;
import java.math.BigDecimal;
import java.math.RoundingMode;
import java.net.InetAddress;
import java.net.UnknownHostException;
/**
* 内部用到的工具.
* 收发消息见:KafkaTemplate
*
* @author guishuanglin 2019-09-5
*/
public class MsgUtils {
    /** Default scale (decimal places) used when formatting floating point values. */
    private static final int scalev = 4;
    /** Rolling local serial number, 1..999999; guarded by getMsgId19()'s synchronization. */
    private static long serial = 1;
    /** Lazily resolved, cached local host IP. */
    private static String hostIp = null;

    /**
     * Generates a 19-digit message id:
     * 10 digits of epoch seconds + 3 random digits + 6-digit local serial.
     *
     * @return a 19-digit id; unique per JVM up to 999999 ids per (second, random) pair
     */
    public synchronized static long getMsgId19() {
        long xd = System.currentTimeMillis() / 1000;
        // Truncation keeps the value strictly in 0..999. The previous
        // Math.round(Math.random() * 1000) could produce 1000, overflowing the
        // 3-digit slot and corrupting the timestamp portion of the id.
        int randomInt = (int) (Math.random() * 1000);
        xd = xd * 1000 + randomInt;
        if (serial > 999999) { serial = 1; }
        long localId = serial++;
        return xd * 1000000 + localId;
    }

    /**
     * Returns the local host IP, caching the first successful resolution;
     * falls back to "localhost" when resolution fails.
     * Not synchronized: a concurrent first call may resolve twice, which is harmless.
     */
    public static String getHostIP() {
        if (hostIp == null) {
            try {
                hostIp = InetAddress.getLocalHost().getHostAddress();
                System.out.println(hostIp);
            } catch (UnknownHostException e) {
                hostIp = "localhost";
                e.printStackTrace();
            }
        }
        return hostIp;
    }

    /**
     * Converts an arbitrary value to a trimmed String.
     * Double/Float values are rendered with {@code scalev} decimal places
     * (HALF_UP); a BigDecimal keeps scale 0 as-is, otherwise it is rescaled.
     * Date objects get no special handling (plain String.valueOf).
     *
     * @param data value to convert, may be null
     * @return the string form, or null for null / blank / "null" input
     */
    public static String getString(Object data) {
        if (data == null) return null;
        if (data instanceof String) {
            return trimEmpty((String) data);
        }
        if (data instanceof Integer) {
            return Integer.toString(((Integer) data).intValue());
        }
        if (data instanceof Long) {
            return Long.toString(((Long) data).longValue());
        }
        if (data instanceof Double) {
            return BigDecimal.valueOf(((Double) data).doubleValue()).setScale(scalev, RoundingMode.HALF_UP).toString();
        }
        if (data instanceof Float) {
            return BigDecimal.valueOf(((Float) data).doubleValue()).setScale(scalev, RoundingMode.HALF_UP).toString();
        }
        if (data instanceof BigDecimal) {
            BigDecimal bb = (BigDecimal) data;
            return bb.scale() == 0 ? bb.toString() : bb.setScale(scalev, RoundingMode.HALF_UP).toString();
        }
        if (data instanceof Short) {
            return Short.toString(((Short) data).shortValue());
        }
        if (data instanceof Byte) {
            return Byte.toString(((Byte) data).byteValue());
        }
        // Fallback for all other types.
        return trimEmpty(String.valueOf(data).trim());
    }

    /**
     * Converts an arbitrary value to a Long; fractional values are truncated.
     * (The original javadoc incorrectly described a String conversion.)
     *
     * @param data value to convert, may be null
     * @return the Long value, or null for null / blank / unparseable input
     */
    public static Long getLong(Object data) {
        if (data == null) { return null; }
        if (data instanceof Long) {
            return (Long) data;
        }
        if (data instanceof Integer) {
            return Long.valueOf(((Integer) data).longValue());
        }
        if (data instanceof Short) {
            return Long.valueOf(((Short) data).longValue());
        }
        if (data instanceof Double) {
            return Long.valueOf(((Double) data).longValue());
        }
        if (data instanceof Float) {
            return Long.valueOf(((Float) data).longValue());
        }
        if (data instanceof BigDecimal) {
            return Long.valueOf(((BigDecimal) data).longValue());
        }
        if (data instanceof Byte) {
            return Long.valueOf(((Byte) data).longValue());
        }
        String s = String.valueOf(data).trim();
        if (s.equals("")) { return null; }
        try {
            // Long.valueOf replaces the deprecated new Long(String) constructor.
            return Long.valueOf(s);
        } catch (Exception e) {
            System.out.println("对象 "+s+" 转换成 Long 数据错误.");
            return null;
        }
    }

    /**
     * Converts an arbitrary value to a BigDecimal at the given scale (HALF_UP).
     * Long/Integer input keeps scale 0, matching the original behavior.
     *
     * @param data  value to convert, may be null
     * @param scale decimal places applied to BigDecimal/Double/Float/String input
     * @return the BigDecimal, or null for null / blank / unparseable input
     */
    public static BigDecimal getBigDecimal(Object data, int scale) {
        if (data == null) { return null; }
        if (data instanceof BigDecimal) {
            return ((BigDecimal) data).setScale(scale, RoundingMode.HALF_UP);
        }
        if (data instanceof Double) {
            return BigDecimal.valueOf(((Double) data).doubleValue()).setScale(scale, RoundingMode.HALF_UP);
        }
        if (data instanceof Float) {
            return BigDecimal.valueOf(((Float) data).doubleValue()).setScale(scale, RoundingMode.HALF_UP);
        }
        if (data instanceof Long) {
            return BigDecimal.valueOf(((Long) data).longValue());
        }
        if (data instanceof Integer) {
            return BigDecimal.valueOf(((Integer) data).longValue());
        }
        String s = String.valueOf(data).trim();
        if (s.equals("")) { return null; }
        try {
            return new BigDecimal(s).setScale(scale, RoundingMode.HALF_UP);
        } catch (Exception e) {
            System.out.println("对象 "+s+" 转换成 BigDecimal 数据错误.");
            return null;
        }
    }

    /**
     * Trims a string and normalizes blank / "null" / "NULL" to null.
     */
    public static String trimEmpty(String o) {
        if (o == null) return null;
        String str = o.trim();
        // trim() never returns null, so only emptiness and "null" markers are checked.
        if (str.equals("") || str.equals("null") || str.equals("NULL")) return null;
        return str;
    }

    /** Manual smoke test. */
    public static void main(String[] args) {
        long seqId = MsgUtils.getMsgId19();
        System.out.println(seqId);
    }
}
ReceiveKafkaThread 消息接收线程:
package org.test;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import com.alibaba.fastjson.JSON;
/**
* 消息接收线程(单线程运行)<br>
* 请在 KafkaTemplate 中调用方法启动
*
* @author guishuanglin 2019-09-5
*
*/
public class ReceiveKafkaThread implements Runnable {
    private Log logger = LogFactory.getLog(this.getClass());
    private String fn = "Kafka接收线程";
    // All state is static: the framework runs exactly one receiver thread.
    // volatile so flag writes from stop() are visible to the polling thread.
    private static volatile boolean isRuning = false;
    private static String projectName = null;
    private static volatile boolean isStop = false;
    // receive configuration
    private static AbstractConfig cConfig = null;
    // the single consumer; KafkaConsumer is NOT thread-safe, so it is only
    // polled and closed on this thread
    private static KafkaConsumer<String, String> consumer = null;
    // message preprocessor applied before dispatching to callbacks
    private static IReceivePreprocessor preprocessor = null;
    // counted down by run() when the polling loop exits; stop() waits on it
    private static CountDownLatch stopSignal = new CountDownLatch(1);

    /**
     * @param config receive configuration
     * @param prepr  preprocessing logic applied to every received message
     */
    public ReceiveKafkaThread(AbstractConfig config, IReceivePreprocessor prepr) {
        super();
        cConfig = config;
        preprocessor = prepr;
    }

    @Override
    public void run() {
        if (isRuning) return;       // only one polling loop may run
        if (!init()) return;        // initialization failed: no latch to release
        isRuning = true;
        while (!isStop) {
            try {
                long tvb = System.currentTimeMillis();
                int count = 0;
                ConsumerRecords<String, String> records = consumer.poll(200);
                count = records.count();
                for (ConsumerRecord<String, String> record : records) {
                    processMsg(record, projectName);
                }
                long tve = System.currentTimeMillis();
                if (logger.isDebugEnabled()) {
                    logger.debug("完成接收"+ count +"条消息, 耗时:"+ (tve - tvb) +" ms");
                }
            } catch (Exception e) {
                logger.error("读取Kafka消息出错: ", e);
            }
            // consumer is single-threaded; nothing extra to do per iteration
        }
        // Close on this thread: KafkaConsumer must not be closed from the
        // thread that called stop(). The original code leaked the consumer.
        try {
            consumer.close();
        } catch (Exception e) {
            logger.error("关闭KafkaConsumer出错: ", e);
        }
        isStop = true;
        isRuning = false;
        stopSignal.countDown();
    }

    /**
     * 处理kafka信息, 此处用单线程接收,保证消息顺序.<br>
     * 注意: 不要在此类采用多线程处理, 因为消息是有顺序的, 如果在此启用多线程接收, 可能消息前后顺序会乱.<br>
     * 正确的多线程处理方式是在 IMsgBusiness 实现接收消息队列, 快速接收消息, 然后采用多线程处理队列消息.<br>
     */
    private void processMsg(ConsumerRecord<String, String> record, String projectName) {
        try {
            Map<String, Object> msgdata = JSON.parseObject(record.value());
            String project = MsgUtils.getString(msgdata.get(MsgTopic.MSG_PROJECT));
            // Only messages tagged with our own project name are dispatched.
            if (project != null && project.equals(projectName)) {
                // Preprocess the raw record into the framework's MsgRecord.
                MsgRecord msgRecord = preprocessor.preprocessor(record, msgdata);
                String subTopic = msgRecord.getSubTopic();
                // The sub-topic is required to locate the registered callback.
                IReceiveCallback back = ReceiveProcessFactory.getReceiveCallback(record.topic(), subTopic);
                if (back != null) {
                    back.receiveProcess(msgRecord);
                } else {
                    if (logger.isDebugEnabled()) {
                        logger.debug("收到未知主题消息:"+ record.toString());
                    }
                }
            } else {
                if (logger.isDebugEnabled()) {
                    logger.debug("收到未知项目消息:"+ record.toString());
                }
            }
        } catch (Exception e) {
            logger.error("处理Kafka消息出错: ", e);
        }
    }

    /**
     * Initializes the KafkaConsumer from the configured AbstractConfig and
     * subscribes to its topics. (The original javadoc was a copy-paste of
     * the isStop() description.)
     *
     * @return true when the consumer is ready (or already initialized)
     */
    private boolean init() {
        boolean br = false;
        if (consumer != null) { return true; }
        if (cConfig == null) {
            logger.error(fn + ", 启动消息监听线程失败: kafka 配置为空, 请先设置 AbstractConfig 参数");
            return br;
        }
        if (!cConfig.checkConfig()) {
            return br;
        }
        if (preprocessor == null) {
            logger.error(fn + ", 启动消息监听线程失败: 接收预处理对象为空, 请先设置 IReceivePreprocessor 参数");
            return br;
        }
        projectName = cConfig.getProjectName();
        logger.info("kafka接收配置, clientId ="+cConfig.getClientId() +", groupId ="+cConfig.getGroupId()+", Servers ="+cConfig.getServers());
        KafkaConsumer<String, String> _consumer = new KafkaConsumer<String, String>(cConfig.getPropertiesConfig());
        _consumer.subscribe(cConfig.getTopics());
        consumer = _consumer;
        br = true;
        return br;
    }

    /**
     * KafkaConsumer 接收线程是否停止.
     */
    public static boolean isStop() {
        return isStop;
    }

    /**
     * 停止运行 KafkaConsumer 接收线程,并且关闭 KafkaConsumer.
     * Waits for the polling thread to exit and close the consumer.
     */
    public static boolean stop() {
        isStop = true;
        if (isRuning) {
            try {
                // Wait on the latch run() was started with. The original code
                // replaced stopSignal with a NEW latch here, racing with
                // run()'s countDown() on the old one — stop() could then block
                // forever. It also awaited even when the thread never ran.
                stopSignal.await();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
        consumer = null;
        cConfig = null;
        preprocessor = null;
        isRuning = false;
        return isStop;
    }
}
ReceiveProcessFactory 接收回调处理业务工厂:
package org.test;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
* 接收回调处理业务工厂<br>
* 如果需要接收消息进行处理, 接收之前, 请增加消息处理回调.<br>
* <br>
* 注意: 如果发送消息时, 调用带有回调参数的方法, 则会自己增加到此工厂; 也可以在实现IReceiveCallback时的实现类中增加到工厂.
*
* @author guishuanglin 2019-09-5
*/
public class ReceiveProcessFactory {

    /** Callback registry keyed by "topic_subTopic". */
    private static final Map<String, IReceiveCallback> PROCESS_MAP =
            new ConcurrentHashMap<String, IReceiveCallback>();

    /** Builds the registry key for a topic / sub-topic pair. */
    private static String keyOf(String topic, String subTopic) {
        return topic + "_" + subTopic;
    }

    /**
     * Looks up the callback registered for the given topic and sub-topic.
     *
     * @return the registered callback, or null when none exists
     */
    public static IReceiveCallback getReceiveCallback(String topic, String subTopic) {
        return PROCESS_MAP.get(keyOf(topic, subTopic));
    }

    /**
     * Registers a callback for the given topic and sub-topic.
     * A previously registered callback is kept; later registrations
     * for the same key are silently ignored.
     */
    public static void setReceiveCallback(IReceiveCallback callback, String topic, String subTopic) {
        PROCESS_MAP.putIfAbsent(keyOf(topic, subTopic), callback);
    }
}
kafka 简易发送/接收框架 代码全部完成 (此代码已经过测试)
说明: 代码可以免费用于商业用途, 但请保留作者信息.
kafka的安装, 包的下载, windows版的运行, 请参考网上其它文档.
提醒: kafka 的发送与回复是异步方式的: 发送时用发送主题, 接收时用接收主题, 这也是 kafka 速度快的一个原因. 当然, 现实中我们往往需要在发送后等待回复, 这正是本框架定义发送/接收回调类的原因: 通过回调类即可实现发送与接收的配合处理, 同时又不影响 kafka 的效率.