1.flume的安装部署
1.1 安装地址
官方安装地址
文档查看地址
http://flume.apache.org/FlumeUserGuide.html
下载地址
http://archive.apache.org/dist/flume/
1.2 安装部署
我使用的是flume1.8版本的
- 将 apache-flume-1.8.0-bin.tar.gz 上传到 linux 的`/home/sugon` 目录下,拷贝至 `/data/`或其他自定目录下
- 解压 apache-flume-1.8.0-bin.tar.gz `tar -zxf apache-flume-1.8.0-bin.tar.gz`
- 将 `flume/conf/` 下的 `flume-env.sh.template` 文件修改为 `flume-env.sh`,并配置` flume-env.sh `文件
mv flume-env.sh.template flume-env.sh
vim flume-env.sh
#写入JAVA_HOME
export JAVA_HOME=/usr/software/jdk1.8.0_281
4.可选择对`/flume/conf`下的`log4j.properties`配置文件进行修改,修改后可将日志文件储存在文件中
#flume.root.logger=DEBUG,console
flume.root.logger=INFO,LOGFILE
2.flume与rocketMq结合
2.1 下载与编译rocketmq-flume-sink
gitee下载地址:https://gitee.com/mirrors/RocketMQ-Externals/tree/master/rocketmq-flume/ ,只要rocketmq-flume-sink就行,其余可以不用,
具体代码内容如下:
- 里面rocketmq的信息先不填,在flume的配置文件中写即可
package org.apache.rocketmq.flume.ng.sink;
/**
 * Configuration keys and default values for the Flume RocketMQ sink.
 * Pure constants holder: final and non-instantiable.
 */
public final class RocketMQSinkConstants {
    /** Config key for the RocketMQ name server address (required, no default). */
    public static final String NAME_SERVER_CONFIG = "nameserver";
    /** Config key for the destination topic. */
    public static final String TOPIC_CONFIG = "topic";
    public static final String TOPIC_DEFAULT = "";
    /** Config key for the message tag. */
    public static final String TAG_CONFIG = "tag";
    public static final String TAG_DEFAULT = "";
    /** Config key for the producer group name. */
    public static final String PRODUCER_GROUP_CONFIG = "producerGroup";
    public static final String PRODUCER_GROUP_DEFAULT = "";
    /** Config key for the maximum number of events taken per transaction. */
    public static final String BATCH_SIZE_CONFIG = "batchSize";
    public static final int BATCH_SIZE_DEFAULT = 32;
    /** Config key for the time budget (ms) spent draining the channel per batch. */
    public static final String MAX_PROCESS_TIME_CONFIG = "maxProcessTime";
    public static final long MAX_PROCESS_TIME_DEFAULT = 1000;

    private RocketMQSinkConstants() {
        // constants holder — not instantiable
    }
}
- 在编译时,需要加上 producer.setVipChannelEnabled(false); 关闭rocketmq的vip通道,否则运行flume时会报错,默认走10911端口,而不是9876端口
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.rocketmq.flume.ng.sink;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.flume.Channel;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.FlumeException;
import org.apache.flume.Transaction;
import org.apache.flume.conf.Configurable;
import org.apache.flume.conf.ConfigurationException;
import org.apache.flume.instrumentation.SinkCounter;
import org.apache.flume.sink.AbstractSink;
import org.apache.rocketmq.client.exception.MQClientException;
import org.apache.rocketmq.client.producer.DefaultMQProducer;
import org.apache.rocketmq.client.producer.SendCallback;
import org.apache.rocketmq.client.producer.SendResult;
import org.apache.rocketmq.common.message.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.rocketmq.flume.ng.sink.RocketMQSinkConstants.BATCH_SIZE_CONFIG;
import static org.apache.rocketmq.flume.ng.sink.RocketMQSinkConstants.BATCH_SIZE_DEFAULT;
import static org.apache.rocketmq.flume.ng.sink.RocketMQSinkConstants.MAX_PROCESS_TIME_CONFIG;
import static org.apache.rocketmq.flume.ng.sink.RocketMQSinkConstants.MAX_PROCESS_TIME_DEFAULT;
import static org.apache.rocketmq.flume.ng.sink.RocketMQSinkConstants.NAME_SERVER_CONFIG;
import static org.apache.rocketmq.flume.ng.sink.RocketMQSinkConstants.PRODUCER_GROUP_CONFIG;
import static org.apache.rocketmq.flume.ng.sink.RocketMQSinkConstants.PRODUCER_GROUP_DEFAULT;
import static org.apache.rocketmq.flume.ng.sink.RocketMQSinkConstants.TAG_CONFIG;
import static org.apache.rocketmq.flume.ng.sink.RocketMQSinkConstants.TAG_DEFAULT;
import static org.apache.rocketmq.flume.ng.sink.RocketMQSinkConstants.TOPIC_CONFIG;
import static org.apache.rocketmq.flume.ng.sink.RocketMQSinkConstants.TOPIC_DEFAULT;
/**
*
*/
/**
 * Flume sink that delivers channel events to RocketMQ in batches using
 * asynchronous sends. The batch is committed only when every send succeeded;
 * any failure rolls the transaction back, so delivery is at-least-once
 * (already-sent messages in a failed batch will be duplicated on retry).
 */
public class RocketMQSink extends AbstractSink implements Configurable {

    private static final Logger log = LoggerFactory.getLogger(RocketMQSink.class);

    private String nameServer;
    private String topic;
    private String tag;
    private String producerGroup;
    private int batchSize;
    private long maxProcessTime;

    /** Monitoring counter. */
    private SinkCounter sinkCounter;

    private DefaultMQProducer producer;

    /**
     * Reads sink settings from the Flume context. The name server address is
     * mandatory; the remaining settings fall back to the defaults declared in
     * {@code RocketMQSinkConstants}.
     *
     * @throws ConfigurationException if {@code nameserver} is not configured
     */
    @Override
    public void configure(Context context) {
        nameServer = context.getString(NAME_SERVER_CONFIG);
        if (nameServer == null) {
            throw new ConfigurationException("NameServer must not be null");
        }
        topic = context.getString(TOPIC_CONFIG, TOPIC_DEFAULT);
        tag = context.getString(TAG_CONFIG, TAG_DEFAULT);
        producerGroup = context.getString(PRODUCER_GROUP_CONFIG, PRODUCER_GROUP_DEFAULT);
        batchSize = context.getInteger(BATCH_SIZE_CONFIG, BATCH_SIZE_DEFAULT);
        maxProcessTime = context.getLong(MAX_PROCESS_TIME_CONFIG, MAX_PROCESS_TIME_DEFAULT);
        if (sinkCounter == null) {
            sinkCounter = new SinkCounter(getName());
        }
    }

    /**
     * Creates and starts the producer. The VIP channel is disabled so the
     * client talks to port 9876 instead of 10911 (required by this setup,
     * see the build note above).
     *
     * @throws FlumeException if the producer fails to start
     */
    @Override
    public synchronized void start() {
        producer = new DefaultMQProducer(producerGroup);
        producer.setVipChannelEnabled(false);
        producer.setNamesrvAddr(nameServer);
        try {
            producer.start();
        } catch (MQClientException e) {
            sinkCounter.incrementConnectionFailedCount();
            log.error("RocketMQ producer start failed", e);
            throw new FlumeException("Failed to start RocketMQ producer", e);
        }
        sinkCounter.incrementConnectionCreatedCount();
        sinkCounter.start();
        super.start();
    }

    /**
     * Drains up to {@code batchSize} events from the channel (spending at most
     * {@code maxProcessTime} ms), sends them asynchronously and waits for all
     * callbacks before deciding commit vs rollback.
     *
     * @return READY when a batch was delivered; BACKOFF when the channel was
     *         empty or at least one send failed (the transaction is rolled
     *         back so the events will be retaken)
     */
    @Override
    public Status process() throws EventDeliveryException {
        Channel channel = getChannel();
        Transaction transaction = null;
        try {
            transaction = channel.getTransaction();
            transaction.begin();

            // Batch take: loop until the batch is full or the time budget is
            // spent. channel.take() may return null on a momentarily empty
            // channel, so this can spin until the deadline — NOTE(review):
            // consider a short sleep on null takes if CPU usage matters.
            List<Event> events = new ArrayList<>(batchSize);
            long beginTime = System.currentTimeMillis();
            while (true) {
                Event event = channel.take();
                if (event != null) {
                    events.add(event);
                }
                if (events.size() == batchSize
                    || System.currentTimeMillis() - beginTime > maxProcessTime) {
                    break;
                }
            }
            if (events.isEmpty()) {
                sinkCounter.incrementBatchEmptyCount();
                transaction.rollback();
                return Status.BACKOFF;
            }

            // Async send: each callback counts the latch down exactly once,
            // so await() returns after every message was acked or failed.
            CountDownLatch latch = new CountDownLatch(events.size());
            AtomicInteger errorNum = new AtomicInteger();
            for (Event event : events) {
                byte[] body = event.getBody();
                Message message = new Message(topic, tag, body);
                if (log.isDebugEnabled()) {
                    // StandardCharsets overload cannot throw, unlike the
                    // original new String(body, "UTF-8").
                    log.debug("Processing event,body={}", new String(body, StandardCharsets.UTF_8));
                }
                producer.send(message, new SendCallBackHandler(message, latch, errorNum));
            }
            latch.await();
            sinkCounter.addToEventDrainAttemptCount(events.size());

            if (errorNum.get() > 0) {
                // At least one send failed: roll back so the whole batch is
                // redelivered (at-least-once semantics).
                log.error("errorNum={},transaction will rollback", errorNum);
                transaction.rollback();
                return Status.BACKOFF;
            }
            transaction.commit();
            sinkCounter.addToEventDrainSuccessCount(events.size());
            return Status.READY;
        } catch (Throwable e) {
            if (e instanceof InterruptedException) {
                // Restore the interrupt flag swallowed by latch.await().
                Thread.currentThread().interrupt();
            }
            log.error("Failed to processing event", e);
            if (transaction != null) {
                try {
                    transaction.rollback();
                } catch (Throwable ex) {
                    log.error("Failed to rollback transaction", ex);
                    throw new EventDeliveryException("Failed to rollback transaction", ex);
                }
            }
            return Status.BACKOFF;
        } finally {
            if (transaction != null) {
                transaction.close();
            }
        }
    }

    /** Shuts the producer down (if it was ever created) and stops counters. */
    @Override public synchronized void stop() {
        if (producer != null) {
            producer.shutdown();
        }
        sinkCounter.incrementConnectionClosedCount();
        sinkCounter.stop();
        super.stop();
    }

    /**
     * Send callback: releases the batch latch on every completion and tallies
     * failures so process() can decide commit vs rollback. Static nested
     * class — it needs no reference to the enclosing sink instance.
     */
    public static class SendCallBackHandler implements SendCallback {

        private final Message message;
        private final CountDownLatch latch;
        private final AtomicInteger errorNum;

        SendCallBackHandler(Message message, CountDownLatch latch, AtomicInteger errorNum) {
            this.message = message;
            this.latch = latch;
            this.errorNum = errorNum;
        }

        @Override
        public void onSuccess(SendResult sendResult) {
            latch.countDown();
            if (log.isDebugEnabled()) {
                log.debug("Sent event,body={},sendResult={}",
                    new String(message.getBody(), StandardCharsets.UTF_8), sendResult);
            }
        }

        @Override
        public void onException(Throwable e) {
            latch.countDown();
            errorNum.incrementAndGet();
            // StandardCharsets overload never throws, so the original nested
            // catch (which logged the wrong exception variable) is gone.
            log.error("Message publish failed,body=" + new String(message.getBody(), StandardCharsets.UTF_8), e);
        }
    }
}
- 完成后进行编译,获取编译后的文件 rocketmq-flume-sink-0.0.2-SNAPSHOT.jar
2.2 flume中需要添加的jar包
上一步编译生成
rocketmq-flume-sink-0.0.2-SNAPSHOT.jar
maven仓库下载
fastjson-1.2.12.jar
netty-all-4.0.36.Final.jar
rocketmq-client-4.0.0-incubating.jar
rocketmq-common-4.0.0-incubating.jar
rocketmq-remoting-4.0.0-incubating.jar
将所有jar包放入flume的lib目录下
2.3 flume配置文件编写
目前测试了两种配置方式,包括 Taildir 和 tail -F
TAILDIR 方式
- flume的Taildir监控多个文件夹并且将历史数据和新增数据添加到指定的文件夹中的一个文件
- a1.sources.r1.positionFile: 记录着文件的偏移量,目前该文件在`/data/logs/flume/taildir_position.json`,若将该文件删除,重新启动flume,又会从头开始读取日志文件。
- 在重启flume之前,一定要将之前的进程清理干净,不然会造成读取发送到rocketmq的速度变慢。
配置文件
a1.sources = r1
a1.channels = c1
a1.sinks = k1
a1.sources.r1.type = TAILDIR
a1.sources.r1.channels = c1
a1.sources.r1.positionFile = /data/logs/flume/taildir_position.json
a1.sources.r1.filegroups = f1 f2
a1.sources.r1.filegroups.f1 = a.log #你日志文件的地址,也可以读取同目录下的多个文件,具体请百度
a1.sources.r1.headers.f1.headerKey1 = online1
a1.sources.r1.filegroups.f2 = b.log #你日志文件的地址
a1.sources.r1.headers.f2.headerKey1 = online2
a1.sources.r1.fileHeader = true
a1.sources.r1.maxBatchCount = 1000
# Bind the source and sink to the channel
a1.sinks.k1.type=org.apache.rocketmq.flume.ng.sink.RocketMQSink
a1.sinks.k1.nameserver=rocketmq地址:9876 #以下rocketmq的信息都改成自己的
a1.sinks.k1.producerGroup=组
a1.sinks.k1.topic=主题
a1.sinks.k1.tag=标签
a1.sinks.k1.channel=c1
# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 10000
a1.channels.c1.transactionCapacity = 1000
a1.channels.c1.keep-alive=10
tail -F 方式
- tail -F -c +0 此为读取原先日志的数据,并且读取新增的数据
- tail -F 只读取新增的数据
- flume使用 exec source 时,可能会导致数据丢失,所以在实际生产环境中并不建议使用。
配置文件
a1.sources = r1
a1.channels = c1
a1.sinks = k1
a1.sources.r1.type = exec
a1.sources.r1.command =tail -F -c +0 a.log
a1.sources.r1.channels = c1
# Bind the source and sink to the channel
a1.sinks.k1.type=org.apache.rocketmq.flume.ng.sink.RocketMQSink
a1.sinks.k1.nameserver= #写自己的rocketmq信息(该sink的配置项名是nameserver,不是namesrvAddr)
a1.sinks.k1.producerGroup=
a1.sinks.k1.topic=
a1.sinks.k1.tag=
a1.sinks.k1.channel=c1
# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 100000
a1.channels.c1.transactionCapacity = 1000
a1.channels.c1.keep-alive=20
2.4 启动flume
后台运行启动flume命令如下:
nohup /data/flume/bin/flume-ng agent -c /data/flume/conf -f /data/flume/conf/flume.conf -n a1 -Dflume.root.logger=INFO,LOGFILE &
注意:
- 每一步都要写全路径,包括flume-ng命令路径,要启动的配置文件所在文件夹路径,配置文件所在路径,否则会报以下的错
+ exec /usr/java/jdk1.8.0_121/bin/java -Xmx20m -Dflume.root.logger=INFO,console -cp 'conf:/usr/local/flume/lib/*:/lib/*' -Djava.library.path= org.apache.flume.node.Application -f conf/flume.conf -n agent1
log4j:WARN No appenders could be found for logger (org.apache.flume.node.Application).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
2. 日志存放路径在启动flume时路径下的logs目录中
2.5 rocketmq控制台信息
若成功完成以上步骤没有出现错误,rocketmq的控制台看见传过来的信息
3. 在springboot中订阅主题
3.1 引入pom文件
<dependency>
<groupId>org.apache.rocketmq</groupId>
<artifactId>rocketmq-client</artifactId>
<version>4.5.2</version>
</dependency>
3.2 编写代码
// Logger for this consumer component.
private static final Logger LOG = LoggerFactory.getLogger(ReadFromBasLog.class);
// RocketMQ connection settings injected from the application properties.
// They must match what was configured for the Flume sink (name server,
// group, topic). You may also hard-code them. No tag is injected here —
// the subscription in runRocketmqCustomer() uses "*" (all tags).
@Value("${rocketmq.consumer.namesrv-addr}")
private String namesrvAddr;
@Value("${rocketmq.consumer.group-name}")
private String groupName;
@Value("${rocketmq.consumer.topics}")
private String topics;
/**
 * Bootstraps the RocketMQ push consumer once the Spring context is ready
 * (@PostConstruct runs this automatically after startup). Broadcasting mode
 * is configured, so messages are pushed to every running instance.
 */
@PostConstruct
public void runRocketmqCustomer() throws Exception {
    DefaultMQPushConsumer pushConsumer = new DefaultMQPushConsumer(groupName);
    pushConsumer.setNamesrvAddr(namesrvAddr);
    // Deliver up to 10 messages per consume call.
    pushConsumer.setConsumeMessageBatchMaxSize(10);
    // Broadcast mode: every consumer instance receives every message.
    pushConsumer.setMessageModel(MessageModel.BROADCASTING);
    // Alternative — cluster mode, messages shared across the group:
    // pushConsumer.setMessageModel(MessageModel.CLUSTERING);
    // On a non-first start, resume from the previously consumed position.
    pushConsumer.setConsumeFromWhere(ConsumeFromWhere.CONSUME_FROM_FIRST_OFFSET);
    // "*" subscribes to every tag under the configured topic.
    pushConsumer.subscribe(topics, "*");
    pushConsumer.registerMessageListener(new ReadFromBasLog.MyBroadCastListener());
    pushConsumer.start();
}
class MyBroadCastListener implements MessageListenerConcurrently {
@Override
public ConsumeConcurrentlyStatus consumeMessage(List<MessageExt> msgs, ConsumeConcurrentlyContext context) {
try {
MessageExt msg = msgs.get(0);
LOG.info(new Date() + msg.toString());
String msgBody = new String(msg.getBody(), "utf-8");
//这里可以写你的业务逻辑代码
} catch (Exception e) {
e.printStackTrace();
return ConsumeConcurrentlyStatus.RECONSUME_LATER;
}
return ConsumeConcurrentlyStatus.CONSUME_SUCCESS;
}
}
4. 拓展: 在阿里云服务器部署rocketmq实现该流程注意事项
- 配置安全组规则 ,开放9876端口,10911端口和rocketmq控制台端口。
- 增加pom依赖,改写配置文件
https://blog.csdn.net/u011835473/article/details/98314684 根据这位大佬的博客编写
<dependency>
<groupId>com.aliyun.openservices</groupId>
<artifactId>ons-client</artifactId>
<version>1.8.0.Final</version>
</dependency>
注意:在阿里云服务器实现该流程需要在服务器中申请accessKey和secretKey,accessKey和secretKey具体操作流程请查询百度。
写RocketMQSinkUtil类
package com.handu.flume.sink.rocketmq;
import com.alibaba.rocketmq.client.producer.DefaultMQProducer;
import com.alibaba.rocketmq.client.producer.MQProducer;
import com.google.common.base.Preconditions;
import org.apache.flume.Context;
/**
 * Configuration keys, defaults and producer factory for the Aliyun-ONS
 * variant of the RocketMQ sink. Final, non-instantiable utility class.
 */
public final class RocketMQSinkUtil {
    /**
     * Topic config key, e.g. a1.sinks.s1.topic=TestTopic
     */
    public static final String TOPIC_CONFIG = "topic";
    public static final String TOPIC_DEFAULT = "";
    /**
     * Tags config key, e.g. a1.sinks.s1.tag=Tag1,Tag2
     */
    public static final String TAG_CONFIG = "tag";
    public static final String TAG_DEFAULT = "";
    /**
     * Producer group config key, e.g. a1.sinks.s1.group=please_rename_unique_group_name
     */
    public static final String GROUP_CONFIG = "group";
    public static final String GROUP_DEFAULT = "";
    /**
     * Name server address config key, e.g. a1.sinks.s1.namesrvAddr=localhost:9876
     */
    public static final String NAMESRV_ADDR_CONFIG = "namesrvAddr";
    /**
     * Aliyun accessKey config key, e.g. a1.sinks.s1.accessKey=...
     * SECURITY: credentials must be supplied via the Flume configuration —
     * never hard-code them in source (the original shipped masked key
     * fragments as defaults, which both leaks and never works).
     */
    public static final String ACCESS_KEY_CONFIG = "accessKey";
    public static final String ACCESS_KEY_DEFAULT = "";
    /**
     * Aliyun secretKey config key, e.g. a1.sinks.s1.secretKey=...
     */
    public static final String SECRET_KEY_CONFIG = "secretKey";
    public static final String SECRET_KEY_DEFAULT = "";

    private RocketMQSinkUtil() {
        // utility class — not instantiable
    }

    /**
     * Builds an (unstarted) DefaultMQProducer from the Flume context.
     *
     * @throws NullPointerException if namesrvAddr is not configured
     */
    public static MQProducer getProducer(Context context) {
        final String producerGroup = context.getString(GROUP_CONFIG, GROUP_DEFAULT);
        final String namesrvAddr = Preconditions.checkNotNull(context.getString(NAMESRV_ADDR_CONFIG), "RocketMQ namesrvAddr must be specified. For example: a1.sinks.s1.namesrvAddr=127.0.0.1:9876");
        DefaultMQProducer producer = new DefaultMQProducer(producerGroup);
        producer.setNamesrvAddr(namesrvAddr);
        return producer;
    }
}
写RocketMQSink类
package com.handu.flume.sink.rocketmq;
import com.aliyun.openservices.ons.api.*;
import com.google.common.base.Throwables;
import org.apache.flume.*;
import org.apache.flume.conf.Configurable;
import org.apache.flume.sink.AbstractSink;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Date;
import java.util.Properties;
public class RocketMQSink extends AbstractSink implements Configurable {
private static final Logger LOG = LoggerFactory.getLogger(RocketMQSink.class);
public String TOPIC;
public String GROUP_ID;
public String ACCESS_KEY;
public String SECRET_KEY;
public String TAG;
public String NAMESRV_ADDR;
private Producer producer;
@Override
public void configure(Context context) {
// 获取配置项
TOPIC = context.getString(RocketMQSinkUtil.TOPIC_CONFIG, RocketMQSinkUtil.TOPIC_DEFAULT);
GROUP_ID = context.getString(RocketMQSinkUtil.GROUP_CONFIG, RocketMQSinkUtil.GROUP_DEFAULT);
ACCESS_KEY = context.getString(RocketMQSinkUtil.ACCESS_KEY_CONFIG, RocketMQSinkUtil.ACCESS_KEY_DEFAULT);
SECRET_KEY = context.getString(RocketMQSinkUtil.SECRET_KEY_CONFIG, RocketMQSinkUtil.SECRET_KEY_DEFAULT);
TAG = context.getString(RocketMQSinkUtil.TAG_CONFIG, RocketMQSinkUtil.TAG_DEFAULT);
NAMESRV_ADDR = context.getString(RocketMQSinkUtil.NAMESRV_ADDR_CONFIG);
Properties properties = new Properties();
properties.setProperty(PropertyKeyConst.GROUP_ID, GROUP_ID);
// AccessKey 阿里云身份验证,在阿里云服务器管理控制台创建
properties.put(PropertyKeyConst.AccessKey,ACCESS_KEY);
// SecretKey 阿里云身份验证,在阿里云服务器管理控制台创建
properties.put(PropertyKeyConst.SecretKey, SECRET_KEY);
//设置发送超时时间,单位毫秒
properties.setProperty(PropertyKeyConst.SendMsgTimeoutMillis, "3000");
// 设置 TCP 接入域名,到控制台的实例基本信息中查看
properties.put(PropertyKeyConst.NAMESRV_ADDR,
NAMESRV_ADDR);
producer = ONSFactory.createProducer(properties);
// 在发送消息前,必须调用 start 方法来启动 Producer,只需调用一次即可
// producer.start();
}
@Override
public Status process() throws EventDeliveryException {
Channel channel = getChannel();
Transaction tx = channel.getTransaction();
try {
tx.begin();
Event event = channel.take();
if (event == null || event.getBody() == null || event.getBody().length == 0) {
tx.commit();
return Status.READY;
}
//发送消息
Message msg = new Message( TOPIC, TAG, event.getBody());
// 设置代表消息的业务关键属性,请尽可能全局唯一。
// 以方便您在无法正常收到消息情况下,可通过阿里云服务器管理控制台查询消息并补发
// 注意:不设置也不会影响消息正常收发
// msg.setKey("ORDERID_" );
try {
SendResult sendResult = producer.send(msg);
// 同步发送消息,只要不抛异常就是成功
if (sendResult != null) {
// LOG.info(new Date() + " Send mq message success. Topic is:" + msg.getTopic() + " msgId is: " + sendResult.getMessageId());
}
}
catch (Exception e) {
// 消息发送失败,需要进行重试处理,可重新发送这条消息或持久化这条数据进行补偿处理
LOG.error(new Date() + " Send mq message failed. Topic is:" + msg.getTopic());
e.printStackTrace();
}
tx.commit();
return Status.READY;
} catch (Exception e) {
LOG.error("RocketMQSink send message exception", e);
try {
tx.rollback();
return Status.BACKOFF;
} catch (Exception e2) {
LOG.error("Rollback exception", e2);
}
return Status.BACKOFF;
} finally {
tx.close();
}
}
@Override
public synchronized void start() {
try {
// 启动Producer
producer.start();
} catch (Exception e) {
LOG.error("RocketMQSink start producer failed", e);
Throwables.propagate(e);
}
super.start();
}
@Override
public synchronized void stop() {
// 停止Producer
producer.shutdown();
super.stop();
}
}
写完后编译,将生成的jar包和ons-client-1.8.0.Final.jar包放到flume的lib目录下
4.1 flume的配置文件
agent1.sources=source1
agent1.channels=channel1
agent1.sinks=sink1
agent1.sources.source1.type=exec
agent1.sources.source1.command=tail -F /home/logs.txt
agent1.sources.source1.shell = /bin/sh -c
agent1.sources.source1.channels=channel1
agent1.sinks.sink1.type=com.handu.flume.sink.rocketmq.RocketMQSink
agent1.sinks.sink1.namesrvAddr= #写自己的
agent1.sinks.sink1.group= #该sink读取的配置项名是group
agent1.sinks.sink1.topic=
agent1.sinks.sink1.tag=
agent1.sinks.sink1.channel=channel1
agent1.channels.channel1.type=memory
agent1.channels.channel1.capacity=100
agent1.channels.channel1.transactionCapacity=100
agent1.channels.channel1.keep-alive=3
运行命令,一样都要写全路径
nohup /usr/local/flume/bin/flume-ng agent -c /usr/local/flume/conf -f /usr/local/flume/conf/flume.conf -n agent1 -Dflume.root.logger=INFO,LOGFILE &
如果运行过程出错,可以尝试更换一下依赖的jar包
比如在阿里云服务器上,我用的jar包分别为:
- fastjson-1.2.75.jar
- netty-all-4.0.23.Final.jar
- rocketmq-client-4.7.1.jar
- rocketmq-common-4.7.1.jar
- rocketmq-remoting-4.7.1.jar
- ons-client-1.8.0.Final.jar