After a message has been stored in a CommitLog file, the ConsumeQueue and Index files must be updated promptly so that the message can be consumed and queried without delay.
Updating the Consume Queue and Index Files
When the Broker starts, it launches a ReputMessageService thread that forwards newly appended messages to the dispatcher list dispatcherList for processing:
public class DefaultMessageStore implements MessageStore {
    private final ReputMessageService reputMessageService;
    private final LinkedList<CommitLogDispatcher> dispatcherList;

    // Initialized in the DefaultMessageStore constructor
    this.reputMessageService = new ReputMessageService();
    this.dispatcherList = new LinkedList<>();
    this.dispatcherList.addLast(new CommitLogDispatcherBuildConsumeQueue());
    this.dispatcherList.addLast(new CommitLogDispatcherBuildIndex());

    // DefaultMessageStore.start()
    // maxPhysicalPosInLogicQueue starts at the minimum physical offset of the CommitLog files
    long maxPhysicalPosInLogicQueue = commitLog.getMinOffset();
    // Raise it to the maximum physical offset already recorded in the consume queues
    for (ConcurrentMap<Integer, ConsumeQueue> maps : this.consumeQueueTable.values()) {
        for (ConsumeQueue logic : maps.values()) {
            if (logic.getMaxPhysicOffset() > maxPhysicalPosInLogicQueue) {
                maxPhysicalPosInLogicQueue = logic.getMaxPhysicOffset();
            }
        }
    }
    ... // DLedger handling omitted
    // Start dispatching from this offset
    this.reputMessageService.setReputFromOffset(maxPhysicalPosInLogicQueue);
    this.reputMessageService.start();
}
CommitLogDispatcherBuildConsumeQueue updates the ConsumeQueue (message consume queue) files
CommitLogDispatcherBuildIndex updates the IndexFile index files
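Both dispatchers implement the store module's CommitLogDispatcher callback interface, which is just a single method (shown here for reference):

public interface CommitLogDispatcher {
    void dispatch(final DispatchRequest request);
}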
After ReputMessageService starts, it runs one dispatch pass every 1 ms:
class ReputMessageService extends ServiceThread {

    private void doReput() {
        ...
        // Keep going until reaching the maximum valid data position of the last CommitLog file
        for (boolean doNext = true; this.isCommitLogAvailable() && doNext; ) {
            ...
            // Fetch all data of the CommitLog file starting at this offset
            SelectMappedBufferResult result = DefaultMessageStore.this.commitLog.getData(reputFromOffset);
            if (result != null) {
                try {
                    this.reputFromOffset = result.getStartOffset();
                    for (int readSize = 0; readSize < result.getSize() && doNext; ) {
                        // Read one message at a time
                        DispatchRequest dispatchRequest =
                            DefaultMessageStore.this.commitLog.checkMessageAndReturnSize(result.getByteBuffer(), false, false);
                        int size = dispatchRequest.getBufferSize() == -1 ? dispatchRequest.getMsgSize() : dispatchRequest.getBufferSize();
                        if (dispatchRequest.isSuccess()) {
                            if (size > 0) {
                                // Dispatch only when a message was actually read
                                for (CommitLogDispatcher dispatcher : DefaultMessageStore.this.dispatcherList) {
                                    dispatcher.dispatch(dispatchRequest);
                                }
                                ... // notify consumers that a new message has arrived
                                this.reputFromOffset += size;
                                readSize += size;
                                ...
                            } else if (size == 0) {
                                // Empty message, i.e. end of this file: roll over to the next one
                                this.reputFromOffset = DefaultMessageStore.this.commitLog.rollNextFile(this.reputFromOffset);
                                readSize = result.getSize();
                            }
                        } else if (!dispatchRequest.isSuccess()) {
                            ... // handle special cases
                        }
                    }
                } finally {
                    result.release();
                }
            }
        }
    }

    public void run() {
        while (!this.isStopped()) {
            try {
                Thread.sleep(1);
                this.doReput();
            } catch (Exception e) {
                DefaultMessageStore.log.warn(this.getServiceName() + " service has exception. ", e);
            }
        }
    }
}
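The loop condition isCommitLogAvailable() checks whether the reput offset has caught up with the CommitLog write position; in the source it amounts to a comparison like this:

private boolean isCommitLogAvailable() {
    // More data remains to dispatch while the reput offset lags behind the CommitLog max offset
    return this.reputFromOffset < DefaultMessageStore.this.commitLog.getMaxOffset();
}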
CommitLogDispatcherBuildConsumeQueue builds the message consume queue files
class CommitLogDispatcherBuildConsumeQueue implements CommitLogDispatcher {
    public void dispatch(DispatchRequest request) {
        final int tranType = MessageSysFlag.getTransactionValue(request.getSysFlag());
        switch (tranType) {
            case MessageSysFlag.TRANSACTION_NOT_TYPE:
            case MessageSysFlag.TRANSACTION_COMMIT_TYPE:
                // Ordinary messages and committed transactional messages
                DefaultMessageStore.this.putMessagePositionInfo(request);
                break;
            case MessageSysFlag.TRANSACTION_PREPARED_TYPE:
            case MessageSysFlag.TRANSACTION_ROLLBACK_TYPE:
                // Prepared and rolled-back transactional messages are not visible to consumers
                break;
        }
    }
}
After locating the ConsumeQueue for the message's topic and queue ID, the entry is written into its buffer:
org.apache.rocketmq.store.ConsumeQueue#putMessagePositionInfoWrapper
public void putMessagePositionInfoWrapper(DispatchRequest request) {
    final int maxRetries = 30;
    // Writable as long as the consume queue has not been flagged unwritable by a flush error
    boolean canWrite = this.defaultMessageStore.getRunningFlags().isCQWriteable();
    // Retry up to 30 times on failure
    for (int i = 0; i < maxRetries && canWrite; i++) {
        long tagsCode = request.getTagsCode();
        ...
        // Append to the mapped file in memory without flushing; ConsumeQueue always flushes asynchronously
        boolean result = this.putMessagePositionInfo(request.getCommitLogOffset(),
            request.getMsgSize(), tagsCode, request.getConsumeQueueOffset());
        if (result) {
            // Record the consume queue store timestamp; recovery after an abnormal shutdown uses it as the baseline
            this.defaultMessageStore.getStoreCheckpoint().setLogicsMsgTimestamp(request.getStoreTimestamp());
            return;
        } else {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException ignored) {
            }
        }
    }
}
File structure: each ConsumeQueue file holds 300,000 entries by default, and every entry is a fixed 20 B (an 8 B CommitLog physical offset, a 4 B message size, and an 8 B tag hashcode), so each file is exactly 6,000,000 B.
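A minimal sketch of what one such 20-byte entry looks like when read back (CqEntry is a hypothetical helper, mirroring the putLong/putInt/putLong order used by putMessagePositionInfo below):

import java.nio.ByteBuffer;

// Hypothetical illustration: decode a single 20-byte ConsumeQueue entry
final class CqEntry {
    static final int CQ_STORE_UNIT_SIZE = 20;

    final long commitLogOffset; // 8 B: physical offset of the message in the CommitLog
    final int size;             // 4 B: total message size
    final long tagsCode;        // 8 B: hashcode of the message tag (used for broker-side filtering)

    CqEntry(ByteBuffer buf) {
        this.commitLogOffset = buf.getLong();
        this.size = buf.getInt();
        this.tagsCode = buf.getLong();
    }
}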
org.apache.rocketmq.store.ConsumeQueue#putMessagePositionInfo
private boolean putMessagePositionInfo(final long offset, final int size, final long tagsCode,
    final long cqOffset) {
    this.byteBufferIndex.flip();
    this.byteBufferIndex.limit(CQ_STORE_UNIT_SIZE);
    // Write this message's entry: physical offset, size, tags hashcode
    this.byteBufferIndex.putLong(offset);
    this.byteBufferIndex.putInt(size);
    this.byteBufferIndex.putLong(tagsCode);
    final long expectLogicOffset = cqOffset * CQ_STORE_UNIT_SIZE;
    // Locate the queue's last mapped file by the expected logical offset
    MappedFile mappedFile = this.mappedFileQueue.getLastMappedFile(expectLogicOffset);
    if (mappedFile != null) {
        ...
        // Record the maximum physical offset of the CommitLog that is consumable
        this.maxPhysicOffset = offset + size;
        return mappedFile.appendMessage(this.byteBufferIndex.array());
    }
    return false;
}
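As a quick worked example of the expectLogicOffset arithmetic (hypothetical numbers): the entry for consume-queue offset 400,000 must land at logical offset 400,000 × 20 = 8,000,000 B, which falls in the second mapped file (each file holds 6,000,000 B) at in-file position 2,000,000:

public class CqOffsetMath {
    public static void main(String[] args) {
        long cqOffset = 400_000L;                     // hypothetical consume-queue offset
        int unitSize = 20;                            // CQ_STORE_UNIT_SIZE
        long fileSize = 300_000L * unitSize;          // 6,000,000 B per ConsumeQueue file
        long expectLogicOffset = cqOffset * unitSize; // 8,000,000 B global logical offset
        System.out.println(expectLogicOffset / fileSize); // 1 -> the second mapped file
        System.out.println(expectLogicOffset % fileSize); // 2,000,000 B into that file
    }
}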
Flushing the consume queue
The org.apache.rocketmq.store.DefaultMessageStore#flushConsumeQueueService service flushes the consume queues. By default it runs once per second and only flushes when at least 2 pages are dirty. Each doFlush pass iterates over every consumeQueue of every topic and flushes it.
org.apache.rocketmq.store.DefaultMessageStore.FlushConsumeQueueService#doFlush
private void doFlush(int retryTimes) {
    ...
    ConcurrentMap<String, ConcurrentMap<Integer, ConsumeQueue>> tables = DefaultMessageStore.this.consumeQueueTable;
    for (ConcurrentMap<Integer, ConsumeQueue> maps : tables.values()) {
        for (ConsumeQueue cq : maps.values()) {
            boolean result = false;
            for (int i = 0; i < retryTimes && !result; i++) {
                result = cq.flush(flushConsumeQueueLeastPages);
            }
        }
    }
    // logicsMsgTimestamp was captured from the StoreCheckpoint before flushing
    if (0 == flushConsumeQueueLeastPages) {
        if (logicsMsgTimestamp > 0) {
            // Restore the timestamp captured before the flush: new messages may have entered the
            // consume queue meanwhile, and those entries have not been flushed yet
            DefaultMessageStore.this.getStoreCheckpoint().setLogicsMsgTimestamp(logicsMsgTimestamp);
        }
        DefaultMessageStore.this.getStoreCheckpoint().flush();
    }
}
org.apache.rocketmq.store.StoreCheckpoint is the checkpoint service. Its physical file, checkpoint, stores the flush timestamps of the commitLog, consumeQueue, and index files, and the Broker uses them during recovery.
public class StoreCheckpoint {
    ... // mapped-file fields
    private volatile long physicMsgTimestamp = 0; // commitLog flush timestamp
    private volatile long logicsMsgTimestamp = 0; // consumeQueue flush timestamp
    private volatile long indexMsgTimestamp = 0;  // index file flush timestamp
}
By default, if no StoreCheckpoint flush has happened for more than 60 s, the next pass performs one.
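That 60 s decision sits at the elided head of doFlush above. A simplified sketch of that part, based on the RocketMQ source (flushConsumeQueueThoroughInterval defaults to 60,000 ms):

// Simplified sketch of the elided head of doFlush()
int flushConsumeQueueLeastPages = 2;   // default: flush only if >= 2 dirty pages
long logicsMsgTimestamp = 0;
long now = System.currentTimeMillis();
if (now >= this.lastFlushTimestamp + 60_000) {   // flushConsumeQueueThoroughInterval
    this.lastFlushTimestamp = now;
    flushConsumeQueueLeastPages = 0;             // 0 = thorough flush, also flushes the checkpoint
    // Capture the checkpoint timestamp before flushing; doFlush restores it afterwards
    logicsMsgTimestamp = DefaultMessageStore.this.getStoreCheckpoint().getLogicsMsgTimestamp();
}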
CommitLogDispatcherBuildIndex builds the index
class CommitLogDispatcherBuildIndex implements CommitLogDispatcher {
    @Override
    public void dispatch(DispatchRequest request) {
        if (DefaultMessageStore.this.messageStoreConfig.isMessageIndexEnable()) {
            DefaultMessageStore.this.indexService.buildIndex(request);
        }
    }
}
public void buildIndex(DispatchRequest req) {
    // Get or create the current index file
    IndexFile indexFile = retryGetAndCreateIndexFile();
    if (indexFile != null) {
        long endPhyOffset = indexFile.getEndPhyOffset();
        DispatchRequest msg = req;
        String topic = msg.getTopic();
        String keys = msg.getKeys();
        // Skip messages that are already indexed
        if (msg.getCommitLogOffset() < endPhyOffset) {
            return;
        }
        ...
        // If the message has a unique key, create one index entry for it
        if (req.getUniqKey() != null) {
            indexFile = putKey(indexFile, msg, buildKey(topic, req.getUniqKey()));
            ...
        }
        // If the producer set one or more keys on the message, create one index entry per key
        if (keys != null && keys.length() > 0) {
            String[] keyset = keys.split(MessageConst.KEY_SEPARATOR);
            for (int i = 0; i < keyset.length; i++) {
                String key = keyset[i];
                if (key.length() > 0) {
                    indexFile = putKey(indexFile, msg, buildKey(topic, key));
                    ...
                }
            }
        }
    }
}
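For context, the keys split on MessageConst.KEY_SEPARATOR (a space) come straight from the producer. A minimal, hypothetical producer-side example of attaching keys that will end up as index entries:

import org.apache.rocketmq.common.message.Message;

// Hypothetical example: "ORDER_20240101" and "UID_42" each become an index entry
// under this topic, because IndexService splits the keys string on spaces
Message msg = new Message(
    "TopicTest",                 // topic
    "TagA",                      // tag
    "ORDER_20240101 UID_42",     // space-separated keys
    "hello".getBytes());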
Index file layout
The file name is the timestamp at which the file was created, and the file size is fixed: 40 + 5,000,000 × 4 + 20,000,000 × 20 = 420,000,040 bytes. The 40 B header holds overall statistics; the 4 B × 5,000,000 Slot Table holds no actual index data, only the head of the singly linked list for each slot; the 20 B × 20,000,000 region holds the real index entries, so one IndexFile can store 20 million indexes.
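These constants can be reproduced directly (a small sketch using the default hashSlotNum = 5,000,000 and indexNum = 20,000,000 from the RocketMQ source):

public class IndexFileMath {
    public static void main(String[] args) {
        int headerSize = 40;         // IndexHeader.INDEX_HEADER_SIZE
        int hashSlotSize = 4;        // one int per slot: head of that slot's chain
        int indexSize = 20;          // keyHash(4) + phyOffset(8) + timeDiff(4) + prevIndex(4)
        int hashSlotNum = 5_000_000; // default slot count
        int indexNum = 20_000_000;   // default max entries per file
        long fileSize = headerSize + (long) hashSlotNum * hashSlotSize + (long) indexNum * indexSize;
        System.out.println(fileSize); // 420000040
    }
}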
Creating a new index entry
org.apache.rocketmq.store.index.IndexFile#putKey
/**
 * Create a new index entry in this IndexFile
 *
 * @param key            the message index key
 * @param phyOffset      physical offset of the message in the CommitLog
 * @param storeTimestamp store timestamp of the message
 * @return true if the entry was written; false if this file is full
 */
public boolean putKey(final String key, final long phyOffset, final long storeTimestamp) {
    // If the file already holds >= 20 million entries, return false; the caller then creates a new index file and retries
    if (this.indexHeader.getIndexCount() < this.indexNum) {
        int keyHash = indexKeyHashMethod(key);
        // Map the key's hash to a slot in the Slot Table
        int slotPos = keyHash % this.hashSlotNum;
        // Absolute position of the slot = fixed 40 B header + preceding slots (slotPos * 4 B)
        int absSlotPos = IndexHeader.INDEX_HEADER_SIZE + slotPos * hashSlotSize;
        try {
            // Read the slot's current value: the position of the head of its linked list
            int slotValue = this.mappedByteBuffer.getInt(absSlotPos);
            if (slotValue <= invalidIndex || slotValue > this.indexHeader.getIndexCount()) {
                slotValue = invalidIndex;
            }
            // Time difference (in seconds) between this message's store time and the file's first record
            long timeDiff = storeTimestamp - this.indexHeader.getBeginTimestamp();
            timeDiff = timeDiff / 1000;
            if (this.indexHeader.getBeginTimestamp() <= 0) {
                timeDiff = 0;
            } else if (timeDiff > Integer.MAX_VALUE) {
                timeDiff = Integer.MAX_VALUE;
            } else if (timeDiff < 0) {
                timeDiff = 0;
            }
            // Absolute position where the new index entry will be written
            int absIndexPos =
                IndexHeader.INDEX_HEADER_SIZE + this.hashSlotNum * hashSlotSize
                    + this.indexHeader.getIndexCount() * indexSize;
            // Entry layout: keyHash(4 B) + phyOffset(8 B) + timeDiff(4 B) + previous entry's position(4 B)
            this.mappedByteBuffer.putInt(absIndexPos, keyHash);
            this.mappedByteBuffer.putLong(absIndexPos + 4, phyOffset);
            this.mappedByteBuffer.putInt(absIndexPos + 4 + 8, (int) timeDiff);
            this.mappedByteBuffer.putInt(absIndexPos + 4 + 8 + 4, slotValue);
            // Store the current index count into the hash slot, i.e. this entry's own position in the linked list
            this.mappedByteBuffer.putInt(absSlotPos, this.indexHeader.getIndexCount());
            // First entry: record the begin physical offset and the earliest store timestamp
            if (this.indexHeader.getIndexCount() <= 1) {
                this.indexHeader.setBeginPhyOffset(phyOffset);
                this.indexHeader.setBeginTimestamp(storeTimestamp);
            }
            this.indexHeader.incHashSlotCount();
            // Increment the number of stored index entries
            this.indexHeader.incIndexCount();
            this.indexHeader.setEndPhyOffset(phyOffset);
            this.indexHeader.setEndTimestamp(storeTimestamp);
            return true;
        } catch (Exception e) {
            ... // error handling elided
        } finally {
            ... // lock release elided
        }
    } else {
        ... // file is full
    }
    return false;
}
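indexKeyHashMethod, used above to pick the slot, is essentially a non-negative String.hashCode(); a sketch of its behavior:

// Sketch of indexKeyHashMethod: a non-negative String hash
// (Math.abs(Integer.MIN_VALUE) is still negative, hence the extra guard)
public int indexKeyHashMethod(final String key) {
    int keyHash = key.hashCode();
    int keyHashPositive = Math.abs(keyHash);
    if (keyHashPositive < 0)
        keyHashPositive = 0;
    return keyHashPositive;
}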
Retrieving messages by index
org.apache.rocketmq.store.index.IndexFile#selectPhyOffset
/**
 * Find messages by index key
 *
 * @param phyOffsets output list of matched physical offsets
 * @param key        message key
 * @param maxNum     maximum number of matches to collect
 * @param begin      begin timestamp
 * @param end        end timestamp
 * @param lock
 */
public void selectPhyOffset(final List<Long> phyOffsets, final String key, final int maxNum,
    final long begin, final long end, boolean lock) {
    if (this.mappedFile.hold()) {
        int keyHash = indexKeyHashMethod(key);
        // Locate the slot for this key
        int slotPos = keyHash % this.hashSlotNum;
        int absSlotPos = IndexHeader.INDEX_HEADER_SIZE + slotPos * hashSlotSize;
        try {
            // Read the head of the slot's linked list
            int slotValue = this.mappedByteBuffer.getInt(absSlotPos);
            if (slotValue <= invalidIndex || slotValue > this.indexHeader.getIndexCount()
                || this.indexHeader.getIndexCount() <= 1) {
                // Slot is empty or out of range: nothing to return
            } else {
                // Walk the collision chain: each entry stores the position of the previous entry with the same hash slot
                for (int nextIndexToRead = slotValue; ; ) {
                    // Stop once enough offsets have been collected
                    if (phyOffsets.size() >= maxNum) {
                        break;
                    }
                    // Absolute position of this index entry
                    int absIndexPos =
                        IndexHeader.INDEX_HEADER_SIZE + this.hashSlotNum * hashSlotSize
                            + nextIndexToRead * indexSize;
                    int keyHashRead = this.mappedByteBuffer.getInt(absIndexPos);
                    long phyOffsetRead = this.mappedByteBuffer.getLong(absIndexPos + 4);
                    long timeDiff = (long) this.mappedByteBuffer.getInt(absIndexPos + 4 + 8);
                    int prevIndexRead = this.mappedByteBuffer.getInt(absIndexPos + 4 + 8 + 4);
                    // Invalid entry
                    if (timeDiff < 0) {
                        break;
                    }
                    timeDiff *= 1000L;
                    // Reconstruct this message's actual store timestamp
                    long timeRead = this.indexHeader.getBeginTimestamp() + timeDiff;
                    boolean timeMatched = (timeRead >= begin) && (timeRead <= end);
                    // Both the hash and the time range match: collect the offset
                    if (keyHash == keyHashRead && timeMatched) {
                        phyOffsets.add(phyOffsetRead);
                    }
                    // Validate the previous entry's position before following it
                    if (prevIndexRead <= invalidIndex
                        || prevIndexRead > this.indexHeader.getIndexCount()
                        || prevIndexRead == nextIndexToRead || timeRead < begin) {
                        break;
                    }
                    // Possible hash collision: keep walking the chain
                    nextIndexToRead = prevIndexRead;
                }
            }
        } catch (Exception e) {
            log.error("selectPhyOffset exception ", e);
        } finally {
            this.mappedFile.release();
        }
    }
}
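A hypothetical caller-side fragment (IndexService builds keys as "topic#key" via buildKey; the names and values here are illustrative):

import java.util.ArrayList;
import java.util.List;

// Hypothetical query: collect up to 32 offsets for messages stored in the last hour
List<Long> phyOffsets = new ArrayList<>();
long end = System.currentTimeMillis();
long begin = end - 3600_000L;
indexFile.selectPhyOffset(phyOffsets, "TopicTest#ORDER_20240101", 32, begin, end, false);
// Each returned offset is then resolved against the CommitLog to read the full message back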