Analysis of the writer's data-sending process
Tracing the code:
void WriterHistory::notify_writer(CacheChange_t* a_change,
const std::chrono::time_point<std::chrono::steady_clock>& max_blocking_time)
{
//Calls the StatelessWriter to send
mp_writer->unsent_change_added_to_history(a_change, max_blocking_time);
}
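For context, notify_writer() fires whenever a change is committed to the WriterHistory. A minimal sketch of the upstream call at the RTPS layer, assuming Fast DDS's public RTPSDomain/WriterHistory API (attribute values are illustrative):
// Sketch only: assumed Fast DDS RTPS-layer usage, not an excerpt from this code base.
#include <fastdds/rtps/RTPSDomain.h>
#include <fastdds/rtps/participant/RTPSParticipant.h>
#include <fastdds/rtps/attributes/RTPSParticipantAttributes.h>
#include <fastdds/rtps/attributes/WriterAttributes.h>
#include <fastdds/rtps/attributes/HistoryAttributes.h>
#include <fastdds/rtps/history/WriterHistory.h>
#include <fastdds/rtps/writer/RTPSWriter.h>

using namespace eprosima::fastrtps::rtps;

int main()
{
    RTPSParticipantAttributes part_attr;
    RTPSParticipant* participant = RTPSDomain::createParticipant(0, part_attr);

    HistoryAttributes hist_attr;
    WriterHistory history(hist_attr);
    WriterAttributes writer_attr;
    RTPSWriter* writer = RTPSDomain::createRTPSWriter(participant, writer_attr, &history);

    // new_change() takes a callable that bounds the serialized size.
    CacheChange_t* change = writer->new_change(
            []() -> uint32_t { return 255; }, ALIVE);
    // ... serialize user data into change->serializedPayload here ...

    // add_change() stores the sample and ends in notify_writer() above,
    // which kicks off the whole send path traced in this section.
    history.add_change(change);

    RTPSDomain::removeRTPSWriter(writer);
    RTPSDomain::removeRTPSParticipant(participant);
}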
This in turn calls the FlowController interface:
void StatelessWriter::unsent_change_added_to_history(
CacheChange_t* change,
const std::chrono::time_point<std::chrono::steady_clock>& max_blocking_time)
{
std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
auto payload_length = change->serializedPayload.length;
// Notify the datasharing readers
// This also prepares the metadata for late-joiners
if (is_datasharing_compatible())
{
datasharing_delivery(change);
}
// Now for the rest of readers
if (!fixed_locators_.empty() || getMatchedReadersSize() > 0)
{
//The actual send entry point
flow_controller_->add_new_sample(this, change, max_blocking_time);
}
else
{
//no matched readers and no fixed locators: nothing to send (body elided)
}
}
The send chain is as follows:
bool add_new_sample(
fastrtps::rtps::RTPSWriter* writer,
fastrtps::rtps::CacheChange_t* change,
const std::chrono::time_point<std::chrono::steady_clock>& max_blocking_time) override
{
return add_new_sample_impl(writer, change, max_blocking_time);
}
add_new_sample_impl:
template<typename PubMode = PublishMode>
typename std::enable_if<std::is_base_of<FlowControllerPureSyncPublishMode, PubMode>::value, bool>::type
add_new_sample_impl(
fastrtps::rtps::RTPSWriter* writer,
fastrtps::rtps::CacheChange_t* change,
const std::chrono::time_point<std::chrono::steady_clock>& max_blocking_time)
{
// This call should be made with writer's mutex locked.
fastrtps::rtps::LocatorSelectorSender& locator_selector = writer->get_general_locator_selector();
std::lock_guard<fastrtps::rtps::LocatorSelectorSender> lock(locator_selector);
//Build the message group
fastrtps::rtps::RTPSMessageGroup group(participant_, writer, &locator_selector);
if (fastrtps::rtps::DeliveryRetCode::DELIVERED !=
writer->deliver_sample_nts(change, group, locator_selector, max_blocking_time))
{
//error handling elided in this excerpt
}
return true;
}
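The overload above is selected at compile time: std::enable_if combined with std::is_base_of instantiates this pure-sync body only when the flow controller's PublishMode derives from FlowControllerPureSyncPublishMode; other publish modes get an asynchronous implementation instead. A self-contained sketch of the same SFINAE dispatch pattern (the type names here are stand-ins, not Fast DDS classes):
#include <iostream>
#include <type_traits>

struct PureSyncMode {};   // stands in for FlowControllerPureSyncPublishMode
struct AsyncMode {};      // stands in for an asynchronous publish mode

template<typename PublishMode>
struct FlowController
{
    // Instantiated only when PublishMode derives from (or is) PureSyncMode.
    template<typename P = PublishMode>
    typename std::enable_if<std::is_base_of<PureSyncMode, P>::value, bool>::type
    add_new_sample_impl()
    {
        std::cout << "deliver in the calling thread\n";
        return true;
    }

    // Instantiated for every other publish mode.
    template<typename P = PublishMode>
    typename std::enable_if<!std::is_base_of<PureSyncMode, P>::value, bool>::type
    add_new_sample_impl()
    {
        std::cout << "queue for the async sender thread\n";
        return true;
    }
};

int main()
{
    FlowController<PureSyncMode>{}.add_new_sample_impl();   // sync branch
    FlowController<AsyncMode>{}.add_new_sample_impl();      // async branch
}
Because the condition depends on the class template parameter, only one of the two bodies exists in any given FlowControllerImpl instantiation.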
deliver_sample_nts is virtual; the StatefulWriter override first:
DeliveryRetCode StatefulWriter::deliver_sample_nts(
CacheChange_t* cache_change,
RTPSMessageGroup& group,
LocatorSelectorSender& locator_selector, // Object locked by FlowControllerImpl
const std::chrono::time_point<std::chrono::steady_clock>& max_blocking_time)
{
DeliveryRetCode ret_code = DeliveryRetCode::DELIVERED;
if (there_are_remote_readers_)
{
//Send over the network
ret_code = deliver_sample_to_network(cache_change, group, locator_selector, max_blocking_time);
}
check_acked_status();
return ret_code;
}
And the StatelessWriter override, the one taken in this trace:
DeliveryRetCode StatelessWriter::deliver_sample_nts(
CacheChange_t* cache_change,
RTPSMessageGroup& group,
LocatorSelectorSender& locator_selector, // Object locked by FlowControllerImpl
const std::chrono::time_point<std::chrono::steady_clock>& )
{
uint64_t change_sequence_number = cache_change->sequenceNumber.to64long();
NetworkFactory& network = mp_RTPSParticipant->network_factory();
DeliveryRetCode ret_code = DeliveryRetCode::DELIVERED;
//Here current_sequence_number_sent_ == 0 and change_sequence_number == 1
if (current_sequence_number_sent_ != change_sequence_number)
{
current_sequence_number_sent_ = change_sequence_number;
current_fragment_sent_ = 0;
}
// Send the new sample to intra-process readers.
if (0 == current_fragment_sent_)
{
// matched_local_readers_ is empty; no readers have been matched yet
for_matched_readers(matched_local_readers_, [&, cache_change](ReaderLocator& reader)
{
intraprocess_delivery(cache_change, reader);
return false;
});
}
try
{
// n_fragments = 0
uint32_t n_fragments = cache_change->getFragmentCount();
//m_separateSendingEnabled defaults to false
if (m_separateSendingEnabled)
{
if (0 < n_fragments)
{
//per-reader fragmented sending elided in this excerpt
}
else
{
//matched_remote_readers_ is also empty at this point
for (std::unique_ptr<ReaderLocator>& it : matched_remote_readers_)
{
if (nullptr == reader_data_filter_ ||
reader_data_filter_->is_relevant(*cache_change, it->remote_guid()))
{
group.sender(this, &*it);
size_t num_locators = it->locators_size();
if (group.add_data(*cache_change, is_inline_qos_expected_))
{
add_statistics_sent_submessage(cache_change, num_locators);
}
else
{
ret_code = DeliveryRetCode::NOT_DELIVERED;
}
}
}
}
}
else
{
// num_locators = 1
size_t num_locators = locator_selector.locator_selector.selected_size() + fixed_locators_.size();
if (0 < num_locators)
{
// n_fragments = 0
if (0 < n_fragments)
{
for (FragmentNumber_t frag = current_fragment_sent_ + 1;
DeliveryRetCode::DELIVERED == ret_code && frag <= n_fragments; ++frag)
{
if (group.add_data_frag(*cache_change, frag, is_inline_qos_expected_))
{
current_fragment_sent_ = frag;
add_statistics_sent_submessage(cache_change, num_locators);
}
else
{
ret_code = DeliveryRetCode::NOT_DELIVERED;
}
}
}
else
{
//This is where the send actually happens in this trace
if (group.add_data(*cache_change, is_inline_qos_expected_))
{
add_statistics_sent_submessage(cache_change, num_locators);
}
else
{
ret_code = DeliveryRetCode::NOT_DELIVERED;
}
}
}
}
on_sample_datas(cache_change->write_params.sample_identity(),
cache_change->writer_info.num_sent_submessages);
on_data_sent();
}
catch (const RTPSMessageGroup::timeout&)
{
logError(RTPS_WRITER, "Max blocking time reached");
ret_code = DeliveryRetCode::NOT_DELIVERED;
}
//The send does not take this path; this only resets the group's sender
group.sender(this, &locator_selector);
if (DeliveryRetCode::DELIVERED == ret_code &&
change_sequence_number > last_sequence_number_sent_)
{
// This update must be done before calling the callback.
last_sequence_number_sent_ = change_sequence_number;
unsent_changes_cond_.notify_all();
if (nullptr != mp_listener)
{
//Releases the cache_change
mp_listener->onWriterChangeReceivedByAll(this, cache_change);
}
}
return ret_code;
}
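A note on the fragment arithmetic above: n_fragments is nonzero only when the payload does not fit in a single DATA submessage, in which case the loop emits one DATA_FRAG per fragment and resumes from current_fragment_sent_ after a partial delivery. A rough sketch of the count computation (fragment_size is illustrative; the real value is derived from the transports' maximum message size):
#include <cstdint>
#include <iostream>

// Sketch: ceiling division, as used to split a payload into fragments.
uint32_t fragment_count(uint32_t payload_length, uint16_t fragment_size)
{
    if (payload_length <= fragment_size)
    {
        return 0;   // fits in a single DATA submessage: no fragmentation
    }
    return (payload_length + fragment_size - 1) / fragment_size;
}

int main()
{
    std::cout << fragment_count(300, 65000) << '\n';    // 0: one DATA (the SPDP case here)
    std::cout << fragment_count(150000, 65000) << '\n'; // 3: DATA_FRAGs 1..3
}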
The next layer down is RTPSMessageGroup::add_data:
bool RTPSMessageGroup::add_data(
const CacheChange_t& change,
bool expectsInlineQos)
{
assert(nullptr != sender_);
CacheChangeInlineQoSWriter qos_writer(change);
InlineQosWriter* inline_qos;
inline_qos = (change.inline_qos.length > 0 && nullptr != change.inline_qos.data) ? &qos_writer : nullptr;
//Packs the destination entity id, 0.1.0.c7
const EntityId_t& readerId = get_entity_id(sender_->remote_guids());
CacheChange_t change_to_add;
change_to_add.copy_not_memcpy(&change);
change_to_add.serializedPayload.data = change.serializedPayload.data;
change_to_add.serializedPayload.length = change.serializedPayload.length;
// Packs the source entity id, 0.1.0.c2
change_to_add.writerGUID = endpoint_->getGuid();
bool is_big_submessage;
//Serializes the change into submessage_msg_
if (!RTPSMessageCreator::addSubmessageData(submessage_msg_, &change_to_add, endpoint_->getAttributes().topicKind,
readerId, expectsInlineQos, inline_qos, &is_big_submessage))
{
return false;
}
change_to_add.serializedPayload.data = nullptr;
//Hands off to the lower-layer send path
return insert_submessage(is_big_submessage);
}
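The entity ids mentioned in the comments, 0.1.0.c2 and 0.1.0.c7, are the builtin SPDP participant writer and reader defined by the RTPS specification. A small sketch spelling out the two constants (the names follow the spec, not Fast DDS's internal constants):
#include <iostream>

// Builtin RTPS entity ids from the RTPS specification; the last octet encodes
// the entity kind (0xc2: builtin writer, 0xc7: builtin reader).
const unsigned ENTITYID_SPDP_BUILTIN_RTPSParticipant_WRITER = 0x000100c2; // 0.1.0.c2
const unsigned ENTITYID_SPDP_BUILTIN_RTPSParticipant_READER = 0x000100c7; // 0.1.0.c7

int main()
{
    // The SPDP DATA submessage built above carries these as
    // writerId (source) and readerId (destination) respectively.
    std::cout << std::hex
              << "writerId = 0x" << ENTITYID_SPDP_BUILTIN_RTPSParticipant_WRITER << '\n'
              << "readerId = 0x" << ENTITYID_SPDP_BUILTIN_RTPSParticipant_READER << '\n';
}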
insert_submessage still does not put anything on the wire at this step. Once add_new_sample_impl returns, the RTPSMessageGroup destructor runs, and that is where the actual send happens:
RTPSMessageGroup::~RTPSMessageGroup() noexcept(false)
{
try
{
send();
}
catch (...)
{
throw;
}
}
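This flush-on-destruction is a deliberate RAII design: submessages are buffered so that several of them can be coalesced into one datagram, and the destructor is declared noexcept(false) so a send failure can still propagate to the caller. A minimal sketch of the same pattern, with illustrative names:
#include <iostream>
#include <string>
#include <vector>

// Sketch: buffer pieces while in scope, flush once on destruction.
class MessageGroup
{
public:
    void add(std::string submessage)
    {
        buffer_.push_back(std::move(submessage));   // no I/O yet
    }

    ~MessageGroup() noexcept(false)                 // allow send errors to escape
    {
        // One coalesced "send" for everything added during the group's lifetime.
        for (const auto& sub : buffer_)
        {
            std::cout << "sending: " << sub << '\n';
        }
    }

private:
    std::vector<std::string> buffer_;
};

int main()
{
    {
        MessageGroup group;
        group.add("INFO_DST");
        group.add("DATA");
    }   // the actual send happens here, when 'group' goes out of scope
}
Throwing destructors are normally avoided; Fast DDS opts in explicitly here so that callers can still observe a failed flush.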
The details of send() are analyzed later; this completes the analysis of announceParticipantState. The message is addressed to 0.1.0.c7, and since the participant itself owns a 0.1.0.c7 reader it receives its own announcement as well, but does not process it.
Afterwards, writer.unsent_changes_reset() re-broadcasts the announcement at a fixed interval, ensuring that participants joining later still receive it.
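That re-announcement interval is configurable. A sketch assuming the Fast DDS 2.x DDS-layer QoS field names (leaseDuration_announcementperiod under wire_protocol().builtin.discovery_config; verify the names against your version):
#include <fastdds/dds/domain/DomainParticipantFactory.hpp>
#include <fastdds/dds/domain/DomainParticipant.hpp>
#include <fastdds/dds/domain/qos/DomainParticipantQos.hpp>

using namespace eprosima::fastdds::dds;

int main()
{
    DomainParticipantQos qos = PARTICIPANT_QOS_DEFAULT;
    // Assumed field names: re-announce the participant every 3 seconds,
    // with a 20 second lease so silent peers eventually expire.
    qos.wire_protocol().builtin.discovery_config.leaseDuration_announcementperiod =
            eprosima::fastrtps::Duration_t(3, 0);
    qos.wire_protocol().builtin.discovery_config.leaseDuration =
            eprosima::fastrtps::Duration_t(20, 0);

    DomainParticipant* participant =
            DomainParticipantFactory::get_instance()->create_participant(0, qos);
    // ... use the participant ...
    DomainParticipantFactory::get_instance()->delete_participant(participant);
}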
Analysis of RegisterReceiver
//Take each receiver from the list
for (auto& receiver : m_receiverResourcelist)
{
//Register it
receiver.Receiver->RegisterReceiver(receiver.mp_receiver);
}
Three receivers were created in the earlier analysis, so this loop registers all three. The registration process:
void ReceiverResource::RegisterReceiver(
MessageReceiver* rcv)
{
std::lock_guard<std::mutex> _(mtx);
if (receiver == nullptr)
{
receiver = rcv;
}
}
This completes the analysis of RTPSParticipantImpl->enable().