本文基于vsomeip 3.1.20.3总结而成
源码地址:https://github.com/GENIVI/vsomeip.git
本文主要涉及vsomeip库中的如下代码:
在vsomeip中,提供了一个event类来实现SOME/IP协议中的事件所包含的信息与功能,在之前写的demo篇中介绍过事件的基础使用,包括事件的注册,订阅以及发送三个功能,这一篇文章继续来看看event的具体处理流程。
事件订阅(request_event & subscribe )
在跟踪源码之前,先看下一个客户端订阅某个事件组的API介绍,主要是两个函数,request_event与subscribe, 函数定义在application.hpp中
// Registers this application as a consumer of a plain event or a field (attribute) event.
virtual void request_event(service_t _service,
instance_t _instance,
event_t _event,
const std::set<eventgroup_t> &_eventgroups,
event_type_e _type = event_type_e::ET_EVENT,
reliability_type_e _reliability = reliability_type_e::RT_UNKNOWN) = 0;
// Subscribes to an eventgroup. Must be called after request_event.
virtual void subscribe(service_t _service,
instance_t _instance,
eventgroup_t _eventgroup,
major_version_t _major = DEFAULT_MAJOR,
event_t _event = ANY_EVENT) = 0;
这两个函数就是这个模块跟踪的入口了,看代码,两个函数的实现在application_impl.cpp中,按照注释的顺序,我们先从request_event函数开始看
// Forwards the event request to the routing manager. As analyzed in the
// init flow, routing_ points either to the host-mode rtm_impl or to the
// proxy-mode rtm_proxy.
void application_impl::request_event(service_t _service, instance_t _instance,
        event_t _event, const std::set<eventgroup_t> &_eventgroups,
        event_type_e _type, reliability_type_e _reliability) {
    if (!routing_) {
        return;
    }
    routing_->register_event(client_, _service, _instance, _event,
            _eventgroups, _type, _reliability,
            std::chrono::milliseconds::zero(), // no update cycle
            false,   // _change_resets_cycle
            true,    // _update_on_change
            nullptr, // _epsilon_change_func
            false);  // _is_provided
}
还是先跟踪host模式的路由实现,因为proxy的实现相对来说比较简单。app_模块中的request_event啥也没干,直接交给路由模块的register_event函数处理了。
host路由中的register_event实现
/*
 * Argument values when called from app::request_event:
 * _change_resets_cycle = false
 * _update_on_change = true
 * _epsilon_change_func = null
 * _is_provided = false
 * _is_shadow = false
 * _is_cache_placeholder = false
 */
// Host-side event registration. The base-class registration runs only for
// a first-time registration, i.e. when the event does not exist yet in the
// application's event cache or this client holds no reference on it yet.
void routing_manager_impl::register_event(client_t _client,
        service_t _service, instance_t _instance,
        event_t _notifier,
        const std::set<eventgroup_t> &_eventgroups,
        const event_type_e _type,
        reliability_type_e _reliability,
        std::chrono::milliseconds _cycle,
        bool _change_resets_cycle,
        bool _update_on_change,
        epsilon_change_func_t _epsilon_change_func,
        bool _is_provided,
        bool _is_shadow,
        bool _is_cache_placeholder) {
    // Look the event up in the cached event map.
    const auto its_event = find_event(_service, _instance, _notifier);
    // First registration: no cached event, or no reference held by _client.
    const bool is_first =
            !its_event || !its_event->has_ref(_client, _is_provided);
    if (is_first) {
        routing_manager_base::register_event(_client,
                _service, _instance,
                _notifier,
                _eventgroups, _type, _reliability,
                _cycle, _change_resets_cycle, _update_on_change,
                _epsilon_change_func, _is_provided, _is_shadow,
                _is_cache_placeholder);
    }
    // (logging code omitted)
}
上面的流程也比较简单,就是根据服务实例以及事件ID来判断事件是否已经注册过,已经注册过的情况下,就不处理该次注册动作了,接着看rtm_base中的流程,因为rtm_base::register_event中源码挺多的,而且host与proxy共用逻辑,这里先对整个流程画了一个流程图简述一下
// Shared (host + proxy) event registration: creates or updates the event
// object, installs the epsilon-change (debounce) function, transfers cached
// ANY_EVENT subscribers, and updates the eventgroups_/events_ tables.
void routing_manager_base::register_event(client_t _client,
service_t _service, instance_t _instance,
event_t _notifier,
const std::set<eventgroup_t> &_eventgroups,
const event_type_e _type,
reliability_type_e _reliability,
std::chrono::milliseconds _cycle, bool _change_resets_cycle,
bool _update_on_change,
epsilon_change_func_t _epsilon_change_func,
bool _is_provided, bool _is_shadow, bool _is_cache_placeholder) {
std::lock_guard<std::mutex> its_registration_lock(event_registration_mutex_);
// Resolve the event reliability: configuration beats the API value,
// the API value beats the service's configured reliability.
auto determine_event_reliability = [this, &_service, &_instance,
&_notifier, &_reliability]() {
reliability_type_e its_reliability =
configuration_->get_event_reliability(_service, _instance, _notifier);
if (its_reliability != reliability_type_e::RT_UNKNOWN) {
// event was explicitly configured -> overwrite value passed via API
return its_reliability;
} else if (_reliability != reliability_type_e::RT_UNKNOWN) {
// use value provided via API
return _reliability;
} else { // automatic mode, use service's reliability
return configuration_->get_service_reliability(_service, _instance);
}
};
// Check whether the event already exists in the registered-events table
std::shared_ptr<event> its_event = find_event(_service, _instance, _notifier);
bool transfer_subscriptions_from_any_event(false);
if (its_event) {
// Event already registered; check whether it is only a cache placeholder
if (!its_event->is_cache_placeholder()) {
if (_type == its_event->get_type()
|| its_event->get_type() == event_type_e::ET_UNKNOWN
#ifdef VSOMEIP_ENABLE_COMPAT
|| (its_event->get_type() == event_type_e::ET_EVENT
&& _type == event_type_e::ET_SELECTIVE_EVENT)
|| (its_event->get_type() == event_type_e::ET_SELECTIVE_EVENT
&& _type == event_type_e::ET_EVENT && _is_provided)
#endif
) {
// Not a placeholder and the types are compatible: update the
// existing event from the passed arguments
#ifdef VSOMEIP_ENABLE_COMPAT
if (its_event->get_type() == event_type_e::ET_EVENT
&& _type == event_type_e::ET_SELECTIVE_EVENT) {
its_event->set_type(_type);
}
#endif
if (_is_provided) {
its_event->set_provided(true);
its_event->set_reliability(determine_event_reliability());
}
if (_is_shadow && _is_provided) {
its_event->set_shadow(_is_shadow);
}
// The registering client is the host routing application itself:
// force the event to be a non-shadow event
if (_client == host_->get_client() && _is_provided) {
its_event->set_shadow(false);
its_event->set_update_on_change(_update_on_change);
}
// Update the event's eventgroup membership
for (auto eg : _eventgroups) {
its_event->add_eventgroup(eg);
}
transfer_subscriptions_from_any_event = true;
} else {
#ifdef VSOMEIP_ENABLE_COMPAT
if (!(its_event->get_type() == event_type_e::ET_SELECTIVE_EVENT
&& _type == event_type_e::ET_EVENT))
#endif
VSOMEIP_ERROR << "Event registration update failed. "
"Specified arguments do not match existing registration.";
}
} else {
// The event was previously registered only as a cache placeholder;
// turn it into a really registered event and update its information
if (_type != event_type_e::ET_FIELD) {
// don't cache payload for non-fields
its_event->unset_payload(true);
}
if (_is_shadow && _is_provided) {
its_event->set_shadow(_is_shadow);
}
if (_client == host_->get_client() && _is_provided) {
its_event->set_shadow(false);
its_event->set_update_on_change(_update_on_change);
}
its_event->set_type(_type);
its_event->set_reliability(determine_event_reliability());
its_event->set_provided(_is_provided);
its_event->set_cache_placeholder(false);
std::shared_ptr<serviceinfo> its_service = find_service(_service, _instance);
if (its_service) {
its_event->set_version(its_service->get_major());
}
if (_eventgroups.size() == 0) { // No eventgroup specified
std::set<eventgroup_t> its_eventgroups;
its_eventgroups.insert(_notifier);
its_event->set_eventgroups(its_eventgroups);
} else {
for (auto eg : _eventgroups) {
its_event->add_eventgroup(eg);
}
}
its_event->set_epsilon_change_function(_epsilon_change_func);
its_event->set_change_resets_cycle(_change_resets_cycle);
its_event->set_update_cycle(_cycle);
}
} else {
// The event has never been registered before: create a new event object
its_event = std::make_shared<event>(this, _is_shadow);
its_event->set_service(_service);
its_event->set_instance(_instance);
its_event->set_event(_notifier);
its_event->set_type(_type);
its_event->set_reliability(determine_event_reliability());
its_event->set_provided(_is_provided);
its_event->set_cache_placeholder(_is_cache_placeholder);
std::shared_ptr<serviceinfo> its_service = find_service(_service, _instance);
if (its_service) {
its_event->set_version(its_service->get_major());
}
if (_eventgroups.size() == 0) { // No eventgroup specified
std::set<eventgroup_t> its_eventgroups;
its_eventgroups.insert(_notifier);
its_event->set_eventgroups(its_eventgroups);
} else {
its_event->set_eventgroups(_eventgroups);
}
// A shadow event is being registered without an epsilon-change function.
// "Epsilon change" means: only forward an update when it differs from the
// last value by more than some threshold.
if (_is_shadow && !_epsilon_change_func) {
std::shared_ptr<cfg::debounce> its_debounce
= configuration_->get_debounce(_service, _instance, _notifier);
if (its_debounce) {
// (some logging code omitted)
// Build a new _epsilon_change_func from the debounce settings in
// the configuration file
_epsilon_change_func = [its_debounce](
const std::shared_ptr<payload> &_old,
const std::shared_ptr<payload> &_new) {
bool is_changed(false), is_elapsed(false);
// Check whether we should forward because of changed data
if (its_debounce->on_change_) {
length_t its_min_length, its_max_length;
if (_old->get_length() < _new->get_length()) {
its_min_length = _old->get_length();
its_max_length = _new->get_length();
} else {
its_min_length = _new->get_length();
its_max_length = _old->get_length();
}
// Check whether all additional bytes (if any) are excluded
for (length_t i = its_min_length; i < its_max_length; i++) {
auto j = its_debounce->ignore_.find(i);
// A change is detected when an additional byte is not
// excluded at all or if its exclusion does not cover
// all its bits.
if (j == its_debounce->ignore_.end() || j->second != 0xFF) {
is_changed = true;
break;
}
}
if (!is_changed) {
const byte_t *its_old = _old->get_data();
const byte_t *its_new = _new->get_data();
// Byte-wise compare of the common prefix, honoring the
// per-byte ignore mask from the configuration.
for (length_t i = 0; i < its_min_length; i++) {
auto j = its_debounce->ignore_.find(i);
if (j == its_debounce->ignore_.end()) {
if (its_old[i] != its_new[i]) {
is_changed = true;
break;
}
} else if (j->second != 0xFF) {
if ((its_old[i] & ~(j->second)) != (its_new[i] & ~(j->second))) {
is_changed = true;
break;
}
}
}
}
}
if (its_debounce->interval_ > -1) {
// Check whether we should forward because of the elapsed time since
// we did last time
std::chrono::steady_clock::time_point its_current
= std::chrono::steady_clock::now();
long elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
its_current - its_debounce->last_forwarded_).count();
is_elapsed = (its_debounce->last_forwarded_ == (std::chrono::steady_clock::time_point::max)()
|| elapsed >= its_debounce->interval_);
if (is_elapsed || (is_changed && its_debounce->on_change_resets_interval_))
its_debounce->last_forwarded_ = its_current;
}
return (is_changed || is_elapsed);
};
} else {
// No debounce configured: install a trivial function that always forwards
_epsilon_change_func = [](const std::shared_ptr<payload> &_old,
const std::shared_ptr<payload> &_new) {
(void)_old;
(void)_new;
return true;
};
}
}
// Store the trigger/update settings on the event
its_event->set_epsilon_change_function(_epsilon_change_func);
its_event->set_change_resets_cycle(_change_resets_cycle);
its_event->set_update_cycle(_cycle);
its_event->set_update_on_change(_update_on_change);
if (_is_provided) {
transfer_subscriptions_from_any_event = true;
}
}
if (transfer_subscriptions_from_any_event) {
// check if someone subscribed to ANY_EVENT and the subscription
// was stored in the cache placeholder. Move the subscribers
// into new event
// Fetch the ANY_EVENT event object of this service instance
std::shared_ptr<event> its_any_event =
find_event(_service, _instance, ANY_EVENT);
if (its_any_event) {
// An ANY_EVENT entry exists in the cache; get its eventgroups
std::set<eventgroup_t> any_events_eventgroups =
its_any_event->get_eventgroups();
// Iterate over the eventgroups of the event being registered
for (eventgroup_t eventgroup : _eventgroups) {
// The ANY_EVENT eventgroup set contains this eventgroup
auto found_eg = any_events_eventgroups.find(eventgroup);
if (found_eg != any_events_eventgroups.end()) {
// Get the client IDs subscribed to ANY_EVENT in this eventgroup
std::set<client_t> its_any_event_subscribers =
its_any_event->get_subscribers(eventgroup);
// Move those subscribers over to the newly registered event
for (const client_t subscriber : its_any_event_subscribers) {
its_event->add_subscriber(eventgroup, subscriber, true);
}
}
}
}
}
// Real (non-placeholder) event: add this client's reference
if (!its_event->is_cache_placeholder()) {
its_event->add_ref(_client, _is_provided);
}
// Update the eventgroup info objects, creating them on demand
for (auto eg : _eventgroups) {
std::shared_ptr<eventgroupinfo> its_eventgroupinfo
= find_eventgroup(_service, _instance, eg);
if (!its_eventgroupinfo) {
its_eventgroupinfo = std::make_shared<eventgroupinfo>();
its_eventgroupinfo->set_service(_service);
its_eventgroupinfo->set_instance(_instance);
its_eventgroupinfo->set_eventgroup(eg);
std::lock_guard<std::mutex> its_lock(eventgroups_mutex_);
eventgroups_[_service][_instance][eg] = its_eventgroupinfo;
}
its_eventgroupinfo->add_event(its_event);
}
std::lock_guard<std::mutex> its_lock(events_mutex_);
// Store the event in the registered-events table
events_[_service][_instance][_notifier] = its_event;
}
到这里,我们基本就清楚了host模式的vsomeip app中的request_event的主要作用是创建了eventgroupinfo, event两个实例的共享指针 ,并将其添加到模块中的eventgroups_ 以及events_表中
proxy路由中的request_event实现
proxy中的request_event流程比较简单,首先是app_模块中的request_event什么也没做,直接调用了rtm_proxy中的register_event事件:
// Proxy-side event registration: records a pending registration, delegates
// to the base class on first registration, and notifies the host when the
// application is already in registered state.
void routing_manager_proxy::register_event(client_t _client,
service_t _service, instance_t _instance,
event_t _notifier,
const std::set<eventgroup_t> &_eventgroups, const event_type_e _type,
reliability_type_e _reliability,
std::chrono::milliseconds _cycle, bool _change_resets_cycle,
bool _update_on_change, epsilon_change_func_t _epsilon_change_func,
bool _is_provided, bool _is_shadow, bool _is_cache_placeholder) {
(void)_is_shadow;
(void)_is_cache_placeholder;
// Build an event registration record
const event_data_t registration = {
_service,
_instance,
_notifier,
_type,
_reliability,
_is_provided,
_eventgroups
};
bool is_first(false);
{
// Look the registration up among the currently pending ones; if it is
// not there yet, this is the first registration of this event
std::lock_guard<std::mutex> its_lock(state_mutex_);
is_first = pending_event_registrations_.find(registration)
== pending_event_registrations_.end();
#ifndef VSOMEIP_ENABLE_COMPAT
if (is_first) {
pending_event_registrations_.insert(registration);
}
#else
...
...
#endif
}
// On first registration, let rtm_base create the event and eventgroup
// instances and add them to the caches; see the host chapter for the
// logic of routing_manager_base::register_event.
if (is_first || _is_provided) {
routing_manager_base::register_event(_client,
_service, _instance,
_notifier,
_eventgroups, _type, _reliability,
_cycle, _change_resets_cycle, _update_on_change,
_epsilon_change_func,
_is_provided);
}
{
std::lock_guard<std::mutex> its_lock(state_mutex_);
// If the application is already registered and this is the first
// registration of the event, send the command to the host side so it
// can execute its register_event flow
if (state_ == inner_state_type_e::ST_REGISTERED && is_first) {
send_register_event(client_, _service, _instance,
_notifier, _eventgroups, _type, _reliability, _is_provided);
}
}
}
继续跟踪send_register_event函数,发现其中的逻辑比较简单,就是根据传入的事件信息拼包,然后通过unix域socket的方式发送类型为VSOMEIP_REGISTER_EVENT命令到host端:
```cpp
// Packs the event information into a VSOMEIP_REGISTER_EVENT command and
// sends it to the host over the local (unix-domain) endpoint.
void routing_manager_proxy::send_register_event(client_t _client,
service_t _service, instance_t _instance,
event_t _notifier,
const std::set<eventgroup_t> &_eventgroups, const event_type_e _type,
reliability_type_e _reliability,
bool _is_provided) {
...
...
// NOTE(review): raw new/delete pair; the buffer would leak if send() threw
byte_t *its_command = new byte_t[its_eventgroups_size];
uint32_t its_size = static_cast<std::uint32_t>(its_eventgroups_size)
- VSOMEIP_COMMAND_HEADER_SIZE;
its_command[VSOMEIP_COMMAND_TYPE_POS] = VSOMEIP_REGISTER_EVENT;
// (command packing logic omitted)
std::size_t i = 9;
// Append all eventgroup ids to the command payload
for (auto eg : _eventgroups) {
std::memcpy(&its_command[VSOMEIP_COMMAND_PAYLOAD_POS + i], &eg,
sizeof(eventgroup_t));
i += sizeof(eventgroup_t);
}
{
std::lock_guard<std::mutex> its_lock(sender_mutex_);
// sender_ was covered in the init-flow analysis; it is a
// local_client_endpoint object
if (sender_) {
sender_->send(its_command, static_cast<std::uint32_t>(its_eventgroups_size));
}
}
if (_is_provided) {
// log output
}
delete[] its_command;
}
总结一下:vsomeip的应用如果需要在某个事件触发的时候得到消息的回调,就必须通过其提供的request_event方法来请求事件,请求事件的逻辑又会根据路由的host角色与proxy角色的不同来处理逻辑,两种角色的相同部分逻辑就是会在自己的模块中创建需要关联的event与eventgroup的信息,不同的部分在于proxy端还需要将它所订阅的事件信息发送给host路由,当host路由收到网段内其他端的事件通知时,它就会将其通知给对应的proxy端。
走完request_event的流程,接下来看subscribe,客户端订阅事件需要request_event与subscribe搭配才能收到事件。
Host路由的subscribe实现
host路由的subscribe函数实现在routing_manager_impl.cpp文件中,函数定义如下:
void routing_manager_impl::subscribe(client_t _client, uid_t _uid, gid_t _gid,
service_t _service, instance_t _instance, eventgroup_t _eventgroup,
major_version_t _major, event_t _event)
该函数实现,首先根据服务实例信息查看本地服务中是否有提供的该服务实例的客户端ID
const client_t its_local_client = find_local_client(_service, _instance);
这里有几个判断条件,汇总如下:
- 根据订阅的事件信息,从本地服务的缓存中查找是否存在提供该事件的客户端
- 提供事件信息的客户端是当前的app模块(get_client() == its_local_client),那么直接通过app中的on_subscription触发回调,告知订阅成功,app模块的回调函数如果允许订阅,则通过rtm_stub发送subscribe_ack命令,否则发送subscribe_nack
- 如果提供事件信息的客户端不是当前app模块,且本地服务中没有找到有提供该事件的实例,那么就通过SD模块去发起远程订阅(网段内广播订阅)
- 如果提供事件信息的客户端不是当前app模块,但是在本地服务中找到存在了事件的服务实例,通过rtm_stub告知对应的app端有其他的客户端向它的服务中包含的事件发起了订阅(unix域通信)。
下面是代码
// Simplified code: the subscribed service instance is a local service
void routing_manager_impl::subscribe(client_t _client, uid_t _uid, gid_t _gid,
service_t _service, instance_t _instance, eventgroup_t _eventgroup,
major_version_t _major, event_t _event) {
const client_t its_local_client = find_local_client(_service, _instance);
if (get_client() == its_local_client) {
auto self = shared_from_this();
// Invoke the application module's on_subscription handler
host_->on_subscription(_service, _instance, _eventgroup, _client, _uid, _gid, true,
[this, self, _client, _uid, _gid, _service, _instance, _eventgroup,
_event, _major]
(const bool _subscription_accepted) {
(void) ep_mgr_->find_or_create_local(_client);
// If this app rejects the subscription, send a NACK via the stub to
// the requesting client and return
if (!_subscription_accepted) {
stub_->send_subscribe_nack(_client, _service, _instance, _eventgroup, _event);
return;
} else {
// Subscription accepted by the application: send an ACK
stub_->send_subscribe_ack(_client, _service, _instance, _eventgroup, _event);
}
// Accepted: call routing_manager_base::subscribe to create the subscription
routing_manager_base::subscribe(_client, _uid, _gid, _service, _instance, _eventgroup, _major, _event);
});
}
// The client providing the event is not the current app module:
if (discovery_) {
std::set<event_t> its_already_subscribed_events;
std::unique_lock<std::mutex> its_critical(remote_subscription_state_mutex_);
// Insert the subscription
bool inserted = insert_subscription(_service, _instance, _eventgroup,
_event, _client, &its_already_subscribed_events);
if (inserted) {
// No local providing client known (0x0): remote subscription via SD
if (0 == its_local_client) {
handle_subscription_state(_client, _service, _instance, _eventgroup, _event);
its_critical.unlock();
static const ttl_t configured_ttl(configuration_->get_sd_ttl());
// Push the current value once to the new subscriber
notify_one_current_value(_client, _service, _instance,
_eventgroup, _event, its_already_subscribed_events);
// Broadcast the remote subscription via the SD module
auto its_info = find_eventgroup(_service, _instance, _eventgroup);
if (its_info) {
discovery_->subscribe(_service, _instance, _eventgroup,
_major, configured_ttl,
its_info->is_selective() ? _client : VSOMEIP_ROUTING_CLIENT,
its_info);
}
} else {
// A local (proxy) client provides the service
its_critical.unlock();
if (is_available(_service, _instance, _major)) {
// Send a VSOMEIP_SUBSCRIBE command to the providing proxy; it is
// finally received by routing_manager_proxy::on_message
stub_->send_subscribe(ep_mgr_->find_local(_service, _instance),
_client, _service, _instance, _eventgroup, _major, _event,
PENDING_SUBSCRIPTION_ID);
}
}
}
if (get_client() == _client) {
std::lock_guard<std::mutex> ist_lock(pending_subscription_mutex_);
subscription_data_t subscription = {
_service, _instance, _eventgroup, _major, _event, _uid, _gid
};
pending_subscriptions_.insert(subscription);
}
} else {
VSOMEIP_ERROR<< "SOME/IP eventgroups require SD to be enabled!";
}
}
上面的代码中,我们看到几个子流程:
- 给对应客户端发subscribe nack命令:routing_manager_stub::send_subscribe_nack
- 给对应客户端发subscribe ack命令:routing_manager_stub::send_subscribe_ack
- 给对应客户端发订阅命令:routing_manager_stub::send_subscribe
- 创建订阅器:routing_manager_base::subscribe
- 调用sd模块的subscribe方法
我们摘出来一个个的单个看
routing_manager_stub::send_subscribe_nack 这个函数调用的场景在订阅时提供事件信息的客户端是当前的app模块,且app模块中通过register_subscription_handler注册了订阅操作函数,此时在订阅回调触发时,如果app模块拒绝客户端订阅,则会发送一个nack给到订阅的客户端。该函数定义如下:
// Sends a VSOMEIP_SUBSCRIBE_NACK command to the client whose subscription
// was rejected by the providing application.
void routing_manager_stub::send_subscribe_nack(client_t _client, service_t _service,
instance_t _instance, eventgroup_t _eventgroup, event_t _event) {
// Find the local endpoint object of the target client
std::shared_ptr<endpoint> its_endpoint = host_->find_local(_client);
if (its_endpoint) {
byte_t its_command[VSOMEIP_SUBSCRIBE_NACK_COMMAND_SIZE];
uint32_t its_size = VSOMEIP_SUBSCRIBE_NACK_COMMAND_SIZE
- VSOMEIP_COMMAND_HEADER_SIZE;
client_t this_client = get_client();
its_command[VSOMEIP_COMMAND_TYPE_POS] = VSOMEIP_SUBSCRIBE_NACK;
// (command packing omitted -- its_size and this_client are used there)
// Send the command message to the corresponding client
its_endpoint->send(&its_command[0], sizeof(its_command));
}
}
这个消息会被客户端的routing_manager_proxy中消化:
// The NACK is consumed on the client side in routing_manager_proxy.
void routing_manager_proxy::on_message(const byte_t *_data, length_t _size,
endpoint *_receiver, const boost::asio::ip::address &_destination,
client_t _bound_client,
credentials_t _credentials,
const boost::asio::ip::address &_remote_address,
std::uint16_t _remote_port) {
// (code omitted)
case VSOMEIP_SUBSCRIBE_NACK:
// (code omitted)
// Dispatches directly to the following handler
on_subscribe_nack(its_subscriber, its_service, its_instance, its_eventgroup, its_event);
}
// Reports a rejected subscription (status 0x7) back to the application.
// For an ANY_EVENT subscription the rejection is propagated to every event
// contained in the eventgroup.
void routing_manager_proxy::on_subscribe_nack(client_t _client,
        service_t _service, instance_t _instance, eventgroup_t _eventgroup, event_t _event) {
    (void)_client;
    if (_event != ANY_EVENT) {
        // A concrete event was rejected: notify the application directly
        host_->on_subscription_status(_service, _instance, _eventgroup, _event, 0x7 /*Rejected*/);
        return;
    }
    // ANY_EVENT: fan the rejection out over all events of the eventgroup
    const auto its_group = find_eventgroup(_service, _instance, _eventgroup);
    if (its_group) {
        for (const auto& its_member : its_group->get_events()) {
            host_->on_subscription_status(_service, _instance, _eventgroup,
                    its_member->get_event(), 0x7 /*Rejected*/);
        }
    }
}
最终,如果client的app有注册订阅状态的操作函数,则能够监听到订阅状态的回调
app_->register_subscription_status_handler
routing_manager_stub::send_subscribe_ack的流程与nack的流程基本大同小异, 这里不再复述了。
routing_manager_stub::send_subscribe : 该函数的调用场景是:客户端订阅时,提供订阅事件的服务实例由另外一个代理客户端提供,所以这里拼了一个VSOMEIP_SUBSCRIBE的包发给rtm_proxy去处理
调用sd模块的subscribe方法:SD模块的业务实现在service_discovery_impl中,刨去细枝末节,来跟一下整个subscribe的流程如下:
// SD-side subscribe: builds the subscription object (elided here) and hands
// it over for sending as an SD message.
void
service_discovery_impl::subscribe(
service_t _service, instance_t _instance,
eventgroup_t _eventgroup, major_version_t _major,
ttl_t _ttl, client_t _client,
const std::shared_ptr<eventgroupinfo> &_info) {
...
...
send_subscription(its_subscription,
_service, _instance, _eventgroup,
_client);
}
// Determines the target address from the subscription's endpoints, builds
// the SD entry and triggers serialization and sending.
void
service_discovery_impl::send_subscription(
const std::shared_ptr<subscription> &_subscription,
const service_t _service, const instance_t _instance,
const eventgroup_t _eventgroup,
const client_t _client) {
auto its_reliable = _subscription->get_endpoint(true);
auto its_unreliable = _subscription->get_endpoint(false);
boost::asio::ip::address its_address;
get_subscription_address(its_reliable, its_unreliable, its_address);
if (!its_address.is_unspecified()) {
...
...
if (its_data.entry_) {
auto its_current_message = std::make_shared<message_impl>();
std::vector<std::shared_ptr<message_impl> > its_messages;
its_messages.push_back(its_current_message);
add_entry_data(its_messages, its_data);
// Serialize the data, then send the message
serialize_and_send(its_messages, its_address);
}
}
}
// (parts of this function are omitted in this excerpt, including the
// return statement of the bool result)
bool
service_discovery_impl::serialize_and_send(
const std::vector<std::shared_ptr<message_impl> > &_messages,
const boost::asio::ip::address &_address) {
if (!_address.is_unspecified()) {
std::lock_guard<std::mutex> its_lock(serialize_mutex_);
for (const auto &m : _messages) {
if (m->has_entry()) {
...
...
// Serialize the message data
if (serializer_->serialize(m.get())) {
if (host_->send_via_sd(endpoint_definition::get(_address, port_,
reliable_, m->get_service(), m->get_instance()),
serializer_->get_data(), serializer_->get_size(),
port_)) {
// Increment the session id
increment_session(_address);
}
}
}
}
}
}
// The business logic ends above; what follows is the network sending path.
// host_ points to routing_manager_impl, where send_via_sd is implemented.
/**
 * Sends a serialized SD message to the given target.
 *
 * @param _target  Remote endpoint definition (address, port, reliability).
 * @param _data    Serialized message buffer.
 * @param _size    Buffer size in bytes.
 * @param _sd_port Local SD port used to look up the server endpoint.
 * @return true if the message was handed to the endpoint, false otherwise.
 */
bool routing_manager_impl::send_via_sd(
const std::shared_ptr<endpoint_definition> &_target,
const byte_t *_data, uint32_t _size, uint16_t _sd_port) {
std::shared_ptr<endpoint> its_endpoint =
ep_mgr_impl_->find_server_endpoint(_sd_port,
_target->is_reliable());
// Fix: find_server_endpoint may return an empty pointer (e.g. the SD
// server endpoint does not exist yet); dereferencing it unchecked would
// crash. Guard the call and report failure instead.
if (its_endpoint) {
return its_endpoint->send_to(_target, _data, _size);
}
return false;
}
// its_endpoint above is actually a shared pointer to udp_server_endpoint_impl
void udp_server_endpoint_impl is entered here:
bool udp_server_endpoint_impl::send_to(
const std::shared_ptr<endpoint_definition> _target,
const byte_t *_data, uint32_t _size) {
std::lock_guard<std::mutex> its_lock(mutex_);
// Build the boost endpoint from the target's address/port, then delegate
endpoint_type its_target(_target->get_address(), _target->get_port());
return send_intern(its_target, _data, _size);
}
// send_intern is implemented in the base class server_endpoint_impl as a
// template function; it buffers data into a "train" and (re)starts the
// departure timer so the train is flushed asynchronously.
template<typename Protocol>
bool server_endpoint_impl<Protocol>::send_intern(
endpoint_type _target, const byte_t *_data, uint32_t _size) {
...
...
// STEP 10: restart timer with current departure time
target_train->departure_timer_->expires_from_now(target_train->departure_);
target_train->departure_timer_->async_wait(
std::bind(&server_endpoint_impl<Protocol>::flush_cbk,
this->shared_from_this(), _target,
target_train, std::placeholders::_1));
}
// Departure-timer callback: flushes the buffered train unless the timer
// wait was cancelled or failed.
template<typename Protocol>
void server_endpoint_impl<Protocol>::flush_cbk(
        endpoint_type _target,
        const std::shared_ptr<train>& _train, const boost::system::error_code &_error_code) {
    if (_error_code) {
        return; // timer cancelled or errored out -- nothing to flush
    }
    (void) flush(_target, _train);
}
// flush is invoked from the timer callback above.
template<typename Protocol>
bool server_endpoint_impl<Protocol>::flush(
endpoint_type _target,
const std::shared_ptr<train>& _train){
// If the train buffer is not empty, move its data into the target's queue
// NOTE(review): is_flushed and the return statement are elided in this excerpt
if (!_train->buffer_->empty()){
const queue_iterator_type target_queue_iterator = queues_.find(_target);
if (target_queue_iterator != queues_.end()) {
const bool queue_size_zero_on_entry(target_queue_iterator->second.second.empty());
queue_train(target_queue_iterator, _train, queue_size_zero_on_entry);
is_flushed = true;
}
}
}
// Moves the train's data into the send queue and kicks off the actual send.
template<typename Protocol>
void server_endpoint_impl<Protocol>::queue_train(
const queue_iterator_type _queue_iterator,
const std::shared_ptr<train>& _train,
bool _queue_size_zero_on_entry) {
...
send_queued(_queue_iterator);
}
// send_queued is a virtual function implemented by the subclasses; here it
// is udp_server_endpoint_impl again, which completes the send flow via an
// asynchronous UDP send.
void udp_server_endpoint_impl::send_queued(
const queue_iterator_type _queue_iterator) {
message_buffer_ptr_t its_buffer = _queue_iterator->second.second.front();
...
...
std::lock_guard<std::mutex> its_lock(unicast_mutex_);
// Asynchronously send the front buffer to the queued target endpoint;
// send_cbk is invoked on completion.
unicast_socket_.async_send_to(
boost::asio::buffer(*its_buffer),
_queue_iterator->first,
std::bind(
&udp_server_endpoint_base_impl::send_cbk,
shared_from_this(),
_queue_iterator,
std::placeholders::_1,
std::placeholders::_2
)
);
}
proxy路由的subscribe实现
跟以往所有流程一样,proxy的实现相对host来说要简单很多了,proxy中的subscribe实现在routing_manager_proxy中
// Proxy-side subscribe: forwards the subscription when possible and always
// records it as pending so it can be replayed after (re)registration.
void routing_manager_proxy::subscribe(client_t _client, uid_t _uid, gid_t _gid, service_t _service,
instance_t _instance, eventgroup_t _eventgroup, major_version_t _major,
event_t _event) {
...
...
std::lock_guard<std::mutex> its_lock(state_mutex_);
// Service available and this application already in registered state
if (state_ == inner_state_type_e::ST_REGISTERED && is_available(_service, _instance, _major)) {
send_subscribe(client_, _service, _instance, _eventgroup, _major, _event );
}
subscription_data_t subscription = { _service, _instance, _eventgroup, _major, _event, _uid, _gid};
pending_subscriptions_.insert(subscription);
}
// Builds a VSOMEIP_SUBSCRIBE command and routes it either directly to the
// providing local client or to the routing (host) application.
void routing_manager_proxy::send_subscribe(client_t _client, service_t _service,
instance_t _instance, eventgroup_t _eventgroup, major_version_t _major,
event_t _event) {
...
its_command[VSOMEIP_COMMAND_TYPE_POS] = VSOMEIP_SUBSCRIBE;
...
client_t target_client = find_local_client(_service, _instance);
if (target_client != VSOMEIP_ROUTING_CLIENT) {
// Send the command to the client that owns the subscribed service.
// This covers the case where one process hosts a routing app plus
// several proxy apps and one of those proxy apps provides the event;
// the message then arrives in that app's
// routing_manager_proxy::on_message handler.
auto its_target = ep_mgr_->find_or_create_local(target_client);
its_target->send(its_command, sizeof(its_command));
} else {
// Send the command to the routing app; handled by routing_manager_stub.
std::lock_guard<std::mutex> its_lock(sender_mutex_);
if (sender_) {
sender_->send(its_command, sizeof(its_command));
}
}
}
// Case 1: proxy -> proxy
void routing_manager_proxy::on_message(const byte_t *_data, length_t _size,
endpoint *_receiver, const boost::asio::ip::address &_destination,
client_t _bound_client,
credentials_t _credentials,
const boost::asio::ip::address &_remote_address,
std::uint16_t _remote_port) {
...
....
case VSOMEIP_SUBSCRIBE:
// 1. Trigger the subscription callback in the app; reply with ACK if
//    the app accepts the subscription, otherwise with NACK
// 2. Create the subscription
}
// Case 2: proxy -> stub
void routing_manager_stub::on_message(const byte_t *_data, length_t _size,
endpoint *_receiver, const boost::asio::ip::address &_destination,
client_t _bound_client,
credentials_t _credentials,
const boost::asio::ip::address &_remote_address,
std::uint16_t _remote_port) {
case VSOMEIP_SUBSCRIBE:
// Run the subscribe flow in routing_manager_impl
host_->subscribe(its_client, its_sender_uid, its_sender_gid, its_service, its_instance,
its_eventgroup, its_major, its_notifier);
}