// Initialize the object-cache dispatch layer for this image and register it
// with the image's I/O dispatcher. (Excerpt: elided lines are shown as "...".)
void ObjectCacherObjectDispatch<I>::init() {
auto cct = m_image_ctx->cct;
...
// Writeback handler: flushes dirty cache contents back to the OSDs on
// behalf of the ObjectCacher.
m_writeback_handler = new LibrbdWriteback(m_image_ctx, m_cache_lock);
// Start from the configured dirty-data limit...
uint64_t init_max_dirty = m_image_ctx->cache_max_dirty;
if (m_image_ctx->cache_writethrough_until_flush) {
// ...but force writethrough mode (no dirty data allowed in cache) until the
// client issues its first explicit flush — see flush() below, which raises
// the limit back to cache_max_dirty on the first FLUSH_SOURCE_USER request.
init_max_dirty = 0;
}
...
m_object_cacher = new ObjectCacher(cct, m_image_ctx->perfcounter->get_name(),
*m_writeback_handler, m_cache_lock,
nullptr, nullptr, cache_size,
10, /* reset this in init */
init_max_dirty, target_dirty,
max_dirty_age, block_writes_upfront);
...
// Start the cacher's flusher thread, then hook this dispatcher into the
// image's object dispatch chain.
m_object_cacher->start();
m_image_ctx->io_object_dispatcher->register_object_dispatch(this);
}
在初始化 ObjectCacherObjectDispatch 的时候，先根据配置项 rbd_cache_writethrough_until_flush（对应代码中的 m_image_ctx->cache_writethrough_until_flush）决定如何初始化 init_max_dirty。
rbd_cache_writethrough_until_flush 默认是 true，因此 init_max_dirty 默认为 0，即 cache 以 writethrough（透写）模式运行，默认不开启 writeback cache。
因此我们有两种可能会导致 rbd 的 writeback cache 开启：
- 可能1：直接把配置项 rbd_cache_writethrough_until_flush 设置为 false；
- 可能2：rbd_cache_writethrough_until_flush 保持为 true，等待客户端的使用者主动调用 flush 接口，此时 cache 会被切换到 writeback 模式。这从某种意义上来说是一种数据安全机制：既然主机主动调用了 flush，就意味着它能够容忍部分数据暂存在 cache 中而不立即透写到持久化层。
逻辑如下:
// Handle a flush request dispatched to the object-cache layer.
//
// On the first user-initiated flush (FLUSH_SOURCE_USER), if the image was
// started in writethrough-until-flush mode and a non-zero dirty limit is
// configured, raise the cacher's max-dirty limit to the configured value —
// i.e. switch the cache from writethrough to writeback. In all cases the
// cached object set is flushed and dispatch continues down the chain.
//
// @param flush_source    origin of the flush (user vs internal)
// @param parent_trace    tracing context (unused here beyond propagation)
// @param dispatch_result set to DISPATCH_RESULT_CONTINUE
// @param on_finish       completion chain (not modified here)
// @param on_dispatched   invoked once the cache flush completes
// @return true: the request was handled by this layer
template <typename I>
bool ObjectCacherObjectDispatch<I>::flush(
io::FlushSource flush_source, const ZTracer::Trace &parent_trace,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << dendl;
// ensure we aren't holding the cache lock post-flush
on_dispatched = util::create_async_context_callback(*m_image_ctx,
on_dispatched);
m_cache_lock.Lock();
if (flush_source == io::FLUSH_SOURCE_USER && !m_user_flushed &&
m_image_ctx->cache_writethrough_until_flush &&
m_image_ctx->cache_max_dirty > 0) {
// first explicit flush from the client: it has demonstrated it issues
// flushes, so it is safe to let dirty data accumulate in the cache
m_user_flushed = true;
m_object_cacher->set_max_dirty(m_image_ctx->cache_max_dirty);
ldout(cct, 5) << "saw first user flush, enabling writeback" << dendl;
}
*dispatch_result = io::DISPATCH_RESULT_CONTINUE;
// flush_set is invoked under m_cache_lock; on_dispatched was wrapped in an
// async callback above so the completion never runs with the lock held
m_object_cacher->flush_set(m_object_set, on_dispatched);
m_cache_lock.Unlock();
return true;
}