提示:
此博客中代码不一定是最新,最新代码请参考:https://github.com/xj-zhu/ttl_cache
看到这篇文章,或许你会有以下疑问:
现在有各种开源的内存k-v缓存数据库,为啥要自己搞一个?
几点解释:
1. 这些k-v系统一般都比较重量级,当然相对于关系型数据库可能还好,但是相对于我的这个实现来说绝对是重量级的
2. 量级太重意味着使用会更加复杂,前期熟悉的成本高
3. 主流的k-v系统使用场景还是类似于数据库,提供大量数据存储,我的实现相比之下更像是一个匕首,小而灵活,使用场景有区别
4. 主流的k-v系统不会对语言依赖太多,所以数据类型比较少,相对于c++的强类型来说并不能无缝兼容
5. 量级重的缓存系统使用c/s架构,是独立于用户程序以外的,在服务端使用还好,但是如果是客户端,则基本不可用。
6. 可以配合其他k-v或者数据库使用,在程序内存中存储少量但是热度高的数据。
7. 除非要落盘/持久化/转移到其他数据存储系统,否则在数据周转过程中不需要对数据进行序列化和反序列化,因为数据均可以是C++直接支持的类型。
现在提供一种使用c++实现的k-v缓存系统,其中key可以是任何c++类型(但是有需要注意的细节,下面再细说),而且可以通过多个key来定位value值,并且类似于redis提供键的生命周期,故类名为ttl_cache
代码:(因为是模板,故只提供了一个头文件)
#ifndef _TTL_CACHE_H_
#define _TTL_CACHE_H_
#include <memory>
#include <chrono>
#include <mutex>
#include <condition_variable>
#include <thread>
#include <functional>
#include <list>
#include <map>
#include <algorithm>
#include <type_traits>
#include <typeinfo>
#include <assert.h>
namespace ttl
{
namespace type_traits
{
template<typename _Tp, typename _Up>
class _is_appendable_helper
{
template<typename _Tp1, typename _Up1,
typename = decltype(std::declval<_Tp1>() += std::declval<_Up1>())>
static std::true_type __test(int);
template<typename, typename>
static std::false_type __test(...);
public:
typedef decltype(__test<_Tp&, _Up&>(0)) type;
};
template<typename _Tp, typename _Up>
struct is_appendable : public _is_appendable_helper<_Tp, _Up>::type
{ };
}
using namespace std::chrono; // for the 5000ms duration literal used in cache_mgr (C++14)
// How a second Set under the same (DataType, keys...) updates the stored entry.
enum DataStoreType
{
DS_Err, // error / unset
DS_SINGLE, // single record: an update overwrites the previous value
DS_QUEUE, // sequential data: an update appends via operator+=
};
// Application-defined discriminator for the kind of data cached
// (applications extend it, e.g. `enum EmDataType : ttl::DataType`).
typedef int DataType;
static constexpr DataType DT_Err = -1; // sentinel: invalid/unknown data type
class cache_mgr; // forward declaration for the friend declaration in cache_base
// Type-erased base for all cache handles. cache_mgr stores, copies and
// appends entries only through this interface; cache<_Ty> supplies the real
// behavior via the private virtual hooks below.
class cache_base
{
public:
virtual ~cache_base() {}
// Overridden by cache<_Ty> to report the payload shared_ptr's use count;
// the base version must never be reached (asserts in debug builds).
virtual long use_count() const noexcept
{
assert(false);
return 0;
}
private:
// Hook invoked after operator= copied m_managed; cache<_Ty> uses it to
// clone the managed master's payload/metadata into this handle.
virtual void _OnCopy() {};
// Hook invoked by operator+= for DS_QUEUE updates; cache<_Ty> appends
// `that`'s payload when _Ty supports operator+=.
virtual void _OnAppend(const cache_base& that) {};
public:
// Copies only the managed-master pointer, then lets the derived class
// copy its payload through the _OnCopy hook.
cache_base& operator = (const cache_base& that)
{
if (this != &that)
{
m_managed = that.m_managed;
_OnCopy();
}
return *this;
}
// Append semantics for DS_QUEUE entries; forwards to the derived hook.
cache_base& operator+=(const cache_base& that)
{
_OnAppend(that);
return *this;
}
public:
// Raw pointer to the heap-allocated "master" copy whose ownership is
// handed to cache_mgr (see cache<_Ty>::ManageTTL); nullptr until the
// entry is registered.
cache_base* m_managed = nullptr;
friend class cache_mgr;
};
// Singleton owner of all cached entries plus the background TTL sweeper.
//
// Layout:
//  - m_records : DataType -> type-erased per-type std::map ("CacheMap") whose
//    key is the tuple of caller-supplied keys and whose value is a weak_ptr
//    to the stored cache_base. The map pointer is kept as void* together
//    with a deleter and a typeid checker so it can be validated and freed
//    without knowing the key types here.
//  - m_caches  : owning shared_ptr -> lifetime in milliseconds.
//  - m_queue   : absolute expiry time -> owning shared_ptr, consumed by the
//    sweeper thread.
//
// Hazard (acknowledged by the author): the CacheMap cast is driven purely by
// the Keys parameter pack, so Get/Set/Clr for one DataType must always use
// identical key types; the stored checker catches mismatches at runtime.
class cache_mgr
{
// When an entry's TTL countdown begins.
enum TtlStrategy
{
TTL_WHEN_START, // countdown starts as soon as the last user handle releases (via StartTTL at SetCache time)
TTL_WHEN_ALL_RELEASE, // sweeper only reclaims once the manager holds the last reference
};
public:
// Meyers singleton; the instance is destroyed (and the sweeper joined) at exit.
static cache_mgr& Instance() { static cache_mgr inst; return inst; }
public:
// Look up the entry stored under (edt, keys...) and copy it into _cache via
// cache_base::operator= (which fires the derived _OnCopy hook).
// Returns false when the DataType, key-type signature (typeid check) or the
// key tuple is unknown. Expired entries are pruned on the way, and an
// emptied per-type map is destroyed.
template <typename... Keys>
bool GetCache(cache_base& _cache, DataType edt, Keys&&... keys)
{
typedef std::map<std::tuple<std::remove_const_t<std::remove_reference_t<Keys>>...>
, std::weak_ptr<cache_base>> CacheMap;
std::lock_guard<std::recursive_mutex> l(m_mutex);
auto it1 = m_records.find(edt);
if (it1 != m_records.end())
{
auto& tp = it1->second;
CacheMap* pmap = (CacheMap*)(std::get<0>(tp));
auto& uniqe_func = std::get<2>(tp);
// Validate the type-erased map's real type before trusting the cast.
if (pmap && uniqe_func && (*uniqe_func) && (*uniqe_func)(typeid(CacheMap)))
{
auto it2 = pmap->find(std::forward_as_tuple(std::forward<Keys>(keys)...));
if (it2 != pmap->end())
{
auto shared = it2->second.lock();
if (shared && shared.get())
{
_cache = *(shared.get());
return true;
}
// weak_ptr expired: drop the stale slot (and the map if now empty).
pmap->erase(it2);
if (pmap->empty())
{
delete pmap;
m_records.erase(edt);
}
}
}
}
return false;
}
// Take ownership of _cache (wrapped in a shared_ptr immediately, so it is
// freed even on the mismatch early-return) and store it under (edt, keys...)
// with the given lifetime in ms. On first use of a DataType the per-type
// CacheMap is created together with its type-erased deleter and typeid
// checker. DS_QUEUE appends to an existing live entry via operator+=;
// DS_SINGLE overwrites. Returns false on a key-type mismatch.
template <typename... Keys>
bool SetCache(cache_base* _cache, DataStoreType edst, DataType edt, time_t lifems, Keys&&... keys)
{
typedef std::map<std::tuple<std::remove_const_t<std::remove_reference_t<Keys>>...>
, std::weak_ptr<cache_base>> CacheMap;
std::shared_ptr<cache_base> shared(_cache);
std::lock_guard<std::recursive_mutex> l(m_mutex);
auto it = m_records.find(edt);
CacheMap* pmap = nullptr;
if (it != m_records.end())
{
auto& tp = it->second;
pmap = (CacheMap*)(std::get<0>(tp));
auto& uniqe_func = std::get<2>(tp);
if (!(uniqe_func && (*uniqe_func) && (*uniqe_func)(typeid(CacheMap))))
return false;
}
else
{
pmap = new CacheMap;
// Deleter that restores the static type before delete, so the map's
// destructor actually runs when cache_mgr itself is destroyed.
struct deleter
{
void operator()(void* pmap)
{
CacheMap* ptypedmap = (CacheMap*)pmap;
delete ptypedmap;
}
};
std::function<void(void*)> *func_deleter = new std::function<void(void*)>(deleter());
// Checker used by later Get/Set/Clr calls to validate the void* cast.
struct checker
{
bool operator()(const std::type_info& info)
{
return typeid(CacheMap) == info;
}
};
std::function<bool(const std::type_info&)> *func_checker = new std::function<bool(const std::type_info&)>(checker());
m_records[edt] = std::forward_as_tuple(pmap
, std::unique_ptr<std::function<void(void*)>>(func_deleter)
, std::unique_ptr<std::function<bool(const std::type_info&)>>(func_checker));
}
if (pmap)
{
auto& ptr = (*pmap)[std::forward_as_tuple(std::forward<Keys>(keys)...)];
switch (edst)
{
default:
assert(false);
return false;
break;
case DS_QUEUE:
if (ptr.expired())
ptr = shared;
else
{
// Live entry: append the new payload into it, then drop the
// new object (shared becomes empty, so it is not re-inserted).
*(ptr.lock().get()) += *(shared.get());
shared.reset();
}
break;
case DS_SINGLE:
// Drop the previous owner (erase with an empty shared_ptr is a no-op).
m_caches.erase(ptr.lock());
ptr = shared;
break;
}
if (shared)
m_caches.insert(std::make_pair(std::move(shared), lifems));
if (_CheckStrategy(TTL_WHEN_START))
_StartTTL(_cache);
return true;
}
return false;
}
// Remove the entry stored under (edt, keys...) together with its owning
// reference. Returns true even when nothing was found; false only on a
// key-type mismatch.
template <typename... Keys>
bool ClrCache(DataType edt, Keys&&... keys)
{
typedef std::map<std::tuple<std::remove_const_t<std::remove_reference_t<Keys>>...>
, std::weak_ptr<cache_base>> CacheMap;
std::lock_guard<std::recursive_mutex> l(m_mutex);
auto it = m_records.find(edt);
if (it != m_records.end())
{
auto& tp = it->second;
CacheMap* pmap = (CacheMap*)(std::get<0>(tp));
auto& uniqe_func = std::get<2>(tp);
if (!(uniqe_func && (*uniqe_func) && (*uniqe_func)(typeid(CacheMap))))
return false;
if (pmap)
{
// NOTE(review): this `tp` shadows the record tuple above — legal but
// confusing; kept as-is.
auto tp = std::forward_as_tuple(std::forward<Keys>(keys)...);
auto itf = pmap->find(tp);
if (itf != pmap->end())
{
m_caches.erase(itf->second.lock());
pmap->erase(itf);
}
if (pmap->empty())
{
delete pmap;
m_records.erase(edt);
}
}
}
return true;
}
// Begin the TTL countdown for the entry owning _cache. Under the
// TTL_WHEN_START strategy this is a no-op because the countdown already
// began in SetCache.
void StartTTL(cache_base* _cache)
{
if (_CheckStrategy(TTL_WHEN_START))
return;
std::lock_guard<std::recursive_mutex> l(m_mutex);
_StartTTL(_cache);
}
// Intentionally a no-op: a started countdown cannot currently be paused.
void StopTTL(cache_base* _cache) {/*donothing.*/ }
private:
// Sweeper thread: wakes every m_perchackduration (or on notify) and erases
// owners whose expiry time has passed. Under TTL_WHEN_ALL_RELEASE an entry
// is only reclaimed when the manager holds the last reference
// (use_count() == 1). On shutdown (m_loop_running false) the queue is
// drained unconditionally.
void _ThreadLoop()
{
while (1)
{
std::unique_lock<std::recursive_mutex> l(m_mutex);
m_condvar.wait_for(l, m_perchackduration);
for (auto it = m_queue.begin(); it != m_queue.end();)
{
if ((!m_loop_running) || (it->first <= std::chrono::steady_clock::now()))
{
if ((_CheckStrategy(TTL_WHEN_ALL_RELEASE)) ? (it->second && it->second->use_count() == 1) : true)
m_caches.erase(it->second);
it = m_queue.erase(it);
}
else if ((m_loop_running) || false)
// m_queue is ordered by expiry time, so the rest are not due yet.
break;
}
if (!m_loop_running)
break;
}
}
// Locate the owning shared_ptr for _cache and, unless its lifetime is the
// infinite sentinel time_t(-1), enqueue it at now + lifetime and wake the
// sweeper. Caller must hold m_mutex.
void _StartTTL(cache_base* _cache)
{
std::shared_ptr<cache_base> shared;
auto it = std::find_if(m_caches.begin()
, m_caches.end()
, [&shared, _cache](std::pair<const std::shared_ptr<cache_base>, time_t>& pr)
{
if (pr.first.get() == _cache)
{
shared = pr.first;
return true;
}
return false;
});
if (it != m_caches.end())
{
if (time_t(-1) != it->second)
m_queue.insert(std::make_pair(std::chrono::steady_clock::now()
+ std::chrono::duration_cast<std::chrono::steady_clock::duration>(std::chrono::milliseconds(it->second))
, std::move(shared)));
m_condvar.notify_one();
}
}
// With only two enumerators this reduces to (m_strategy == strategy).
bool _CheckStrategy(TtlStrategy strategy)
{
return (m_strategy == TTL_WHEN_START)
? (strategy == TTL_WHEN_START)
: (strategy == TTL_WHEN_ALL_RELEASE);
}
private:
TtlStrategy m_strategy = TTL_WHEN_START; // compile-time-fixed expiry strategy
std::chrono::milliseconds m_perchackduration = 5000ms; // sweep interval (name sic)
// NOTE(review): written by the destructor without holding m_mutex and read
// by the sweeper — technically a data race; consider std::atomic<bool>.
bool m_loop_running = true;
std::thread* m_thread = nullptr; // sweeper thread, joined in the destructor
// Expiry-ordered queue consumed by the sweeper.
std::multimap<std::chrono::steady_clock::time_point, std::shared_ptr<cache_base>> m_queue;
// Owning references with their lifetimes in ms (time_t(-1) = never expire).
std::map<std::shared_ptr<cache_base>, time_t> m_caches;
// (type-erased CacheMap*, deleter for it, typeid checker for the cast).
typedef std::tuple<void*, std::unique_ptr<std::function<void(void*)>>, std::unique_ptr<std::function<bool(const std::type_info&)>>> RecordTuple;
typedef std::map<DataType, RecordTuple> RecordsMap;
RecordsMap m_records;
std::recursive_mutex m_mutex; // recursive: SetCache calls _StartTTL while locked
std::condition_variable_any m_condvar; // _any: works with recursive_mutex
private:
cache_mgr()
{
// Spawn the TTL sweeper; it runs until the singleton is destroyed.
m_thread = new std::thread(std::bind(&cache_mgr::_ThreadLoop, this));
}
~cache_mgr()
{
m_loop_running = false;
m_condvar.notify_one();
if (m_thread)
m_thread->join();
delete m_thread;
m_thread = nullptr;
// Free each type-erased per-type map through its stored deleter so the
// correct destructor runs despite the void* storage.
std::for_each(m_records.begin()
, m_records.end()
, [](std::pair<const DataType, RecordTuple>& pr)
{
auto& tp = pr.second;
void* pmap = std::get<0>(tp);
auto& uniqe_func = std::get<1>(tp);
if (uniqe_func && *uniqe_func)
(*uniqe_func)(pmap);
});
}
cache_mgr(const cache_mgr& that) = delete;
cache_mgr(cache_mgr&& that) = delete;
cache_mgr& operator=(const cache_mgr& that) = delete;
cache_mgr& operator=(cache_mgr&& that) = delete;
};
// Smart cache handle: behaves like std::shared_ptr<_Ty> while transparently
// registering its payload with cache_mgr under (DataStoreType, DataType,
// lifetime-ms, keys...).
// Lifecycle as implemented below: the first handle for a payload creates a
// heap-allocated "master" cache<_Ty> (ManageTTL) whose ownership passes to
// cache_mgr; when a handle is destroyed and apparently only this handle plus
// the master remain (use_count() == 2), the TTL countdown is started.
template <typename _Ty>
class cache : public cache_base
{
typedef cache<_Ty> _Myt;
private:
// Called from every non-empty constructor. First owner (use_count()==1):
// create and register the managed master, forwarding _args as the lookup
// keys. Otherwise a master already exists; (notionally) pause its TTL.
template <typename... Args>
void _CheckInConstructor(Args&&... _args)
{
if (use_count() == 1)
{
assert(nullptr == m_managed);
ManageTTL(std::forward<Args>(_args)...);
}
else/* if (use_count() == 2)*/
{
assert(nullptr != m_managed);
StopTTL();
}
}
// When the count drops to 2 — presumably this dying handle plus the
// manager's master — begin the TTL countdown.
void _CheckInDestructor()
{
if (use_count() == 2)
{
StartTTL();
}
}
// cache_base hook: after cache_mgr assigned its master into this handle,
// clone the master's payload/metadata and re-run the constructor check.
void _OnCopy()
{
if (_CopyCache(this, dynamic_cast<_Myt*>(m_managed)))
{
_CheckInConstructor();
}
}
// cache_base hook for DS_QUEUE updates: tag-dispatch on whether _Ty
// supports operator+=, detected at compile time.
void _OnAppend(const cache_base& that)
{
__OnAppend(dynamic_cast<_Myt*>(that.m_managed), type_traits::is_appendable<_Ty, _Ty>());
}
void __OnAppend(const _Myt* that, std::true_type&&)
{
if (that)
{
if (m_shared && that->m_shared)
{
*m_shared += *(that->m_shared);
}
}
}
// Fallback when _Ty has no operator+=: using DS_QUEUE with such a type is
// a programming error.
void __OnAppend(...)
{
assert(false);
}
// Shallow copy of payload pointer and metadata from src to dst (the
// shared_ptr is shared, not deep-copied).
bool _CopyCache(cache<_Ty>* const dst, const cache<_Ty>* const src)
{
if (!dst || !src)
return false;
dst->m_shared = src->m_shared;
dst->m_Edst = src->m_Edst;
dst->m_Edt = src->m_Edt;
dst->m_lifeMs = src->m_lifeMs;
return true;
}
// Hand the master copy to the manager under the given keys.
template <typename... Args>
void _ManageTTL(Args&&... _args)
{
cache_mgr::Instance().SetCache(m_managed, m_Edst, m_Edt, m_lifeMs, std::forward<Args>(_args)...);
}
private:
// Create the heap-allocated master copy (self-referencing m_managed) and
// register it; cache_mgr's shared_ptr owns it from here on.
template <typename... Args>
void ManageTTL(Args&&... _args)
{
m_managed = new cache<_Ty>;
m_managed->m_managed = m_managed;
_CopyCache(dynamic_cast<_Myt*>(m_managed), this);
_ManageTTL(std::forward<Args>(_args)...);
}
void StartTTL()
{
cache_mgr::Instance().StartTTL(m_managed);
}
// Currently a no-op in cache_mgr (a TTL cannot be paused).
void StopTTL()
{
cache_mgr::Instance().StopTTL(m_managed);
}
public:
cache() noexcept
{ // construct empty cache
}
// Owning constructors mirror std::shared_ptr's (pointer / +deleter /
// +allocator); the extra leading parameters are the store type, data type
// and lifetime in ms, and the trailing _args are the lookup keys.
template<class _Ux,
typename... Args>
explicit cache(_Ux *_Px, DataStoreType _Edst, DataType _Edt, time_t _lifeMs, Args&&... _args)
: m_shared(_Px)
, m_Edst(_Edst)
, m_Edt(_Edt)
, m_lifeMs(_lifeMs)
{ // construct cache object that owns _Px
_CheckInConstructor(std::forward<Args>(_args)...);
}
template<class _Ux,
class _Dx,
typename... Args>
cache(_Ux *_Px, _Dx _Dt, DataStoreType _Edst, DataType _Edt, time_t _lifeMs, Args&&... _args)
: m_shared(_Px, _Dt)
, m_Edst(_Edst)
, m_Edt(_Edt)
, m_lifeMs(_lifeMs)
{ // construct with _Px, deleter
_CheckInConstructor(std::forward<Args>(_args)...);
}
cache(std::nullptr_t) noexcept
{ // construct empty cache
}
template<class _Dx,
typename... Args>
cache(std::nullptr_t _N, _Dx _Dt, DataStoreType _Edst, DataType _Edt, time_t _lifeMs, Args&&... _args)
: m_shared(_N, _Dt)
, m_Edst(_Edst)
, m_Edt(_Edt)
, m_lifeMs(_lifeMs)
{ // construct with nullptr, deleter
_CheckInConstructor(std::forward<Args>(_args)...);
}
template<class _Dx,
class _Alloc,
typename... Args>
cache(std::nullptr_t _N, _Dx _Dt, _Alloc _Ax, DataStoreType _Edst, DataType _Edt, time_t _lifeMs, Args&&... _args)
: m_shared(_N, _Dt, _Ax)
, m_Edst(_Edst)
, m_Edt(_Edt)
, m_lifeMs(_lifeMs)
{ // construct with nullptr, deleter, allocator
_CheckInConstructor(std::forward<Args>(_args)...);
}
template<class _Ux,
class _Dx,
class _Alloc,
typename... Args>
cache(_Ux *_Px, _Dx _Dt, _Alloc _Ax, DataStoreType _Edst, DataType _Edt, time_t _lifeMs, Args&&... _args)
: m_shared(_Px, _Dt, _Ax)
, m_Edst(_Edst)
, m_Edt(_Edt)
, m_lifeMs(_lifeMs)
{ // construct with _Px, deleter, allocator
_CheckInConstructor(std::forward<Args>(_args)...);
}
template<class _Ty2>
cache(const cache<_Ty2>& _Right, _Ty *_Px) noexcept : m_shared(_Right.m_shared, _Px)
{ // construct cache object that aliases _Right
m_managed = _Right.m_managed;
m_Edst = _Right.m_Edst;
m_Edt = _Right.m_Edt;
m_lifeMs = _Right.m_lifeMs;
_CheckInConstructor();
}
cache(const _Myt& _Other) noexcept : m_shared(_Other.m_shared)
{ // construct cache object that owns same resource as _Other
m_managed = _Other.m_managed;
m_Edst = _Other.m_Edst;
m_Edt = _Other.m_Edt;
m_lifeMs = _Other.m_lifeMs;
_CheckInConstructor();
}
template<class _Ty2,
class = typename std::enable_if<std::is_convertible<_Ty2 *, _Ty *>::value,
void>::type>
cache(const cache<_Ty2>& _Other) noexcept : m_shared(_Other.m_shared)
{ // construct cache object that owns same resource as _Other
m_managed = _Other.m_managed;
m_Edst = _Other.m_Edst;
m_Edt = _Other.m_Edt;
m_lifeMs = _Other.m_lifeMs;
_CheckInConstructor();
}
_Myt& operator=(_Myt&& _Right) noexcept
{ // take resource from _Right
cache(std::move(_Right)).swap(*this);
return (*this);
}
template<class _Ty2>
_Myt& operator=(cache<_Ty2>&& _Right) noexcept
{ // take resource from _Right
cache(std::move(_Right)).swap(*this);
return (*this);
}
_Myt& operator=(const _Myt& _Right) noexcept
{ // assign shared ownership of resource owned by _Right
cache(_Right).swap(*this);
return (*this);
}
template<class _Ty2>
_Myt& operator=(const cache<_Ty2>& _Right) noexcept
{ // assign shared ownership of resource owned by _Right
cache(_Right).swap(*this);
return (*this);
}
// NOTE(review): cache_base declares no move ctor, so the base subobject is
// copied here — _Right keeps its m_managed pointer, but its m_shared is
// emptied, so its destructor's use_count() check is harmless.
cache(_Myt&& _Right) noexcept
: cache_base(std::move(_Right))
, m_shared(std::move(_Right.m_shared))
, m_Edst(std::move(_Right.m_Edst))
, m_Edt(std::move(_Right.m_Edt))
, m_lifeMs(std::move(_Right.m_lifeMs))
{ // construct cache object that takes resource from _Right
}
void swap(_Myt& _Other) noexcept
{ // swap pointers
m_shared.swap(_Other.m_shared);
std::swap(m_managed, _Other.m_managed);
std::swap(m_Edst, _Other.m_Edst);
std::swap(m_Edt, _Other.m_Edt);
std::swap(m_lifeMs, _Other.m_lifeMs);
}
_Ty *get() const noexcept
{ // return pointer to resource
return (m_shared.get());
}
long use_count() const noexcept
{ // return use count
return m_shared.use_count();
}
typename std::add_lvalue_reference<_Ty>::type operator*() const noexcept
{ // return reference to resource
return (*(m_shared.get()));
}
_Ty *operator->() const noexcept
{ // return pointer to resource
return (m_shared.get());
}
~cache() noexcept
{ // release resource; may start the TTL countdown (see _CheckInDestructor)
_CheckInDestructor();
}
private:
std::shared_ptr<_Ty> m_shared; // the actual payload, shared with the master
DataStoreType m_Edst = DS_Err; // overwrite vs append on update
DataType m_Edt = DT_Err; // application data-type discriminator
time_t m_lifeMs = 60000; // lifetime in ms; time_t(-1) = infinite
};
}
template<class _Ty>
void swap(ttl::cache<_Ty>& _Left,
ttl::cache<_Ty>& _Right) noexcept
{ // swap _Left and _Right shared_ptrs
_Left.swap(_Right);
}
#endif // _TTL_CACHE_H_
代码基于标准c++14(头文件内使用了std::chrono的5000ms时长字面量、std::remove_const_t等c++14特性),所以理论上不存在平台限制(目前已经验证能在"g++7.2.0","ms-vs2015"和"clang-900.0.38"编译通过)。
使用场景:
时序图如下:
上面的ttl_cache系统就是图中的"cache layer"。
存在的问题:
1. “cache_mgr”的接口都使用了泛型参数,并且内部为了兼容不同数据类型使用了void*指针强转,所以在编程上面容易引起编译期发现不了但是运行期可能导致崩溃的问题(内存损坏问题已经解决,请参照最新代码),但是理论上来说只要严格遵照自己设计的数据格式来对应Set和Get的参数,是不会出现上述问题的。此问题可以通过再次包装,通过模板的编译期分派,提供更高一层的确定参数的Get和Set版本,从而在编译期就能够发现参数不对应问题。相关代码我会在后续补上(对SetCache的改动涉及到代码结构的调整,暂时不提供代码,具体原因请参考github中todolist)。
2. 同样由于使用了void*,如果在缓存记录结构析构时缓存没有全部消费掉,清理map时调用delete(void*)会有问题,因为无法得到类型信息,delete并不会调用析构函数(已解决)。
3. 对使用该缓存系统的数据类型的要求:
1) 作为Set和Get操作中“keys“的参数类型都需要能够比较,即operator<的实现
2) 指定了暂存类型为“DS_QUEUE”的数据结构需要支持operator+=操作(再次感谢std开源,参照std::is_assignable实现了ttl::type_traits::is_appendable)
4. 目前只是从原理上初步实现并验证了此缓存系统的可行性,但是在目前的第一个版本中肯定还存在各种问题,诸如一些细节上的实现技巧是否有更好的方式,算法和数据结构选择是否合理,内部处理策略是否可以再优化,是否应该选用合适的内存池来组织缓存数据等等。还希望有大神能够不吝赐教。
使用例程:(以下测试例程只为简单说明使用方法,所以均使用单一线程,但实际使用中创建缓存和使用缓存一般不会出现在同一位置,可能是不同的线程之间,需要注意)
#include "ttl_cache.h"
#include <iostream>
#include <string>
using namespace std::chrono;
using namespace std::string_literals;
enum EmDataType : ttl::DataType // application data types (first-level cache key)
{
DT_Err = ttl::DT_Err, // error (mirrors the library's sentinel)
DT_AccountInfo, // a single account record
DT_AccountList, // the list of accounts per owner
DT_TradeHistory, // the trade history per account
};
// Demo payload type: a bank account record. Every special member prints so
// the example programs show exactly when copies/moves/destructions happen.
class AccountInfo
{
public:
    AccountInfo(long id, const std::string& owner, const std::string& bank, unsigned long balance)
        : m_id(id)
        , m_owner(owner)
        , m_bank(bank)
        , m_balance(balance)
    {
        std::cout << "AccountInfo standard constructor." << std::endl;
    }
    // Copy: use the member-init list (the original assigned in the body,
    // default-constructing the strings and then reassigning them).
    AccountInfo(const AccountInfo& right)
        : m_id(right.m_id)
        , m_owner(right.m_owner)
        , m_bank(right.m_bank)
        , m_balance(right.m_balance)
    {
        std::cout << "AccountInfo copy constructor." << std::endl;
    }
    // Move: steal the strings instead of copying them.
    AccountInfo(AccountInfo&& right)
        : m_id(right.m_id)
        , m_owner(std::move(right.m_owner))
        , m_bank(std::move(right.m_bank))
        , m_balance(right.m_balance)
    {
        std::cout << "AccountInfo move constructor." << std::endl;
    }
    ~AccountInfo()
    {
        std::cout << "AccountInfo destructor." << std::endl;
    }
public:
    long m_id;               // account id
    std::string m_owner;     // account holder
    std::string m_bank;      // bank name
    unsigned long m_balance; // balance
};
// Merge `right` into `left` by moving every element across. `right` is
// deliberately a non-const reference: in the intended usage it does not need
// to be preserved, so moving out of it is cheaper than copying.
std::list<AccountInfo>& operator+=(std::list<AccountInfo>& left, std::list<AccountInfo>& right)
{
    for (auto& item : right)
        left.push_back(std::move(item));
    return left;
}
// Demo payload type: the running trade history of one account. Supports
// operator+= so it can be stored with the DS_QUEUE (append-on-update) policy.
class TradeHistory
{
public:
    TradeHistory(long account)
        : m_account(account)
    {
        std::cout << "TradeHistory standard constructor." << std::endl;
    }
    // Copy: use the member-init list (the original assigned in the body,
    // default-constructing m_history and then reassigning it).
    TradeHistory(const TradeHistory& right)
        : m_account(right.m_account)
        , m_history(right.m_history)
    {
        std::cout << "TradeHistory copy constructor." << std::endl;
    }
    // Move: steal the history list.
    TradeHistory(TradeHistory&& right)
        : m_account(right.m_account)
        , m_history(std::move(right.m_history))
    {
        std::cout << "TradeHistory move constructor." << std::endl;
    }
    ~TradeHistory()
    {
        std::cout << "TradeHistory destructor." << std::endl;
    }
    // Append `that`'s records (moving them out) when the accounts match.
    // Deliberately takes a non-const ref so the entries can be moved.
    TradeHistory& operator+=(TradeHistory& that)
    {
        if (m_account == that.m_account)
            for (auto& rec : that.m_history)
                m_history.push_back(std::move(rec));
        return *this;
    }
public:
    long m_account;
    std::list<std::pair<long, std::string>> m_history; // list of <amount (+income/-expense), description>
};
void testttlcache_1()// single-record (DS_SINGLE) caching
{
// 1. lookup fails while nothing has been cached yet
ttl::cache<AccountInfo> s1;
// by convention the trailing two arguments are the keys: account 100101 owned by "zxj"
bool ret1 = ttl::cache_mgr::Instance().GetCache(s1, DT_AccountInfo, "zxj"s, 100101);
// 2.1) add a cache entry with a 10-second lifetime
{
ttl::cache<AccountInfo> c1(new AccountInfo(100101, "zxj", "中国银行", 666666), ttl::DS_SINGLE, DT_AccountInfo, 10000, "zxj"s, 100101);
}
// 2.2) even though c1 has left scope, the entry is still retrievable
//      [note: key types must match those used when the entry was added]
ttl::cache<AccountInfo> s2;
bool ret2 = ttl::cache_mgr::Instance().GetCache(s2, DT_AccountInfo, "zxj"s, 100101);
// 3.1) entries that were never added cannot be fetched
ttl::cache<AccountInfo> s3;
bool ret3 = ttl::cache_mgr::Instance().GetCache(s3, DT_AccountInfo, "lxy"s, 100102);
// 3.2) add a cache entry with a 10-second lifetime
ttl::cache<AccountInfo> c2(new AccountInfo(100102, "lxy", "建设银行", 654321), ttl::DS_SINGLE, DT_AccountInfo, 10000, "lxy"s, 100102);
// 3.3) now it can be fetched (contrast with step 3.1)
ttl::cache<AccountInfo> s4;
bool ret4 = ttl::cache_mgr::Instance().GetCache(s4, DT_AccountInfo, "lxy"s, 100102);
// 4.1) overwrite the cached balance (DS_SINGLE = update overwrites)
ttl::cache<AccountInfo> c3(new AccountInfo(100101, "zxj", "中国银行", 999999), ttl::DS_SINGLE, DT_AccountInfo, 10000, "zxj"s, 100101);
// 4.2) fetching again yields the updated data
ttl::cache<AccountInfo> s5;
bool ret5 = ttl::cache_mgr::Instance().GetCache(s5, DT_AccountInfo, "zxj"s, 100101);
// 5.1) clear the entry for account 100101 owned by "zxj"
ttl::cache_mgr::Instance().ClrCache(DT_AccountInfo, "zxj"s, 100101);
// 5.2) the entry can no longer be fetched
ttl::cache<AccountInfo> s6;
bool ret6 = ttl::cache_mgr::Instance().GetCache(s6, DT_AccountInfo, "zxj"s, 100101);
// 6. WARNING: creating a new entry with a keys signature different from the
//    first call for this DataType corrupts memory and may crash
ttl::cache<AccountInfo> c4(new AccountInfo(100101, "zxj", "中国银行", 888888), ttl::DS_SINGLE, DT_AccountInfo, 10000, "zxj"s, 100101,"中国银行"s);
ttl::cache<AccountInfo> s7;
bool ret7 = ttl::cache_mgr::Instance().GetCache(s7, DT_AccountInfo, "zxj"s, 100101, "中国银行"s);
// 7. once the lifetime elapses the entry can no longer be fetched
//    [note: the exact behavior depends on cache_mgr's TtlStrategy]
std::this_thread::sleep_for(std::chrono::seconds(10));
ttl::cache<AccountInfo> s8;
bool ret8 = ttl::cache_mgr::Instance().GetCache(s8, DT_AccountInfo, "lxy"s, 100102);
}
// Demo 2: queued (DS_QUEUE) caching of a std::list payload — updates append
// via the free operator+= defined above.
void testttlcache_2()
{
// 1. lookup fails while nothing has been cached yet
ttl::cache<std::list<AccountInfo>> s1;
// a single key: the account list owned by "zxj"
bool ret1 = ttl::cache_mgr::Instance().GetCache(s1, DT_AccountList,"zxj"s);
// 2.1) add an entry; queued data is typically live-updated (e.g. pushed by a
//      subscription), so the lifetime may be infinite (time_t(-1))
auto l1 = new std::list<AccountInfo>;
l1->emplace_back(100103, "zxj", "交通银行", 900000);
ttl::cache<std::list<AccountInfo>> c1(l1, ttl::DS_QUEUE, DT_AccountList, -1, "zxj"s);
// 2.2) fetch the entry added above
ttl::cache<std::list<AccountInfo>> s2;
bool ret2 = ttl::cache_mgr::Instance().GetCache(s2, DT_AccountList, "zxj"s);
// 3.1) append two more items
auto l2 = new std::list<AccountInfo>;
l2->emplace_back(100104, "zxj", "农业银行", 980000);
l2->emplace_back(100105, "zxj", "工商银行", 990000);
ttl::cache<std::list<AccountInfo>> c2(l2, ttl::DS_QUEUE, DT_AccountList, -1, "zxj"s);
// 3.2) fetching now should yield three items
ttl::cache<std::list<AccountInfo>> s3;
bool ret3 = ttl::cache_mgr::Instance().GetCache(s3, DT_AccountList, "zxj"s);
// 4. clear the cached data under "zxj"
ttl::cache_mgr::Instance().ClrCache(DT_AccountList,"zxj"s);
// 5.1) add a fresh item
auto l3 = new std::list<AccountInfo>;
l3->emplace_back(100106, "zxj", "招商银行", 970000);
ttl::cache<std::list<AccountInfo>> c3(l3, ttl::DS_QUEUE, DT_AccountList, -1, "zxj"s);
// 5.2) fetching now should yield exactly one item
ttl::cache<std::list<AccountInfo>> s4;
bool ret4 = ttl::cache_mgr::Instance().GetCache(s4, DT_AccountList, "zxj"s);
}
// Demo 3: queued (DS_QUEUE) caching of a user type with a member operator+=.
void testttlcache_3()
{
// 1. lookup fails while nothing has been cached yet
ttl::cache<TradeHistory> s1;
// a single key: the trade history for account 100101
bool ret1 = ttl::cache_mgr::Instance().GetCache(s1, DT_TradeHistory, 100101);
// 2.1) add an entry; queued data is typically live-updated (e.g. pushed by a
//      subscription), so the lifetime may be infinite (time_t(-1))
auto t1 = new TradeHistory(100101);
t1->m_history.emplace_back(6666, "发工资啦");
t1->m_history.emplace_back(-1000, "楼下足疗店一掷千金");
ttl::cache<TradeHistory> c1(t1, ttl::DS_QUEUE, DT_TradeHistory, -1, 100101);
// 2.2) fetch the entry added above
ttl::cache<TradeHistory> s2;
bool ret2 = ttl::cache_mgr::Instance().GetCache(s2, DT_TradeHistory, 100101);
// 3.1) append two more records
auto t2 = new TradeHistory(100101);
t2->m_history.emplace_back(100000104, "突然多了这么多,银行系统bug了?");
t2->m_history.emplace_back(-100000000, "XX银行:sorry~,上一笔系清洁工手误。。");
ttl::cache<TradeHistory> c2(t2, ttl::DS_QUEUE, DT_TradeHistory, -1, 100101);
// 3.2) fetching now should yield four records
ttl::cache<TradeHistory> s3;
bool ret3 = ttl::cache_mgr::Instance().GetCache(s3, DT_TradeHistory, 100101);
// 4. clear the cached data for account 100101
ttl::cache_mgr::Instance().ClrCache(DT_TradeHistory, 100101);
// 5.1) add a fresh record
auto t3 = new TradeHistory(100101);
t3->m_history.emplace_back(-1314520, "emmm,光棍节发红包支出");
ttl::cache<TradeHistory> c3(t3, ttl::DS_QUEUE, DT_TradeHistory, -1, 100101);
// 5.2) fetching now should yield exactly one record
ttl::cache<TradeHistory> s4;
bool ret4 = ttl::cache_mgr::Instance().GetCache(s4, DT_TradeHistory, 100101);
}
// Run all three demo scenarios in sequence.
void test_ttlcache()
{
testttlcache_1();
testttlcache_2();
testttlcache_3();
}
// Program entry point: run all ttl_cache demos.
int main()
{
    test_ttlcache();
    // Fix: return 0 (success). The original returned 1, which the shell/CI
    // interprets as failure even when the demos ran to completion.
    return 0;
}
测试例程中使用了针对std::string的operator""s(c14)操作符,所以g++或clang编译请加上-std=c++14选项