目录
测试程序
模拟CPU根据主存地址,向高速缓冲的读操作
以及DMA或主存向缓冲的写操作
包含90%的写操作和10%的读操作
最终计算命中率和读写总次数
int main(void){
    constexpr auto ADDRESS_MAX { 1024U };   // total main-memory block address space: 1K
    using Address_t = int32_t;
    using Data_t = char;
    LFUCache<Address_t, Data_t> cache;
    // One seeded standard-library engine drives everything.
    // (The original mixed srand()/rand() with this mt19937; a single
    // generator is simpler and higher quality.)
    std::mt19937 gen(std::random_device{}());
    std::uniform_int_distribution<> percent(1, 100);               // 1..100 -> exact 90/10 split
    std::uniform_int_distribution<Address_t> addr_dist(0, ADDRESS_MAX - 1);
    std::uniform_int_distribution<> letter(0, 25);                 // 'a'..'z'
    int rounds { 1024 };            // total number of operations
    std::size_t hits { 0U };        // read hits
    std::size_t misses { 0U };      // read misses
    std::size_t loads { 0U };       // write operations
    std::size_t fetches { 0U };     // read operations
    while( rounds-- ){
        if(percent(gen) <= 90){     // 90% writes (DMA / main memory -> cache)
            ++loads;
            Address_t address { addr_dist(gen) };   // uniform address: deliberately ignores locality
            char data { static_cast<char>(letter(gen) + 'a') };
            std::printf("load [%x, %c]...\n", address, data);
            cache.put({address, data});
        }else{                      // 10% reads (CPU fetch by main-memory address)
            ++fetches;
            Address_t address { addr_dist(gen) };
            std::printf("try to fetch data at address [%x] from cache...\n", address);
            auto result { cache.get(address) };
            if( result.has_value() ){   // hit
                ++hits;
                std::printf("hit the data [%c] from cache...\n", *result);
            }else{                      // miss
                ++misses;
                std::puts("miss the data from cache...");   // puts() appends '\n' itself
            }
        }
    }
    std::puts("-------result-------------");   // summary of the run
    std::cout << "load times:\t"  << loads   << '\n'
              << "fetch times:\t" << fetches << '\n'
              << "hit times:\t"   << hits    << '\n'
              << "miss times:\t"  << misses  << '\n'
              << "hit rate:\t"
              // std::fixed is the manipulator; streaming std::ios::fixed (a
              // fmtflags value, as the original did) just prints its integer value.
              << std::fixed
              << std::setprecision(4)
              // guard: with 0 fetches the original divided by zero
              << (fetches ? static_cast<double>(hits) / fetches : 0.0);
    std::endl(std::cout);
    return 0;
}
程序运行时截图
Cache实现部分
#include <unordered_map>
#include <concepts>
#include <type_traits>
#include <functional>
#include <optional>
#include <utility>
#include <deque>
#include <algorithm>
#include <random>
#include <cstdint>
#include <ctime>
#include <cstdio>
#include <exception>
#include <iostream>
#include <iomanip>
template <class Key, class Value,
          class Hash = std::hash<Key>,
          // The hasher must yield something convertible to std::size_t.
          // (The original used std::result_of — deprecated in C++17, removed
          // in C++20; std::invoke_result_t is the supported replacement.)
          class = std::enable_if_t<std::is_convertible_v<
                      std::invoke_result_t<std::decay_t<Hash>, const Key&>,
                      std::size_t>>>
class LFUCache{
public:
    typedef std::pair<Key, Value> store_type;
    typedef std::pair<std::size_t, Key> frequency_type;
    typedef std::optional<Value> get_return_type;
    static constexpr std::size_t default_capacity { 64U };
protected:
    // key -> cached value
    std::unordered_map<Key, Value, Hash> hash;
    // (use-count, key) bookkeeping entries, one per cached key, unordered.
    // NOTE: the original stored deque iterators in `associator` and re-ran
    // make_heap on every hit; deque growth and make_heap both
    // invalidate/permute those iterators, so the association was unreliable.
    // Plain indices plus an explicit rebuild after eviction are used instead.
    std::deque<frequency_type> cache;
    // key -> index of that key's entry in `cache`
    std::unordered_map<Key, std::size_t> associator;
private:
    std::size_t capacity { default_capacity };
    template <class K2, class V2, class H2, class>
    friend class LFUCache;
    // Rebuild `associator` after an eviction: deque::erase in the middle
    // shifts every later entry's index. O(capacity), paid only on eviction.
    void reindex(){
        associator.clear();
        for(std::size_t i = 0; i < cache.size(); ++i)
            associator[cache[i].second] = i;
    }
public:
    // (not constexpr/noexcept beyond what the members support: map/deque
    // members have no constexpr constructors, so `constexpr ... = default`
    // as in the original is ill-formed)
    LFUCache(void) = default;
    LFUCache(std::size_t __capacity) noexcept : capacity(__capacity) {}
    LFUCache(const LFUCache&) = default;
    // Same-type move. The original misspelled `capaicty` and forgot to move
    // `associator`, leaving the target inconsistent.
    LFUCache(LFUCache&& other) noexcept
        : hash( std::exchange(other.hash, decltype(hash){}))
        , cache( std::exchange(other.cache, decltype(cache){}))
        , associator(std::exchange(other.associator, decltype(associator){}))
        , capacity(other.capacity) {}
    // Converting move: drains a cache whose key/value types convert to ours.
    // (The original required Key -> __Key, i.e. the conversion backwards.)
    template <class K2, class V2, class H2>
    explicit LFUCache(LFUCache<K2, V2, H2>&& other)
        : capacity(other.capacity)
    {
        static_assert(std::is_convertible_v<K2, Key>
                   && std::is_convertible_v<V2, Value>,
                      "source key/value types must be convertible to the target's");
        for(auto& kv : other.hash)
            hash.emplace(static_cast<Key>(kv.first),
                         static_cast<Value>(std::move(kv.second)));
        for(const auto& fk : other.cache)
            cache.emplace_back(fk.first, static_cast<Key>(fk.second));
        reindex();
        other.hash.clear();
        other.cache.clear();
        other.associator.clear();
    }
    // Look up `key`. On a hit, bump its use count and return the cached
    // value; on a miss, return std::nullopt.
    [[nodiscard]]   // original wrote [[discard]], which is not a standard attribute
    get_return_type get(const Key& key){
        auto pos = associator.find(key);
        if(pos == associator.end())
            return std::nullopt;            // miss
        ++cache[pos->second].first;         // record one more use
        // Original bug: it returned make_optional(key) — the KEY, silently
        // converted to Value. Return the stored value instead.
        return std::make_optional(hash.find(key)->second);
    }
    // Insert or overwrite a (key, value) pair. When full, evict the entry
    // with the LOWEST use count (LFU). The original heapified with
    // std::less and popped the front — the heap MAX, i.e. it evicted the
    // MOST frequently used entry.
    void put(const store_type& p){
        const auto& [k, v] = p;
        if(auto it = hash.find(k); it != hash.end()){
            it->second = v;                 // known key: refresh the value only
            return;
        }
        if(cache.size() >= capacity){
            auto victim = std::min_element(cache.begin(), cache.end(),
                [](const frequency_type& a, const frequency_type& b){
                    return a.first < b.first;
                });
            hash.erase(victim->second);
            associator.erase(victim->second);
            cache.erase(victim);
            reindex();                      // indices after victim have shifted
        }
        hash.emplace(k, v);
        cache.emplace_back(0U, k);          // new entries start with 0 uses
        associator[k] = cache.size() - 1;
    }
};