BlockInfo
struct BlockInfo
{
uint32_t block_id_; //block id
int32_t version_; //current version of the block
int32_t file_count_; //number of files currently stored
int32_t size_; //total size of the stored file data
int32_t del_file_count_; //number of deleted files
int32_t del_size_; //total size of deleted file data
uint32_t seq_no_; //next file id to be allocated
BlockInfo()
{
memset(this,0,sizeof(BlockInfo)); //trick: zero all member variables at once
}
inline bool operator == (const BlockInfo & rhs) const
{
return block_id_ == rhs.block_id_ && version_ == rhs.version_
&& file_count_ == rhs.file_count_ && size_ == rhs.size_
&& del_file_count_ == rhs.del_file_count_ && del_size_ == rhs.del_size_
&& seq_no_ == rhs.seq_no_;
}
};
MetaInfo
struct MetaInfo
{
public:
MetaInfo()
{
init();
}
MetaInfo(const int64_t file_id,const int32_t inner_offset,const int32_t file_size,const int32_t next_meta_offset)
{
fileid_ = file_id;
location_.inner_offset_ = inner_offset;
location_.size_ = file_size;
next_meta_offset_ = next_meta_offset;
}
MetaInfo(const MetaInfo& meta_info)
{
memcpy(this,&meta_info,sizeof(MetaInfo));
}
MetaInfo& operator=(const MetaInfo& meta_info)
{
if(this == &meta_info)
{
return *this;
}
fileid_ = meta_info.fileid_;
location_.inner_offset_ = meta_info.location_.inner_offset_;
location_.size_ = meta_info.location_.size_;
next_meta_offset_ = meta_info.next_meta_offset_;
return *this;
}
// clone
MetaInfo& clone(const MetaInfo& meta_info)
{
assert(this != &meta_info);
fileid_ = meta_info.fileid_;
location_.inner_offset_ = meta_info.location_.inner_offset_;
location_.size_ = meta_info.location_.size_;
next_meta_offset_ = meta_info.next_meta_offset_ ;
return *this;
}
bool operator == (const MetaInfo& rhs)
{
return fileid_ == rhs.fileid_ && location_.inner_offset_ == rhs.location_.inner_offset_
&& location_.size_ == rhs.location_.size_ && next_meta_offset_ == rhs.next_meta_offset_;
}
//key
uint64_t get_key() const
{
return fileid_;
}
void set_key(const int64_t key)
{
fileid_ = key;
}
//fileid_
uint64_t get_file_id() const
{
return fileid_;
}
void set_file_id(const uint64_t file_id)
{
fileid_ = file_id;
}
//inner_offset_
int32_t get_offset() const
{
return location_.inner_offset_;
}
void set_offset(const int32_t offset)
{
location_.inner_offset_ = offset;
}
//size_
int32_t get_size() const
{
return location_.size_;
}
void set_size(const int32_t file_size)
{
location_.size_ = file_size;
}
//offset
int32_t get_next_meta_offset() const
{
return next_meta_offset_;
}
void set_next_meta_offset(const int32_t offset)
{
next_meta_offset_ = offset;
}
private:
uint64_t fileid_; //file id
struct
{
int32_t inner_offset_;
int32_t size_;
}location_;
int32_t next_meta_offset_; //offset in the index file of the next node of this hash chain
private:
void init()
{
fileid_ = 0;
location_.inner_offset_ = 0;
location_.size_ = 0;
next_meta_offset_ = 0;
}
};
common.h
#ifndef _COMMON_H_
#define _COMMON_H_
#include <iostream> // C++ iostream, for console output
#include <fcntl.h> // file control, e.g. open() and its flags
#include <sys/types.h> // basic system data types
#include <sys/stat.h> // file status functions, e.g. stat()
#include <sys/mman.h> // memory mapping of files (mmap)
#include <string> // C++ string class
#include <string.h> // C string functions
#include <stdint.h> // fixed-width integer types
#include <errno.h> // global errno variable
#include <stdio.h> // C standard I/O functions
#include <unistd.h> // basic POSIX system calls
#include <stdlib.h> // C standard library functions
#include <inttypes.h>
#include <assert.h>
namespace Airwave
{
namespace largefile
{
const int32_t TFS_SUCCESS = 0; //success
const int32_t TFS_ERROR = -1; //error
const int32_t EIXT_DISK_OPER_INCOMPLETE = -8012; //read or write length is less than required
const int32_t EXIT_INDEX_ALREADY_LOADED_ERROR = -8013; //the index file has already been loaded
const int32_t EXIT_META_UNEXPECT_FOUND_ERROR = -8014; //meta found in index when insert
const int32_t EXIT_INDEX_CORRUPT_ERROR = -8015; //index is corrupt.
const int32_t EXIT_BLOCKID_CONFLICT_ERROR = -8016;
const int32_t EXIT_BUCKET_CONFIGURE_ERROR = -8017;
const int32_t EXIT_META_NOT_FOUND_ERROR = -8018;
const int32_t EXIT_BLOCKID_ZERO_ERROR = -8019;
static const std::string MAINBLOCK_DIR_PREFIX = "/mainblock/";
static const std::string INDEX_DIR_PREFIX = "/index/";
static const mode_t DIR_MODE = 0755; //directory permission mode
enum OperType
{
C_OPER_INSERT = 1,
C_OPER_DELETE
};
//parameters describing the memory-mapping options
struct MMapOption
{
int32_t max_mmap_size_; //maximum size of the mapping
int32_t first_mmap_size_; //size of the initial mapping
int32_t per_mmap_size_; //size added on each subsequent remapping
};
struct BlockInfo
{
uint32_t block_id_; //block id
int32_t version_; //current version of the block
int32_t file_count_; //number of files currently stored
int32_t size_; //total size of the stored file data
int32_t del_file_count_; //number of deleted files
int32_t del_size_; //total size of deleted file data
uint32_t seq_no_; //next file id to be allocated
BlockInfo()
{
memset(this,0,sizeof(BlockInfo)); //trick: zero all member variables at once
}
inline bool operator == (const BlockInfo & rhs) const
{
return block_id_ ==rhs.block_id_ && version_ == rhs.version_
&& file_count_ == rhs.file_count_ && size_ == rhs.size_
&& del_file_count_ == rhs.del_file_count_ && del_size_ == rhs.del_size_
&& seq_no_ == rhs.seq_no_;
}
};
struct MetaInfo
{
public:
MetaInfo()
{
init();
}
MetaInfo(const int64_t file_id,const int32_t inner_offset,const int32_t file_size,const int32_t next_meta_offset)
{
fileid_ = file_id;
location_.inner_offset_ = inner_offset;
location_.size_ = file_size;
next_meta_offset_ = next_meta_offset;
}
MetaInfo(const MetaInfo& meta_info)
{
memcpy(this,&meta_info,sizeof(MetaInfo));
}
MetaInfo& operator=(const MetaInfo& meta_info)
{
if(this == &meta_info)
{
return *this;
}
fileid_ = meta_info.fileid_;
location_.inner_offset_ = meta_info.location_.inner_offset_;
location_.size_ = meta_info.location_.size_;
next_meta_offset_ = meta_info.next_meta_offset_;
return *this;
}
// clone
MetaInfo& clone(const MetaInfo& meta_info)
{
assert(this != &meta_info);
fileid_ = meta_info.fileid_;
location_.inner_offset_ = meta_info.location_.inner_offset_;
location_.size_ = meta_info.location_.size_;
next_meta_offset_ = meta_info.next_meta_offset_ ;
return *this;
}
bool operator == (const MetaInfo& rhs)
{
return fileid_ == rhs.fileid_ && location_.inner_offset_ == rhs.location_.inner_offset_
&& location_.size_ == rhs.location_.size_ && next_meta_offset_ == rhs.next_meta_offset_;
}
//key
uint64_t get_key() const
{
return fileid_;
}
void set_key(const int64_t key)
{
fileid_ = key;
}
//fileid_
uint64_t get_file_id() const
{
return fileid_;
}
void set_file_id(const uint64_t file_id)
{
fileid_ = file_id;
}
//inner_offset_
int32_t get_offset() const
{
return location_.inner_offset_;
}
void set_offset(const int32_t offset)
{
location_.inner_offset_ = offset;
}
//size_
int32_t get_size() const
{
return location_.size_;
}
void set_size(const int32_t file_size)
{
location_.size_ = file_size;
}
//offset
int32_t get_next_meta_offset() const
{
return next_meta_offset_;
}
void set_next_meta_offset(const int32_t offset)
{
next_meta_offset_ = offset;
}
private:
uint64_t fileid_; //file id
struct
{
int32_t inner_offset_;
int32_t size_;
}location_;
int32_t next_meta_offset_; //offset in the index file of the next node of this hash chain
private:
void init()
{
fileid_ = 0;
location_.inner_offset_ = 0;
location_.size_ = 0;
next_meta_offset_ = 0;
}
};
}
}
#endif // _COMMON_H_
- Data files are stored as block files (typically 64 MB per block), referred to below as "blocks". Each block has a unique integer id, and all of the storage a block will use is pre-allocated and initialized before the block is put into service.
- Each block consists of one index file, one main block file and possibly several extension blocks. "Small files" are stored mainly in the main block; the extension blocks hold overflow data.
- Each index file stores the corresponding block information and the index entries of the "small files". The index file is mapped into memory (mmap) when the service starts, which greatly speeds up file lookups.
- The "small file" index entries are implemented as hash chains stored inside the index file. Every file has a file id, starting at 1 and increasing monotonically; the id serves as the key of the hash lookup used to locate the "small file"'s offset inside the main block or an extension block (a minimal lookup sketch follows this list). The file id combined with the block id can be turned, by some algorithm, into the "small file"'s file name.
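The following is a minimal, self-contained sketch of that hash-chain lookup. It is illustrative only: a std::vector stands in for the mmap'd index file, the hypothetical Node struct stands in for MetaInfo, and node indices play the role of file offsets, with 0 reserved as the "no node" marker exactly as offset 0 is in the real index.
#include <cstdint>
#include <vector>

struct Node // stand-in for MetaInfo: a key plus the link to the next node
{
    uint64_t key;
    int32_t next; // index of the next node in the chain, 0 = end of chain
};

// Walk the chain of the bucket selected by the key and return the node's
// index, or 0 if the key is not present (index 0 is reserved as a sentinel).
int32_t chain_find(const std::vector<int32_t>& buckets,
                   const std::vector<Node>& nodes,
                   const uint64_t key)
{
    int32_t slot = static_cast<uint32_t>(key) % static_cast<uint32_t>(buckets.size());
    for (int32_t pos = buckets[slot]; pos != 0; pos = nodes[pos].next)
    {
        if (nodes[pos].key == key)
        {
            return pos;
        }
    }
    return 0; // not found
}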
index_handle.h
#ifndef AIRWAVE_LARGEFILE_INDEX_HANDLE_H_
#define AIRWAVE_LARGEFILE_INDEX_HANDLE_H_
#include "common.h"
#include "mmap_file_op.h"
namespace Airwave
{
namespace largefile
{
//index file handling
struct IndexHeader
{
IndexHeader()
{
memset(this,0,sizeof(IndexHeader)); //zero-initialize all fields
}
BlockInfo block_info_; //meta block info
int32_t bucket_size_; //hash bucket size
int32_t data_file_offset_; //offset to write next data in block
int32_t index_file_size_; //current offset within the index file: offset after index_header + all buckets
int32_t free_head_offset_; //head of the free meta node list, for reuse
};
class IndexHandle
{
public:
IndexHandle(const std::string& base_path,const uint32_t main_block_id);
~IndexHandle();
// create blockfile ,write index header and buckets info into the file
int create(const uint32_t logic_block_id,const int32_t cfg_bucket_size,const MMapOption map_option);
// load blockfile into memory, check block info
int load(const uint32_t logic_block_id,const int32_t cfg_bucket_size,const MMapOption map_option);
// remove index:unmmap and unlink, clear memory map, delete blockfile
int remove(const uint32_t logic_block_id);
// flush file to disk
int flush();
IndexHeader* index_header()
{
return reinterpret_cast<IndexHeader*> (file_op_->get_map_data()); //address of the mapped index file in memory
}
int update_block_info(const OperType oper_type, const uint32_t modify_size);
BlockInfo* block_info()
{
return reinterpret_cast<BlockInfo*>(file_op_->get_map_data());
}
int32_t bucket_size() const
{
return reinterpret_cast<IndexHeader*> (file_op_->get_map_data())->bucket_size_;
}
int32_t get_block_data_offset() const
{
return reinterpret_cast<IndexHeader*> (file_op_->get_map_data())-> data_file_offset_;
}
int32_t* bucket_slot()
{
return reinterpret_cast<int32_t*> (reinterpret_cast<char*> (file_op_->get_map_data()) + sizeof(IndexHeader));
}
void commit_block_data_offset(const int file_size)
{
reinterpret_cast<IndexHeader*> (file_op_->get_map_data())-> data_file_offset_ += file_size;
}
int32_t free_head_offset() const
{
return reinterpret_cast<IndexHeader*> (file_op_->get_map_data())-> free_head_offset_;
}
int32_t write_segment_meta(const uint64_t key, MetaInfo &meta);
int32_t read_segment_meta(const uint64_t key, MetaInfo &meta);
int32_t delete_segment_meta(const uint64_t key);
int32_t hash_find(const uint64_t key, int32_t& current_offset,int32_t& previous_offset);
int32_t hash_insert(const uint64_t key, int32_t previous_offset, MetaInfo &meta);
private:
bool hash_compare(const uint64_t left_key, const uint64_t right_key)
{
return (left_key == right_key);
}
MMapFileOperation* file_op_;
bool is_load_;
};
}
}
#endif //AIRWAVE_LARGEFILE_INDEX_HANDLE_H_
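For orientation, here is a brief usage sketch of IndexHandle. The base path /tmp/largefile (whose index/ subdirectory is assumed to already exist), the bucket size and the mmap sizes are illustrative assumptions, not values taken from the project; error handling is reduced to early returns.
#include "index_handle.h"
using namespace Airwave::largefile;

int main()
{
    const uint32_t block_id = 1;
    const int32_t bucket_size = 1000;
    const MMapOption map_option = {1024 * 1024, 4096, 4096}; // max / first / per-step mapping sizes

    IndexHandle handle("/tmp/largefile", block_id); // index file path: /tmp/largefile/index/1
    if (handle.create(block_id, bucket_size, map_option) != TFS_SUCCESS)
        return -1;

    MetaInfo meta(1 /*file id*/, 0 /*offset in main block*/, 4096 /*file size*/, 0);
    if (handle.write_segment_meta(meta.get_key(), meta) != TFS_SUCCESS)
        return -1;

    MetaInfo found;
    if (handle.read_segment_meta(meta.get_key(), found) != TFS_SUCCESS)
        return -1;

    handle.flush();
    return 0;
}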
index_handle.cpp
#include"common.h"
#include"index_handle.h"
#include<sstream>
static int debug = 1;
namespace Airwave
{
namespace largefile
{
IndexHandle::IndexHandle(const std::string& base_path,const uint32_t main_block_id)
{
//build the index file path and create the file operation object
std::stringstream tmp_stream;
tmp_stream << base_path <<INDEX_DIR_PREFIX<<main_block_id; // /root/projects/mmap/index/1
std::string index_path;
tmp_stream >> index_path;
file_op_ = new MMapFileOperation(index_path,O_CREAT | O_RDWR | O_LARGEFILE);
is_load_ = false;
}
IndexHandle::~IndexHandle()
{
if(file_op_)
{
delete file_op_;
file_op_ = NULL;
}
}
int IndexHandle::create(const uint32_t logic_block_id,const int32_t cfg_bucket_size,const MMapOption map_option)
{
int ret = 0;
if(debug)
{
printf("create index ,block id:%u , bucket size : %d , max_mmap_size : %d , first_mmap_size: %d , per_mmap_size: %d",
logic_block_id,
cfg_bucket_size,
map_option.max_mmap_size_,
map_option.first_mmap_size_,
map_option.per_mmap_size_);
}
if(is_load_)
{
return EXIT_INDEX_ALREADY_LOADED_ERROR; //the index file has already been loaded
}
int64_t file_size = file_op_->get_file_size();
if(file_size < 0)
{
return TFS_ERROR;
}
else if(file_size == 0) //the file is empty
{
IndexHeader i_header;
i_header.block_info_.block_id_ = logic_block_id;
i_header.block_info_.seq_no_ = 1;
i_header.bucket_size_ = cfg_bucket_size;
i_header.index_file_size_ = sizeof(IndexHeader) + cfg_bucket_size*sizeof(int32_t);
//index header + total buckets
char* init_data = new char[i_header.index_file_size_];
memcpy(init_data,&i_header,sizeof(IndexHeader));
memset(init_data + sizeof(IndexHeader),0,i_header.index_file_size_ - sizeof(IndexHeader));
ret = file_op_->pwrite_file(init_data,i_header.index_file_size_,0);
delete[] init_data;
init_data = NULL;
if(ret != TFS_SUCCESS)
{
return ret;
}
ret = file_op_->flush_file();
if(ret!= TFS_SUCCESS)
{
return ret;
}
}
else //file size > 0: the index already exists
{
return EXIT_META_UNEXPECT_FOUND_ERROR;
}
ret = file_op_->mmap_file(map_option);
if(ret != TFS_SUCCESS)
{
return ret;
}
is_load_ = true;
if(debug)
{
printf("init blockid: %u index successful. data file size: %d, index file size: %d, bucket size: %d, free head offset: %d, \nseqno: %d, size: %d, filecount: %d, del_size: %d, del_file_count: %d version: %d\n",
logic_block_id, index_header()->data_file_offset_, index_header()->index_file_size_,
index_header()->bucket_size_, index_header()->free_head_offset_, block_info()->seq_no_, block_info()->size_,
block_info()->file_count_, block_info()->del_size_, block_info()->del_file_count_, block_info()->version_);
}
return TFS_SUCCESS;
}
int IndexHandle::load(const uint32_t logic_block_id,const int32_t cfg_bucket_size,const MMapOption map_option)
{
int ret = TFS_SUCCESS;
if(is_load_)
{
return EXIT_INDEX_ALREADY_LOADED_ERROR;
}
int64_t file_size = file_op_->get_file_size();
if(file_size<0)
{
return file_size;
}
else if(file_size == 0) //empty file
{
return EXIT_INDEX_CORRUPT_ERROR;
}
MMapOption tmp_map_option = map_option;
if(file_size > tmp_map_option.first_mmap_size_ && file_size <= tmp_map_option.max_mmap_size_)
{
tmp_map_option.first_mmap_size_ = file_size;
}
ret = file_op_->mmap_file(tmp_map_option);
if(ret != TFS_SUCCESS)
{
return ret;
}
if(0 == bucket_size() || 0 == block_info()->block_id_)
{
fprintf(stderr,"Index corrupt error.blockid : %u, bucket size: %d\n",
block_info()->block_id_, bucket_size());
return EXIT_INDEX_CORRUPT_ERROR;
}
//check file size
int32_t index_file_size = sizeof(IndexHeader) + bucket_size()*sizeof(int32_t);
// incomplete index file
if(file_size < index_file_size)
{
fprintf(stderr, "index corrupt error, blockid : %u, bucket size: %d, file size : %ld, index file size : %d\n",
block_info()->block_id_,bucket_size(),file_size,index_file_size);
return EXIT_INDEX_CORRUPT_ERROR;
}
// check block_id
if (logic_block_id != block_info()->block_id_)
{
fprintf(stderr, "block id conflict. blockid: %u, index blockid: %u\n", logic_block_id, block_info()->block_id_);
return EXIT_BLOCKID_CONFLICT_ERROR;
}
// check block size
if (cfg_bucket_size != bucket_size())
{
fprintf(stderr, "Index configure error. old bucket size: %d, new bucket size: %d\n",
bucket_size(),cfg_bucket_size);
return EXIT_BUCKET_CONFIGURE_ERROR;
}
is_load_ = true;
if(debug)
{
printf("load blockid: %u index successful. data file offset: %d, index file size: %d, bucket size: %d, free head offset: %d, \nseqno: %d, size: %d, filecount: %d, del size: %d, del file count: %d version: %d\n",
logic_block_id, index_header()->data_file_offset_, index_header()->index_file_size_, bucket_size(),
index_header()->free_head_offset_, block_info()->seq_no_, block_info()->size_, block_info()->file_count_,
block_info()->del_size_, block_info()->del_file_count_, block_info()->version_);
}
return TFS_SUCCESS;
}
int IndexHandle::remove(const uint32_t logic_block_id)
{
if(is_load_)
{
if(logic_block_id != block_info()->block_id_)
{
fprintf(stderr, "block id conflict. blockid : %d, index blockid : %d\n", logic_block_id, block_info()->block_id_);
return EXIT_BUCKET_CONFIGURE_ERROR;
}
}
int ret = file_op_->munmap_file();
if (TFS_SUCCESS != ret)
return ret;
ret = file_op_->unlink_file();
return ret;
}
int IndexHandle::flush()
{
int ret = file_op_->flush_file();
if(ret != TFS_SUCCESS)
{
fprintf(stderr, " index flush failed, ret : %d error desc : %s\n", ret ,strerror(errno));
}
return ret;
}
int IndexHandle::update_block_info(const OperType oper_type, const uint32_t modify_size)
{
if(block_info()->block_id_ == 0)
{
return EXIT_BLOCKID_ZERO_ERROR;
}
if(oper_type == C_OPER_INSERT)
{
++block_info()->version_;
++block_info()->file_count_;
++block_info()->seq_no_;
block_info()->size_ += modify_size;
}
else if(oper_type == C_OPER_DELETE)
{
++block_info()->version_;
--block_info()->file_count_;
block_info()->size_ -= modify_size;
++block_info()->del_file_count_;
block_info()->del_size_ += modify_size;
}
if(debug)
{
printf("update block info. blockid: %u, version: %u, file count: %u, size: %u, del file count: %u, del size: %u, seq no: %u, oper type: %d\n",
block_info()->block_id_, block_info()->version_, block_info()->file_count_, block_info()->size_,
block_info()->del_file_count_, block_info()->del_size_, block_info()->seq_no_, oper_type);
}
return TFS_SUCCESS;
}
int32_t IndexHandle::write_segment_meta(const uint64_t key, MetaInfo &meta)
{
int ret =TFS_SUCCESS;
int32_t current_offset = 0, previous_offset = 0;
//check whether the key already exists
//1. look the key up in the on-file hash table: hash_find(key, current_offset, previous_offset)
ret = hash_find(key, current_offset, previous_offset);
if(ret == TFS_SUCCESS)
{
return EXIT_META_UNEXPECT_FOUND_ERROR;
}
else if(ret != EXIT_META_NOT_FOUND_ERROR)
{
return ret;
}
//2. if it does not exist, insert meta into the on-file hash table: hash_insert(key, previous_offset, meta)
ret = hash_insert(key, previous_offset, meta);
return ret;
}
int32_t IndexHandle::read_segment_meta(const uint64_t key, MetaInfo &meta)
{
int32_t current_offset = 0,previous_offset = 0;
//1. locate the bucket (slot) the key belongs to
//int32_t slot = static_cast<uint32_t>(key) % bucket_size();
int ret = hash_find(key, current_offset, previous_offset);
if(ret == TFS_SUCCESS)
{
ret = file_op_->pread_file(reinterpret_cast<char*>(&meta), sizeof(MetaInfo), current_offset);
}
return ret;
}
int32_t IndexHandle::delete_segment_meta(const uint64_t key)
{
int ret = TFS_SUCCESS;
int32_t current_offset = 0, previous_offset = 0;
ret = hash_find(key, current_offset, previous_offset);
if(ret != TFS_SUCCESS)
{
return ret;
}
MetaInfo meta_info;
ret = file_op_->pread_file(reinterpret_cast<char*>(&meta_info), sizeof(MetaInfo), current_offset);
if(TFS_SUCCESS != ret)
{
return ret;
}
int32_t next_pos = meta_info.get_next_meta_offset();
if(previous_offset == 0)
{
int32_t slot = static_cast<uint32_t>(key) % bucket_size();
bucket_slot()[slot] = next_pos;
}
else
{
MetaInfo pre_meta_info;
ret = file_op_->pread_file(reinterpret_cast<char*>(&pre_meta_info), sizeof(MetaInfo), previous_offset);
if(TFS_SUCCESS != ret)
{
return ret;
}
pre_meta_info.set_next_meta_offset(next_pos);
ret = file_op_->pwrite_file(reinterpret_cast<char*>(&pre_meta_info), sizeof(MetaInfo), previous_offset);
if(TFS_SUCCESS != ret)
{
return ret;
}
}
//push the deleted node onto the reusable (free) node list
meta_info.set_next_meta_offset(free_head_offset());
ret = file_op_->pwrite_file(reinterpret_cast<char*>(&meta_info), sizeof(MetaInfo), current_offset);
if(TFS_SUCCESS != ret)
{
return ret;
}
index_header()->free_head_offset_ = current_offset;
if(debug) printf("delete_segment_meta-reuse metainfo, current_offset: %d\n",current_offset);
update_block_info(C_OPER_DELETE,meta_info.get_size());
return TFS_SUCCESS;
}
int IndexHandle::hash_find(const uint64_t key, int32_t& current_offset,int32_t& previous_offset)
{
int ret = TFS_SUCCESS;
MetaInfo meta_info;
current_offset = 0;
previous_offset = 0;
//1. locate the bucket (slot) the key belongs to
//2. read the offset of the first node stored in that bucket; if it is zero, the key does not exist, return EXIT_META_NOT_FOUND_ERROR
//3. read the MetaInfo stored at that offset
//4. compare its key with the requested key; if they are equal, set current_offset and previous_offset and return TFS_SUCCESS, otherwise go to step 5
//5. take the offset of the next node from the MetaInfo; if it is 0, the key does not exist, return EXIT_META_NOT_FOUND_ERROR,
//otherwise jump back to step 3 and keep looping
int32_t slot = static_cast<uint32_t>(key) % bucket_size();
int32_t pos = bucket_slot()[slot];
for(;pos != 0;)
{
ret = file_op_->pread_file(reinterpret_cast<char*>(&meta_info), sizeof(MetaInfo), pos);
if(TFS_SUCCESS != ret)
{
return ret;
}
if(hash_compare(key, meta_info.get_key()))
{
current_offset = pos;
return TFS_SUCCESS;
}
previous_offset = pos;
pos = meta_info.get_next_meta_offset();
}
return EXIT_META_NOT_FOUND_ERROR;
}
int32_t IndexHandle::hash_insert(const uint64_t key, int32_t previous_offset, MetaInfo &meta)
{
int ret = TFS_SUCCESS;
MetaInfo tmp_meta_info;
int32_t current_offset = 0;
//1. locate the bucket (slot) the key belongs to
int32_t slot = static_cast<uint32_t>(key) % (bucket_size());
//2. determine the offset in the index file where the MetaInfo node will be stored
if(free_head_offset()!=0)
{
ret = file_op_->pread_file(reinterpret_cast<char*>(&tmp_meta_info), sizeof(MetaInfo), free_head_offset());
if(ret != TFS_SUCCESS)
{
return ret;
}
current_offset = index_header()->free_head_offset_;
if(debug) printf("reuse metainfo, current_offset: %d\n",current_offset);
index_header()->free_head_offset_ = tmp_meta_info.get_next_meta_offset();
}
else
{
current_offset = index_header()->index_file_size_;
index_header()->index_file_size_ += sizeof(MetaInfo);
}
//3. write the MetaInfo into the index file
meta.set_next_meta_offset(0);
ret = file_op_->pwrite_file(reinterpret_cast<const char*>(&meta), sizeof(MetaInfo), current_offset);
if(TFS_SUCCESS != ret)
{
index_header()->index_file_size_ -= sizeof(MetaInfo);
return ret;
}
//4. link the MetaInfo node into the hash chain
//the previous node exists
if( 0 != previous_offset)
{
ret = file_op_->pread_file(reinterpret_cast<char*>(&tmp_meta_info), sizeof(MetaInfo), previous_offset);
if(TFS_SUCCESS != ret)
{
index_header()->index_file_size_ -= sizeof(MetaInfo);
return ret;
}
tmp_meta_info.set_next_meta_offset(current_offset);
ret = file_op_->pwrite_file(reinterpret_cast<const char*>(&tmp_meta_info), sizeof(MetaInfo), previous_offset);
if(TFS_SUCCESS != ret)
{
index_header()->index_file_size_ -= sizeof(MetaInfo);
return ret;
}
}
//there is no previous node: this becomes the first node of the bucket
else
{
bucket_slot()[slot] = current_offset;
}
return TFS_SUCCESS;
}
}
}