未引入虚拟节点,设置服务器数量为10、KV数据为100万:
#include<iostream>
#include<stdio.h>
#include<math.h>
#include<cstdlib>
#include<time.h>
using namespace std;
// Server node: a position on the hash ring plus a counter of keys stored there.
struct serverNode{
    int id;                 // server id, 1-based
    long long int place;    // upper bound of this node's arc on the ring
    int numberOfKey;        // number of KV entries hashed onto this node
}sn[10];
// Ring size 2^32. An exact integer shift replaces the original pow(2, 32),
// which computed the constant through a runtime double and an implicit
// floating-point-to-integer conversion.
long long int point = 1LL << 32;
// Generate a pseudo-random key in [0, 32767^2 * 5].
// rand() is reduced mod 32768 first: on the author's platform (RAND_MAX
// measured as 32767) this is a no-op, while on platforms with a larger
// RAND_MAX (e.g. glibc's 2^31 - 1) it prevents key * key * 5 from
// overflowing signed 64-bit, which would be undefined behavior.
// Squaring and scaling by 5 spreads keys up to ~5.4e9, past the 2^32 ring,
// because using the raw rand() range directly clusters all keys on one node.
long long int dataCreat(){
    long long int key = rand() % 32768;
    key = key * key * 5;
    return key;
}
// Standard deviation of per-server key counts: the paper's measure of how
// unevenly the KV load is spread across servers (smaller = more balanced).
// Backward-compatible generalization: n (server count) and total (key count)
// default to the original hard-coded 10 and 1,000,000, so existing callers
// standardDev(&sn[0]) behave exactly as before.
double standardDev(struct serverNode *sn, int n = 10, double total = 1000000){
    if(n <= 0){
        return 0; // guard: no servers means no deviation to report
    }
    double average = total / n; // expected keys per server under perfect balance
    double sum = 0;
    for(int i = 0; i < n; i++){
        double dev = sn[i].numberOfKey - average; // deviation, not yet squared
        sum += dev * dev;
    }
    return sqrt(sum / n);
}
int main(){
    // Place the 10 servers evenly around the ring; the last one closes the
    // ring exactly at 2^32 so every hashed key falls below some node.
    for(int i = 0; i < 10; i++){
        sn[i].id = i + 1;
        sn[i].place = (i == 9) ? point : point / 10 * (i + 1);
        sn[i].numberOfKey = 0;
    }
    srand(time(0));
    // Hash one million random keys onto the ring; each key is credited to the
    // first server clockwise from its position (smallest place > key position).
    for(int i = 0; i < 1000000; i++){
        long long int dataPlace = dataCreat() % point;
        int owner = 0;
        while(sn[owner].place <= dataPlace){
            owner++; // terminates: sn[9].place == 2^32 > any dataPlace
        }
        sn[owner].numberOfKey++;
    }
    // Report the per-server counts and the overall imbalance metric.
    for(int i = 0; i < 10; i++){
        cout << "服务器" << sn[i].id << "的数据数量为:" << sn[i].numberOfKey << endl;
    }
    cout << "KV数据在服务器上分布数量的标准差为:" << standardDev(&sn[0]);
}
因为我将十台服务器均匀分布在Hash环上,所以不存在Hash环的倾斜;如果采用随机取余的方法增加虚拟节点,虚拟节点的位置同样近似均匀,KV数据在服务器上分布数量的标准差可能与未引入虚拟节点时相近,难以体现虚拟节点的效果。所以我决定使用均匀映射的方法增加虚拟节点:把20个虚拟节点等距放置在Hash环上,每台服务器对应其中两个。实现如下面的代码所示:
引入虚拟节点,设置服务器数量为10、虚拟节点数量为10、KV数据为100万:
#include<iostream>
#include<stdio.h>
#include<math.h>
#include<cstdlib>
#include<time.h>
using namespace std;
// Physical server node: final owner of the keys counted on its virtual nodes.
struct serverNode{
    int id;                 // server id, 1-based
    long long int place;    // ring position (unused in this variant; merge() fills counts)
    int numberOfKey;        // total keys after merging its two virtual nodes
}sn[10];
// Virtual node: two are placed on the ring for every physical server.
struct dataNode{
    int id;                 // virtual node id, 1-based
    long long int place;    // upper bound of this virtual node's arc on the ring
    int numberOfKey;        // keys hashed directly onto this virtual node
}dn[20];
// Ring size 2^32 as an exact integer shift, replacing the original
// floating-point pow(2, 32) and its implicit double-to-integer conversion.
long long int point = 1LL << 32;
// Generate a pseudo-random key in [0, 32767^2 * 5].
// rand() is reduced mod 32768 first: on the author's platform (RAND_MAX
// measured as 32767) this is a no-op, while on platforms with a larger
// RAND_MAX (e.g. glibc's 2^31 - 1) it prevents key * key * 5 from
// overflowing signed 64-bit, which would be undefined behavior.
// Squaring and scaling by 5 spreads keys up to ~5.4e9, past the 2^32 ring,
// because using the raw rand() range directly clusters all keys on one node.
long long int dataCreat(){
    long long int key = rand() % 32768;
    key = key * key * 5;
    return key;
}
// Standard deviation of per-server key counts: the measure of how unevenly
// the KV load is spread across servers (smaller = more balanced).
// Backward-compatible generalization: n (server count) and total (key count)
// default to the original hard-coded 10 and 1,000,000, so existing callers
// standardDev(&sn[0]) behave exactly as before.
double standardDev(struct serverNode *sn, int n = 10, double total = 1000000){
    if(n <= 0){
        return 0; // guard: no servers means no deviation to report
    }
    double average = total / n; // expected keys per server under perfect balance
    double sum = 0;
    for(int i = 0; i < n; i++){
        double dev = sn[i].numberOfKey - average; // deviation, not yet squared
        sum += dev * dev;
    }
    return sqrt(sum / n);
}
// Fold the counts collected on the 20 virtual nodes back onto the 10 servers.
// Server i is credited with two virtual nodes: the odd-indexed dn[2i + 1] and
// the even-indexed dn[(2i + 4) % 20]. Over i = 0..9 the odd indices cover
// 1,3,...,19 and the even expression covers 4,6,...,18,0,2, so every one of
// the 20 virtual nodes is counted exactly once and key totals are preserved.
// NOTE(review): this pairing is unusual (not the conventional dn[i] and
// dn[i + 10]); presumably any disjoint two-nodes-per-server assignment is
// acceptable for the load measurement — confirm with the author.
void merge(struct dataNode *dn, struct serverNode *sn){
for(int i = 0; i <= 9; i++){
sn[i].numberOfKey = dn[i * 2 + 1].numberOfKey + dn[(i * 2 + 4) % 20].numberOfKey;
}
}
int main(){
    // Spread the 20 virtual nodes evenly over the ring; the last one closes
    // the ring exactly at 2^32 so every hashed key falls below some node.
    for(int i = 0; i < 20; i++){
        dn[i].id = i + 1;
        dn[i].place = (i == 19) ? point : point / 20 * (i + 1);
        dn[i].numberOfKey = 0;
    }
    // Servers only need ids here; their counters are filled in by merge().
    for(int i = 0; i < 10; i++){
        sn[i].id = i + 1;
    }
    srand(time(0));
    // Hash one million random keys; each key lands on the first virtual node
    // clockwise from its position (smallest place > key position).
    for(int i = 0; i < 1000000; i++){
        long long int dataPlace = dataCreat() % point;
        int owner = 0;
        while(dn[owner].place <= dataPlace){
            owner++; // terminates: dn[19].place == 2^32 > any dataPlace
        }
        dn[owner].numberOfKey++;
    }
    for(int i = 0; i < 20; i++){
        cout << "节点" << dn[i].id << "的数据数量为:" << dn[i].numberOfKey << endl;
    }
    // Map virtual-node counts back onto their servers, then report.
    merge(&dn[0], &sn[0]);
    for(int i = 0; i < 10; i++){
        cout << "服务器" << sn[i].id << "的数据数量为:" << sn[i].numberOfKey << endl;
    }
    cout << "KV数据在服务器上分布数量的标准差为:" << standardDev(&sn[0]);
}
对比引入虚拟节点前后KV数据在服务器上分布数量的标准差,即可评估算法存储负载的均衡程度:标准差越小,负载越均衡。具体结论请自行运行验证,如有错误恳请指出,谢谢阅读!