package main

import (
	"fmt"
	"github.com/cespare/xxhash/v2"
	"math/rand"
	"strconv"
	"sync"
	"time"
)
// Value is a single bucket of the table: a per-bucket mutex guarding
// the stored keys and an access counter.
type Value struct {
	Mu    sync.Mutex
	Str   []string
	Count int
}

var (
	goCnt   uint64 = 10      // number of goroutines
	loopCnt uint64 = 1000000 // iterations per goroutine
	length         = goCnt * loopCnt // number of buckets
)
// GetHash maps data to a bucket index in [0, length).
func GetHash(data []byte) uint64 {
	return xxhash.Sum64(data) % length
}
func main() {
	// One bucket per possible hash value; each bucket has its own mutex.
	RowMap := make([]Value, length)
	for k := range RowMap {
		RowMap[k].Str = make([]string, 0)
	}

	// Warm-up insert into a single bucket.
	v := GetHash([]byte("count"))
	val := &RowMap[v]
	val.Mu.Lock()
	val.Str = append(val.Str, "count")
	val.Mu.Unlock()

	var wg sync.WaitGroup
	start := time.Now()
	ranRange := int(length)
	rand.Seed(time.Now().UnixNano())

	var numOfThread uint64
	for numOfThread = 0; numOfThread < goCnt; numOfThread++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			var i uint64
			for i = 0; i < loopCnt; i++ {
				// Insert a random key into its bucket if it is not already
				// present, holding only that bucket's lock.
				randomNum := rand.Intn(ranRange)
				keyStr := strconv.Itoa(randomNum)
				hash := GetHash([]byte(keyStr))
				valEntry := &RowMap[hash]
				valEntry.Mu.Lock()
				if len(valEntry.Str) == 0 {
					valEntry.Str = append(valEntry.Str, keyStr)
				} else {
					exist := false
					for _, v := range valEntry.Str {
						if v == keyStr {
							exist = true
							break
						}
					}
					if !exist {
						valEntry.Str = append(valEntry.Str, keyStr)
					}
				}
				valEntry.Mu.Unlock()

				// Update the counter of a second random bucket under its own lock.
				randomNum = rand.Intn(ranRange)
				keyStr = strconv.Itoa(randomNum)
				hash = GetHash([]byte(keyStr))
				valEntry = &RowMap[hash]
				valEntry.Mu.Lock()
				//for _, v := range valEntry.Str {
				//	fmt.Println("key : ", v)
				//}
				valEntry.Count++
				valEntry.Mu.Unlock()
			}
		}()
	}
	wg.Wait()

	elapsed := time.Since(start)
	fmt.Println("row map elapsed :", elapsed)
	//for _, v := range RowMap {
	//	fmt.Println("key :", v.Str, ", val count :", v.Count)
	//}
}
The hash table is implemented on top of a slice, and every bucket owns its own sync.Mutex. Each operation therefore only takes the lock of the bucket it touches, so the lock granularity is finer than that of sync.Map.
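For reference, the sync.Map side of that comparison is not shown above; a minimal sketch of an equivalent workload might look like the following (the key range, loop counts, and the LoadOrStore call are assumptions about the benchmark, not the original code):

package main

import (
	"fmt"
	"math/rand"
	"strconv"
	"sync"
	"time"
)

func main() {
	var m sync.Map
	var wg sync.WaitGroup
	rand.Seed(time.Now().UnixNano())
	start := time.Now()
	for g := 0; g < 10; g++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 1000000; i++ {
				key := strconv.Itoa(rand.Intn(10 * 1000000))
				// sync.Map synchronizes the whole map internally,
				// so all goroutines contend on one structure.
				m.LoadOrStore(key, 0)
			}
		}()
	}
	wg.Wait()
	fmt.Println("sync map elapsed :", time.Since(start))
}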
Test result: compared against sync.Map and freecache, its performance is close to freecache. Drawback: automatic growth (rehash) is not implemented yet.
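One way to add the missing growth step would be to wrap the bucket slice in a type guarded by a table-wide sync.RWMutex, rebuild the slice at a larger size, and reinsert the stored keys. A minimal sketch, reusing the Value type and xxhash import from above; the Table, bucketFor, and Grow names are illustrative and not part of the code above, and per-bucket counters are ignored for brevity:

// Table is a hypothetical wrapper that makes the bucket slice growable.
type Table struct {
	mu      sync.RWMutex // taken for writing only while rehashing
	buckets []Value
}

func (t *Table) bucketFor(key string) *Value {
	h := xxhash.Sum64([]byte(key)) % uint64(len(t.buckets))
	return &t.buckets[h]
}

// Grow rebuilds the table with newLen buckets and rehashes every stored key.
func (t *Table) Grow(newLen uint64) {
	t.mu.Lock()
	defer t.mu.Unlock()
	old := t.buckets
	t.buckets = make([]Value, newLen)
	for i := range old {
		for _, key := range old[i].Str {
			b := t.bucketFor(key)
			b.Str = append(b.Str, key)
		}
	}
}

Normal reads and writes would take t.mu.RLock() before locking the individual bucket, so Grow can exclude them while it rehashes; deciding when to grow (for example, when the average bucket length passes a threshold) is left open.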