Reading the Go map Source Code

1. Map data structures

// A header for a Go map.
type hmap struct {
    // Note: the format of the hmap is also encoded in cmd/compile/internal/reflectdata/reflect.go.
    // Make sure this stays in sync with the compiler's definition.
    count     int // # live cells == size of map.  Must be first (used by len() builtin)
    flags     uint8
    B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
    noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
    hash0     uint32 // hash seed

    buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
    oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
    nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)

    extra *mapextra // optional fields
}

// mapextra holds fields that are not present on all maps.
type mapextra struct {
    // If both key and elem do not contain pointers and are inline, then we mark bucket
    // type as containing no pointers. This avoids scanning such maps.
    // However, bmap.overflow is a pointer. In order to keep overflow buckets
    // alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
    // overflow and oldoverflow are only used if key and elem do not contain pointers.
    // overflow contains overflow buckets for hmap.buckets.
    // oldoverflow contains overflow buckets for hmap.oldbuckets.
    // The indirection allows to store a pointer to the slice in hiter.
    overflow    *[]*bmap
    oldoverflow *[]*bmap

    // nextOverflow holds a pointer to a free overflow bucket.
    nextOverflow *bmap
}

// A bucket for a Go map.
type bmap struct {
    // tophash generally contains the top byte of the hash value
    // for each key in this bucket. If tophash[0] < minTopHash,
    // tophash[0] is a bucket evacuation state instead.
    tophash [bucketCnt]uint8
    // Followed by bucketCnt keys and then bucketCnt elems.
    // NOTE: packing all the keys together and then all the elems together makes the
    // code a bit more complicated than alternating key/elem/key/elem/... but it allows
    // us to eliminate padding which would be needed for, e.g., map[int64]int8.
    // Followed by an overflow pointer.
}
hmap

hmap is the top-level structure of a map. Field meanings:

count: number of live key/value pairs; this is what len() returns
flags: state flags (e.g., whether a write or an iteration is in progress)
B: log2 of the number of buckets, so the map has 2^B buckets
noverflow: approximate number of overflow buckets
hash0: random hash seed
buckets: pointer to the bucket array of length 2^B
oldbuckets: pointer to the previous, half-sized bucket array; non-nil only while a grow is in progress
nevacuate: progress counter: the number of the next old bucket to migrate during incremental growth
extra: pointer to a mapextra struct holding the overflow-bucket bookkeeping
bmap

The bmap above is only the static declaration; the compiler expands it into roughly the following structure:

type bmap struct {
	tophash  [bucketCnt]uint8
	keys     [8]keyType
	values   [8]valueType
	overflow *bmap
}

A bmap is what the source calls a bucket: each one holds up to bucketCnt (8) key/value pairs, with all keys packed together followed by all values to avoid padding.
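
To make the layout concrete, here is a small illustrative sketch (not runtime code): it mimics the expanded bucket of a map[int64]int8 and derives slot addresses with the same arithmetic the runtime uses; bucket, dataOffset, keySize and elemSize are stand-ins for the runtime's internals.

package main

import (
	"fmt"
	"unsafe"
)

const bucketCnt = 8

// Illustrative expanded bucket for map[int64]int8: 8 tophash bytes, then all
// 8 keys, then all 8 elems, then the overflow pointer. Packing keys and elems
// separately avoids the padding that interleaved int64/int8 pairs would need.
type bucket struct {
	tophash  [bucketCnt]uint8
	keys     [bucketCnt]int64
	elems    [bucketCnt]int8
	overflow unsafe.Pointer
}

func main() {
	var b bucket
	base := unsafe.Pointer(&b)
	dataOffset := unsafe.Offsetof(b.keys) // where the key area starts (8)
	keySize := unsafe.Sizeof(b.keys[0])
	elemSize := unsafe.Sizeof(b.elems[0])

	i := uintptr(3) // slot index within the bucket

	// Same arithmetic as mapaccess1 below: key i, then elem i after all keys.
	k := unsafe.Add(base, dataOffset+i*keySize)
	e := unsafe.Add(base, dataOffset+bucketCnt*keySize+i*elemSize)
	fmt.Println(uintptr(k)-uintptr(base), uintptr(e)-uintptr(base)) // 32 75
}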

mapextra

// TODO

Relationships

(figure omitted: diagram of how hmap, its bucket array, overflow buckets, and mapextra relate)

2. Lookup

First, how a Go map hashes: the key is hashed together with the seed hash0, producing a 64-bit (or 32-bit, depending on the platform word size) hash value. The low B bits pick the bucket; the top 8 bits are stored as the tophash and compared within the bucket. Picking the bucket is a single AND operation: 2^B - 1 is all ones in its low bits, so hash & (2^B - 1) lands in exactly one of the 2^B buckets. This is also why the number of buckets in a map is always a power of two.
Once the bucket is found, the tophash array is scanned for a matching byte, and the matching slot index is used to locate the key and the value in the bytes that follow.
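
A rough sketch of the split, assuming a 64-bit platform; minTopHash mirrors the runtime constant that reserves small tophash values for bucket-state markers:

package main

import "fmt"

const minTopHash = 5 // values below this are bucket evacuation states

// tophash extracts the top 8 bits of the hash, bumping small values past
// minTopHash so real tophashes never collide with state markers.
func tophash(hash uint64) uint8 {
	top := uint8(hash >> (64 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}

func main() {
	const B = 5 // 2^5 = 32 buckets
	hash := uint64(0xA1B2C3D4E5F60718)
	mask := uint64(1)<<B - 1 // ...011111
	// low B bits pick the bucket, top 8 bits become the tophash
	fmt.Printf("bucket=%d tophash=%#x\n", hash&mask, tophash(hash))
}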

// mapaccess1 returns a pointer to h[key].  Never returns nil, instead
// it will return a reference to the zero object for the elem type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
    if raceenabled && h != nil { // race detector hook
       callerpc := getcallerpc()
       pc := abi.FuncPCABIInternal(mapaccess1)
       racereadpc(unsafe.Pointer(h), callerpc, pc)
       raceReadObjectPC(t.Key, key, callerpc, pc)
    }
    if msanenabled && h != nil { // memory sanitizer hook
       msanread(key, t.Key.Size_)
    }
    if asanenabled && h != nil { // address sanitizer hook
       asanread(key, t.Key.Size_)
    }
    if h == nil || h.count == 0 { // nil or empty map: return the zero value
       if err := mapKeyError(t, key); err != nil {
          panic(err) // see issue 23734
       }
       return unsafe.Pointer(&zeroVal[0])
    }
    if h.flags&hashWriting != 0 { // concurrency check: maps are not thread-safe; a detected concurrent write aborts the program
       fatal("concurrent map read and map write")
    }
    hash := t.Hasher(key, uintptr(h.hash0)) // compute the hash from the key and the seed
    m := bucketMask(h.B) // bucket mask; e.g. B=3 gives ...0111
    b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize))) // bucket address via raw pointer arithmetic: h.buckets is the base address, hash&m the logical index; multiplying by the bucket size gives the byte offset, and the sum is the bucket's address (no array indexing involved)
    if c := h.oldbuckets; c != nil { // a grow is in progress
       if !h.sameSizeGrow() { // not a same-size grow: the table has doubled, so mask down one bit to index the old, half-sized array
          // There used to be half as many buckets; mask down one more power of two.
          m >>= 1
       }
       oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
       if !evacuated(oldb) {
          b = oldb
       }
    }
    top := tophash(hash)
bucketloop:
    for ; b != nil; b = b.overflow(t) { // walk the bucket and its overflow chain
       for i := uintptr(0); i < bucketCnt; i++ { // scan the tophash array; i is the slot index
          if b.tophash[i] != top {
             if b.tophash[i] == emptyRest { // emptyRest: this slot and every slot after it (incl. the overflow chain) is empty
                break bucketloop
             }
             continue
          }
          k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize)) // address of key i
          if t.IndirectKey() { // the slot stores a pointer to the key
             k = *((*unsafe.Pointer)(k))
          }
          if t.Key.Equal(key, k) { // compare the actual keys, not just their hashes
             e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) // address of elem i
             if t.IndirectElem() {
                e = *((*unsafe.Pointer)(e))
             }
             return e
          }
       }
    }
    return unsafe.Pointer(&zeroVal[0])
}

key -> hash: random seed + the hash function
hash -> bucket: buckets are contiguous in memory; the mask yields a logical index, which is multiplied by the bucket size and added to the base address to get one bucket's starting address
bucket -> value: scan tophash to get the slot index, then compute the key/elem addresses the same way
Note that although the full hash is computed, only B+8 of its bits are ever consulted (B to pick the bucket, 8 for the tophash), so even with a 64-bit hash on a 64-bit system, tophash matches can be spurious, which is why the full keys must still be compared.
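
At the language level, mapaccess1 backs the one-result form v := m[k], and its sibling mapaccess2 backs the comma-ok form; the zero-value-on-miss behavior is directly observable:

package main

import "fmt"

func main() {
	m := map[string]int{"a": 1}

	// Compiled to a mapaccess1 call: a missing key yields the zero value.
	fmt.Println(m["missing"]) // 0

	// Compiled to a mapaccess2 call: the extra bool distinguishes
	// "absent" from "present with a zero value".
	v, ok := m["missing"]
	fmt.Println(v, ok) // 0 false
}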

3. Insertion

// Like mapaccess, but allocates a slot for the key if it is not present in the map.
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
    if h == nil { // assigning to a nil map panics; the map must be initialized first
       panic(plainError("assignment to entry in nil map"))
    }
    if raceenabled { // race detector hook
       callerpc := getcallerpc()
       pc := abi.FuncPCABIInternal(mapassign)
       racewritepc(unsafe.Pointer(h), callerpc, pc)
       raceReadObjectPC(t.Key, key, callerpc, pc)
    }
    if msanenabled { // memory sanitizer hook
       msanread(key, t.Key.Size_)
    }
    if asanenabled { // address sanitizer hook
       asanread(key, t.Key.Size_)
    }
    if h.flags&hashWriting != 0 { // another write is already in progress
       fatal("concurrent map writes")
    }
    hash := t.Hasher(key, uintptr(h.hash0))

    // Set hashWriting after calling t.hasher, since t.hasher may panic,
    // in which case we have not actually done a write.
    h.flags ^= hashWriting // set the write flag ("lock")

    if h.buckets == nil { // lazily allocate the bucket array on the first insert
       h.buckets = newobject(t.Bucket) // newarray(t.Bucket, 1)
    }

again:
    bucket := hash & bucketMask(h.B)
    if h.growing() {
       growWork(t, h, bucket)
    }
    b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
    top := tophash(hash)

    var inserti *uint8
    var insertk unsafe.Pointer
    var elem unsafe.Pointer
bucketloop:
    for {
       for i := uintptr(0); i < bucketCnt; i++ {
          if b.tophash[i] != top {
             if isEmpty(b.tophash[i]) && inserti == nil { // first free slot seen so far: remember it as the insertion point
                inserti = &b.tophash[i]
                insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
                elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
             }
             if b.tophash[i] == emptyRest {
                break bucketloop
             }
             continue
          }
          k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
          if t.IndirectKey() {
             k = *((*unsafe.Pointer)(k))
          }
          if !t.Key.Equal(key, k) {
             continue
          }
          // already have a mapping for key. Update it.
          if t.NeedKeyUpdate() {
             typedmemmove(t.Key, k, key)
          }
          elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
          goto done
       }
       ovf := b.overflow(t)
       if ovf == nil {
          break
       }
       b = ovf
    }

    // Did not find mapping for key. Allocate new cell & add entry.

    // If we hit the max load factor or we have too many overflow buckets,
    // and we're not already in the middle of growing, start growing.
    if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
       hashGrow(t, h)
       goto again // Growing the table invalidates everything, so try again
    }

    if inserti == nil { // no free slot was recorded: the bucket and its overflow chain are full, so allocate a new overflow bucket
       // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
       newb := h.newoverflow(t, b)
       inserti = &newb.tophash[0]
       insertk = add(unsafe.Pointer(newb), dataOffset)
       elem = add(insertk, bucketCnt*uintptr(t.KeySize))
    }

    // store new key/elem at insert position
    if t.IndirectKey() {
       kmem := newobject(t.Key)
       *(*unsafe.Pointer)(insertk) = kmem
       insertk = kmem
    }
    if t.IndirectElem() {
       vmem := newobject(t.Elem)
       *(*unsafe.Pointer)(elem) = vmem
    }
    typedmemmove(t.Key, insertk, key) // typed copy of key into insertk
    *inserti = top
    h.count++

done:
    if h.flags&hashWriting == 0 {
       fatal("concurrent map writes")
    }
    h.flags &^= hashWriting // clear the write flag (AND NOT)
    if t.IndirectElem() {
       elem = *((*unsafe.Pointer)(elem))
    }
    return elem
}

The addressing logic for a write is essentially the same as for a lookup. Two aspects of the source deserve attention:

1. "Locking" the whole map

The lock here is not a real mutex; it is a flag bit on the struct:

if h.flags&hashWriting != 0 { // h.flags = ...X1XX: another write is in flight
    fatal("concurrent map writes")
}
···
// hashWriting = ...0100; XOR flips only the write bit and leaves the other
// state bits untouched
h.flags ^= hashWriting
···
// In theory the flag set above keeps other goroutines out of mapassign, but it
// is not a true lock: a racing writer that got past the first check will also
// XOR hashWriting, flipping it back to 0, so this final check catches writers
// that interleaved with us.
if h.flags&hashWriting == 0 {
	fatal("concurrent map writes")
}
h.flags &^= hashWriting // clear the write flag (AND NOT)
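
As an illustration, two goroutines writing the same map without synchronization will sooner or later trip this check with the unrecoverable "fatal error: concurrent map writes". The conventional fix is an explicit mutex (or sync.Map); a minimal sketch:

package main

import (
	"fmt"
	"sync"
)

// safeMap serializes writers with a real mutex, which is what the runtime's
// hashWriting flag deliberately is not: the flag only detects races, it does
// not prevent them.
type safeMap struct {
	mu sync.Mutex
	m  map[int]int
}

func (s *safeMap) set(k, v int) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.m[k] = v
}

func main() {
	s := &safeMap{m: make(map[int]int)}
	var wg sync.WaitGroup
	for g := 0; g < 4; g++ {
		wg.Add(1)
		go func(g int) {
			defer wg.Done()
			for i := 0; i < 1000; i++ {
				s.set(g*1000+i, i) // safe: writes are serialized
			}
		}(g)
	}
	wg.Wait()
	fmt.Println(len(s.m)) // 4000
}
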
2. Collision handling

Go's map resolves collisions with a blend of separate chaining and open addressing: across buckets it is chaining (each bucket links to overflow buckets), while within a single bucket the eight slots are probed linearly, much like open addressing.
On a collision the runtime first looks for a free slot in the current bucket and its overflow chain; only when the entire chain is full does it allocate a new overflow bucket.

4. Growing

Grow triggers (the two predicates are sketched below)
  1. The load factor exceeds 6.5, i.e. the map averages more than 6.5 key/value pairs per bucket; this starts a doubling grow.
  2. There are too many overflow buckets: roughly as many overflow buckets as regular buckets, with the threshold capped at 2^15 = 32768; this starts a same-size grow.
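
The two predicates look roughly like this in the runtime (names as in recent Go versions; bucketShift is simplified here):

package main

import "fmt"

const (
	bucketCnt     = 8
	loadFactorNum = 13 // loadFactor = 13/2 = 6.5
	loadFactorDen = 2
)

func bucketShift(b uint8) uintptr { return uintptr(1) << b } // 2^b buckets

// overLoadFactor reports whether count items over 2^B buckets exceed an
// average of 6.5 per bucket (small maps never trigger a grow).
func overLoadFactor(count int, B uint8) bool {
	return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}

// tooManyOverflowBuckets reports whether there are roughly as many overflow
// buckets as regular buckets; for B > 15 the threshold is capped at 2^15.
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
	if B > 15 {
		B = 15
	}
	return noverflow >= uint16(1)<<(B&15)
}

func main() {
	fmt.Println(overLoadFactor(53, 3))        // true: 53 > 6.5*8 = 52
	fmt.Println(tooManyOverflowBuckets(9, 3)) // true: 9 >= 2^3
}
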
Incremental growing

When the load factor is exceeded, a new bucket array twice the old length is allocated, and the old buckets' data is migrated into it.
If a map held hundreds of millions of entries, migrating them all at once would cause a noticeable pause, so Go migrates incrementally: each write (insert or delete) on a growing map evacuates up to two whole old buckets.
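
A small sketch of the x/y split used during a doubling grow: a key in old bucket i either stays at index i (the x destination) or moves to i+newbit (the y destination), decided by the one extra hash bit that the doubled mask exposes (values below are illustrative):

package main

import "fmt"

func main() {
	oldB := uint8(3)             // old table: 2^3 = 8 buckets
	newbit := uintptr(1) << oldB // 8, which is also the number of old buckets
	hash := uintptr(0b10110101)  // illustrative low bits of some key's hash

	oldBucket := hash & (newbit - 1)   // low B bits: index in the old table
	newBucket := hash & (2*newbit - 1) // low B+1 bits: index after doubling
	useY := hash&newbit != 0           // the single new bit decides x vs y

	// Here: oldBucket=5, newBucket=5, useY=false, so this key keeps its
	// index (the x destination); had bit 3 been set it would move to 5+8=13.
	fmt.Println(oldBucket, newBucket, useY)
}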

evacuate: migrates one old bucket's entries into the new bucket array
func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
    b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
    newbit := h.noldbuckets()
    if !evacuated(b) {
       // TODO: reuse overflow buckets instead of using new ones, if there
       // is no iterator using the old buckets.  (If !oldIterator.)

       // xy contains the x and y (low and high) evacuation destinations.
       var xy [2]evacDst
       x := &xy[0]
       x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
       x.k = add(unsafe.Pointer(x.b), dataOffset)
       x.e = add(x.k, bucketCnt*uintptr(t.KeySize))

       if !h.sameSizeGrow() {
          // Only calculate y pointers if we're growing bigger.
          // Otherwise GC can see bad pointers.
          y := &xy[1]
          y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
          y.k = add(unsafe.Pointer(y.b), dataOffset)
          y.e = add(y.k, bucketCnt*uintptr(t.KeySize))
       }

       for ; b != nil; b = b.overflow(t) {
          k := add(unsafe.Pointer(b), dataOffset)
          e := add(k, bucketCnt*uintptr(t.KeySize))
          for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
             top := b.tophash[i]
             if isEmpty(top) {
                b.tophash[i] = evacuatedEmpty
                continue
             }
             if top < minTopHash {
                throw("bad map state")
             }
             k2 := k
             if t.IndirectKey() {
                k2 = *((*unsafe.Pointer)(k2))
             }
             var useY uint8
             if !h.sameSizeGrow() {
                // Compute hash to make our evacuation decision (whether we need
                // to send this key/elem to bucket x or bucket y).
                hash := t.Hasher(k2, uintptr(h.hash0))
                if h.flags&iterator != 0 && !t.ReflexiveKey() && !t.Key.Equal(k2, k2) {
                   // If key != key (NaNs), then the hash could be (and probably
                   // will be) entirely different from the old hash. Moreover,
                   // it isn't reproducible. Reproducibility is required in the
                   // presence of iterators, as our evacuation decision must
                   // match whatever decision the iterator made.
                   // Fortunately, we have the freedom to send these keys either
                   // way. Also, tophash is meaningless for these kinds of keys.
                   // We let the low bit of tophash drive the evacuation decision.
                   // We recompute a new random tophash for the next level so
                   // these keys will get evenly distributed across all buckets
                   // after multiple grows.
                   useY = top & 1
                   top = tophash(hash)
                } else {
                   if hash&newbit != 0 {
                      useY = 1
                   }
                }
             }

             if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
                throw("bad evacuatedN")
             }

             b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
             dst := &xy[useY]                 // evacuation destination

             if dst.i == bucketCnt {
                dst.b = h.newoverflow(t, dst.b)
                dst.i = 0
                dst.k = add(unsafe.Pointer(dst.b), dataOffset)
                dst.e = add(dst.k, bucketCnt*uintptr(t.KeySize))
             }
             dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
             if t.IndirectKey() {
                *(*unsafe.Pointer)(dst.k) = k2 // copy pointer
             } else {
                typedmemmove(t.Key, dst.k, k) // copy elem
             }
             if t.IndirectElem() {
                *(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
             } else {
                typedmemmove(t.Elem, dst.e, e)
             }
             dst.i++
             // These updates might push these pointers past the end of the
             // key or elem arrays.  That's ok, as we have the overflow pointer
             // at the end of the bucket to protect against pointing past the
             // end of the bucket.
             dst.k = add(dst.k, uintptr(t.KeySize))
             dst.e = add(dst.e, uintptr(t.ValueSize))
          }
       }
       // Unlink the overflow buckets & clear key/elem to help GC.
       if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
          b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
          // Preserve b.tophash because the evacuation
          // state is maintained there.
          ptr := add(b, dataOffset)
          n := uintptr(t.BucketSize) - dataOffset
          memclrHasPointers(ptr, n)
       }
    }

    if oldbucket == h.nevacuate {
       advanceEvacuationMark(h, t, newbit)
    }
}
hashGrow: kicks off a grow
func hashGrow(t *maptype, h *hmap) {
    // If we've hit the load factor, get bigger.
    // Otherwise, there are too many overflow buckets,
    // so keep the same number of buckets and "grow" laterally.
    bigger := uint8(1)
    if !overLoadFactor(h.count+1, h.B) {
       bigger = 0
       h.flags |= sameSizeGrow
    }
    oldbuckets := h.buckets
    newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)

    flags := h.flags &^ (iterator | oldIterator)
    if h.flags&iterator != 0 {
       flags |= oldIterator
    }
    // commit the grow (atomic wrt gc)
    h.B += bigger
    h.flags = flags
    h.oldbuckets = oldbuckets
    h.buckets = newbuckets
    h.nevacuate = 0
    h.noverflow = 0

    if h.extra != nil && h.extra.overflow != nil {
       // Promote current overflow buckets to the old generation.
       if h.extra.oldoverflow != nil {
          throw("oldoverflow is not nil")
       }
       h.extra.oldoverflow = h.extra.overflow
       h.extra.overflow = nil
    }
    if nextOverflow != nil {
       if h.extra == nil {
          h.extra = new(mapextra)
       }
       h.extra.nextOverflow = nextOverflow
    }

    // the actual copying of the hash table data is done incrementally
    // by growWork() and evacuate().
}
growWork: evacuation work performed on writes
func growWork(t *maptype, h *hmap, bucket uintptr) {
    // make sure we evacuate the oldbucket corresponding
    // to the bucket we're about to use
    evacuate(t, h, bucket&h.oldbucketmask())

    // evacuate one more oldbucket to make progress on growing
    if h.growing() {
       evacuate(t, h, h.nevacuate)
    }
}
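
growWork is called from both mapassign and mapdelete; plain reads never evacuate. Evacuating the key's own old bucket first guarantees that a write always lands in the new bucket array, while the extra evacuation at h.nevacuate keeps the grow moving forward even when writes keep hitting buckets that were already evacuated.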

5. Iteration // TODO
