Exploring the Golang Source Code: How the GC Is Implemented (6)

Recommended articles:

Exploring the Golang Source Code: How the GC Is Implemented (1)

Exploring the Golang Source Code: How the GC Is Implemented (2)

Exploring the Golang Source Code: How the GC Is Implemented (3)

Exploring the Golang Source Code: How the GC Is Implemented (4)

Exploring the Golang Source Code: How the GC Is Implemented (5)

The span's sweep function sweeps a single span:

// Sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
// Returns true if the span was returned to heap.
// If preserve=true, don't return it to heap nor relink in MCentral lists;
// caller takes care of it.
//TODO go:nowritebarrier
func (s *mspan) sweep(preserve bool) bool {
    // It's critical that we enter this function with preemption disabled,
    // GC must not start while we are in the middle of this function.
    _g_ := getg()
    if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
        throw("MSpan_Sweep: m is not locked")
    }
    sweepgen := mheap_.sweepgen
    if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
        print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
        throw("MSpan_Sweep: bad span state")
    }
    if trace.enabled {
        traceGCSweepSpan(s.npages * _PageSize)
    }
    // Count the pages swept so far.
    atomic.Xadd64(&mheap_.pagesSwept, int64(s.npages))
    spc := s.spanclass
    size := s.elemsize
    res := false
    c := _g_.m.mcache
    freeToHeap := false
    // The allocBits indicate which unmarked objects don't need to be
    // processed since they were free at the end of the last GC cycle
    // and were not allocated since then.
    // If the allocBits index is >= s.freeindex and the bit
    // is not marked then the object remains unallocated
    // since the last GC.
    // This situation is analogous to being on a freelist.
    // Check the finalizers among the specials: if the corresponding object is
    // no longer alive, mark the object live to prevent it from being reclaimed,
    // then move the finalizer to the run queue.
    // Unlink & free special records for any objects we're about to free.
    // Two complications here:
    // 1. An object can have both finalizer and profile special records.
    //    In such case we need to queue finalizer for execution,
    //    mark the object as live and preserve the profile special.
    // 2. A tiny object can have several finalizers setup for different offsets.
    //    If such object is not marked, we need to queue all finalizers at once.
    // Both 1 and 2 are possible at the same time.
    specialp := &s.specials
    special := *specialp
    for special != nil {
        // A finalizer can be set for an inner byte of an object, find object beginning.
        objIndex := uintptr(special.offset) / size
        p := s.base() + objIndex*size
        mbits := s.markBitsForIndex(objIndex)
        if !mbits.isMarked() {
            // This object is not marked and has at least one special record.
            // Pass 1: see if it has at least one finalizer.
            hasFin := false
            endOffset := p - s.base() + size
            for tmp := special; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
                if tmp.kind == _KindSpecialFinalizer {
                    // Stop freeing of object if it has a finalizer.
                    mbits.setMarkedNonAtomic()
                    hasFin = true
                    break
                }
            }
            // Pass 2: queue all finalizers _or_ handle profile record.
            for special != nil && uintptr(special.offset) < endOffset {
                // Find the exact byte for which the special was setup
                // (as opposed to object beginning).
                p := s.base() + uintptr(special.offset)
                if special.kind == _KindSpecialFinalizer || !hasFin {
                    // Splice out special record.
                    y := special
                    special = special.next
                    *specialp = special
                    freespecial(y, unsafe.Pointer(p), size)
                } else {
                    // This is profile record, but the object has finalizers (so kept alive).
                    // Keep special record.
                    specialp = &special.next
                    special = *specialp
                }
            }
        } else {
            // object is still live: keep special record
            specialp = &special.next
            special = *specialp
        }
    }
    // For debugging only.
    if debug.allocfreetrace != 0 || raceenabled || msanenabled {
        // Find all newly freed objects. This doesn't have to
        // be efficient; allocfreetrace has massive overhead.
        mbits := s.markBitsForBase()
        abits := s.allocBitsForIndex(0)
        for i := uintptr(0); i < s.nelems; i++ {
            if !mbits.isMarked() && (abits.index < s.freeindex || abits.isMarked()) {
                x := s.base() + i*s.elemsize
                if debug.allocfreetrace != 0 {
                    tracefree(unsafe.Pointer(x), size)
                }
                if raceenabled {
                    racefree(unsafe.Pointer(x), size)
                }
                if msanenabled {
                    msanfree(unsafe.Pointer(x), size)
                }
            }
            mbits.advance()
            abits.advance()
        }
    }
    // Count the number of free objects in this span.
    nalloc := uint16(s.countAlloc())
    if spc.sizeclass() == 0 && nalloc == 0 {
        // If the span's size class is 0 (a large object) and the object in it
        // is no longer alive, free the span back to the heap.
        s.needzero = 1
        freeToHeap = true
    }
    nfreed := s.allocCount - nalloc
    if nalloc > s.allocCount {
        print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
        throw("sweep increased allocation count")
    }
    // Set the new allocCount.
    s.allocCount = nalloc
    // Check whether the span has no unallocated objects left.
    wasempty := s.nextFreeIndex() == s.nelems
    s.freeindex = 0 // reset allocation index to start of span.
    if trace.enabled {
        getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
    }
    // gcmarkBits becomes the allocBits.
    // get a fresh cleared gcmarkBits in preparation for next GC
    // The next allocation can then consult allocBits to learn which elements
    // are unallocated.
    s.allocBits = s.gcmarkBits
    s.gcmarkBits = newMarkBits(s.nelems)
    // Initialize alloc bits cache (the allocCache starting at freeindex).
    s.refillAllocCache(0)
    // If no object in the span is alive any more, advance sweepgen to the
    // current generation; the span is then handed to mcentral or mheap below.
    // We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
    // because of the potential for a concurrent free/SetFinalizer.
    // But we need to set it before we make the span available for allocation
    // (return it to heap or mcentral), because allocation code assumes that a
    // span is already swept if available for allocation.
    if freeToHeap || nfreed == 0 {
        // The span must be in our exclusive ownership until we update sweepgen,
        // check for potential races.
        if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
            print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
            throw("MSpan_Sweep: bad span state after sweep")
        }
        // Serialization point.
        // At this point the mark bits are cleared and allocation ready
        // to go so release the span.
        atomic.Store(&s.sweepgen, sweepgen)
    }
    if nfreed > 0 && spc.sizeclass() != 0 {
        // Return the span to mcentral; res reports whether that succeeded.
        c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreed)
        res = mheap_.central[spc].mcentral.freeSpan(s, preserve, wasempty)
        // MCentral_FreeSpan updates sweepgen
    } else if freeToHeap {
        // Free large span to heap
        // NOTE(rsc,dvyukov): The original implementation of efence
        // in CL 22060046 used SysFree instead of SysFault, so that
        // the operating system would eventually give the memory
        // back to us again, so that an efence program could run
        // longer without running out of memory. Unfortunately,
        // calling SysFree here without any kind of adjustment of the
        // heap data structures means that when the memory does
        // come back to us, we have the wrong metadata for it, either in
        // the MSpan structures or in the garbage collection bitmap.
        // Using SysFault here means that the program will run out of
        // memory fairly quickly in efence mode, but at least it won't
        // have mysterious crashes due to confused memory reuse.
        // It should be possible to switch back to SysFree if we also
        // implement and then call some kind of MHeap_DeleteSpan.
        if debug.efence > 0 {
            s.limit = 0 // prevent mlookup from finding this span
            sysFault(unsafe.Pointer(s.base()), size)
        } else {
            mheap_.freeSpan(s, 1)
        }
        c.local_nlargefree++
        c.local_largefree += size
        res = true
    }
    // If the span was neither returned to mcentral nor freed to the heap,
    // it is still in use.
    if !res {
        // The span has been swept and is still in-use, so put
        // it on the swept in-use list (mheap_.sweepSpans).
        mheap_.sweepSpans[sweepgen/2%2].push(s)
    }
    return res
}
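The finalizer handling above can be observed from ordinary user code: when sweep finds an unmarked object that has a finalizer special record, it re-marks the object and queues the finalizer, so the object can only actually be freed in a later cycle. A minimal demonstration (the Sleep is just to give the finalizer goroutine a chance to run):

package main

import (
    "fmt"
    "runtime"
    "time"
)

type resource struct{ id int }

func main() {
    r := &resource{id: 1}
    runtime.SetFinalizer(r, func(r *resource) {
        fmt.Println("finalizer ran for resource", r.id)
    })
    r = nil // drop the last reference

    // First cycle: sweep sees the unmarked object has a finalizer special,
    // marks it live again and queues the finalizer (pass 1 and 2 above).
    runtime.GC()
    time.Sleep(100 * time.Millisecond) // let the finalizer goroutine run

    // Second cycle: the special record is gone, so the object can be freed.
    runtime.GC()
}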

From bgsweep and the allocator code shown earlier, it is clear that the sweep phase works quite lazily: in practice the previous cycle's sweeping may still be unfinished when a new round of GC has to start, which is why every GC round must first complete the previous round's sweep work (the Sweep Termination phase).
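To make this lazy scheme concrete, here is a toy model of it (a simplified illustration only, not the runtime's real data structures): the spans left unswept by the previous cycle wait in a queue, and each allocation first sweeps one of them, so the sweep debt is paid off incrementally by the mutator instead of in one pause:

package main

import "fmt"

// span is a stand-in for the runtime's mspan.
type span struct{ swept bool }

// heapModel holds the spans the previous GC cycle left unswept.
type heapModel struct {
    unswept []*span
}

// sweepone sweeps a single leftover span, mirroring mspan.sweep above,
// and reports whether there was anything left to sweep.
func (h *heapModel) sweepone() bool {
    if len(h.unswept) == 0 {
        return false
    }
    s := h.unswept[0]
    h.unswept = h.unswept[1:]
    s.swept = true
    return true
}

// alloc pays a little sweep debt before every allocation. The real
// runtime sweeps until pagesSwept keeps pace with heap growth rather
// than using a fixed one-span quota.
func (h *heapModel) alloc() *span {
    h.sweepone()
    return &span{swept: true}
}

func main() {
    h := &heapModel{unswept: []*span{{}, {}, {}}}
    for i := 0; i < 5; i++ {
        h.alloc()
    }
    fmt.Println("unswept spans left:", len(h.unswept)) // 0
}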

This completes the analysis of the whole GC flow. Finally, here is the implementation of the write barrier function writebarrierptr:

// NOTE: Really dst *unsafe.Pointer, src unsafe.Pointer,
// but if we do that, Go inserts a write barrier on *dst = src.
//go:nosplit
func writebarrierptr(dst *uintptr, src uintptr) {
    if writeBarrier.cgo {
        cgoCheckWriteBarrier(dst, src)
    }
    if !writeBarrier.needed {
        *dst = src
        return
    }
    if src != 0 && src < minPhysPageSize {
        systemstack(func() {
            print("runtime: writebarrierptr *", dst, " = ", hex(src), "\n")
            throw("bad pointer in write barrier")
        })
    }
    // Shade the pointers involved.
    writebarrierptr_prewrite1(dst, src)
    // Store the pointer to the destination.
    *dst = src
}
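Calls to writebarrierptr are not written by hand: the compiler instruments pointer stores that may create heap references and guards them with a cheap check of the global writeBarrier flag. A sketch of what that looks like for a store to a global (the node type is made up for this example; on the pre-1.10 runtimes this series covers the inserted call is writebarrierptr, while newer versions use a different helper):

package main

// node is a hypothetical type used only for this demonstration.
type node struct {
    next *node
}

var head *node

//go:noinline
func link(n *node) {
    // The compiler expands this store roughly into:
    //   if writeBarrier.enabled {
    //       runtime.writebarrierptr(&head, n)
    //   } else {
    //       head = n
    //   }
    // Inspect with: go build -gcflags=-S . 2>&1 | grep writebarrier
    head = n
}

func main() {
    link(&node{})
}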

The writebarrierptr_prewrite1 function:

// writebarrierptr_prewrite1 invokes a write barrier for *dst = src
// prior to the write happening.
//
// Write barrier calls must not happen during critical GC and scheduler
// related operations. In particular there are times when the GC assumes
// that the world is stopped but scheduler related code is still being
// executed, dealing with syscalls, dealing with putting gs on runnable
// queues and so forth. This code cannot execute write barriers because
// the GC might drop them on the floor. Stopping the world involves removing
// the p associated with an m. We use the fact that m.p == nil to indicate
// that we are in one of these critical sections and throw if the write is of
// a pointer to a heap object.
//go:nosplit
func writebarrierptr_prewrite1(dst *uintptr, src uintptr) {
    mp := acquirem()
    if mp.inwb || mp.dying > 0 {
        releasem(mp)
        return
    }
    systemstack(func() {
        if mp.p == 0 && memstats.enablegc && !mp.inwb && inheap(src) {
            throw("writebarrierptr_prewrite1 called with mp.p == nil")
        }
        mp.inwb = true
        gcmarkwb_m(dst, src)
    })
    mp.inwb = false
    releasem(mp)
}

The gcmarkwb_m function:

func gcmarkwb_m(slot *uintptr, ptr uintptr) {
    if writeBarrier.needed {
        // Note: This turns bad pointer writes into bad
        // pointer reads, which could be confusing. We avoid
        // reading from obviously bad pointers, which should
        // take care of the vast majority of these. We could
        // patch this up in the signal handler, or use XCHG to
        // combine the read and the write. Checking inheap is
        // insufficient since we need to track changes to
        // roots outside the heap.
        //
        // Note: profbuf.go omits a barrier during signal handler
        // profile logging; that's safe only because this deletion barrier exists.
        // If we remove the deletion barrier, we'll have to work out
        // a new way to handle the profile logging.
        if slot1 := uintptr(unsafe.Pointer(slot)); slot1 >= minPhysPageSize {
            if optr := *slot; optr != 0 {
                // Shade the old pointer.
                shade(optr)
            }
        }
        // TODO: Make this conditional on the caller's stack color.
        if ptr != 0 && inheap(ptr) {
            // Shade the new pointer.
            shade(ptr)
        }
    }
}

The shade function:

// Shade the object if it isn't already.
// The object is not nil and known to be in the heap.
// Preemption must be disabled.
//go:nowritebarrier
func shade(b uintptr) {
    if obj, hbits, span, objIndex := heapBitsForObject(b, 0, 0); obj != 0 {
        gcw := &getg().m.p.ptr().gcw
        // Mark the object alive and add it to the mark queue (the object turns grey).
        greyobject(obj, 0, 0, hbits, span, gcw, objIndex)
        // If caching work in local mark queues is disallowed, flush to the global queue.
        if gcphase == _GCmarktermination || gcBlackenPromptly {
            // Ps aren't allowed to cache work during mark
            // termination.
            gcw.dispose()
        }
    }
}
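Taken together, gcmarkwb_m and shade implement the hybrid write barrier from the eliminate-rescan proposal linked below: shading the old value of the slot is a Yuasa-style deletion barrier, and shading the newly written pointer is a Dijkstra-style insertion barrier, so neither the old nor the new target can be hidden from the concurrent marker. A toy tri-color model of the idea (illustrative only; the runtime works on mark bitmaps, not on structs like these):

package main

import "fmt"

// Tri-color states, as in the mark phase.
type color int

const (
    white color = iota // not yet reached by the marker
    grey               // reached, not yet scanned
    black              // reached and scanned
)

// object is a toy stand-in for a heap object.
type object struct {
    color color
    ref   *object
}

// workQueue is the grey set, playing the role of gcw above.
var workQueue []*object

// shadeObj greys a white object and queues it, like shade/greyobject.
func shadeObj(o *object) {
    if o != nil && o.color == white {
        o.color = grey
        workQueue = append(workQueue, o)
    }
}

// writePointer models "*slot = ptr" under the hybrid barrier.
func writePointer(slot **object, ptr *object) {
    shadeObj(*slot) // deletion barrier: the old target stays visible
    shadeObj(ptr)   // insertion barrier: the new target stays visible
    *slot = ptr
}

func main() {
    a, b := &object{}, &object{}
    root := &object{ref: a}
    writePointer(&root.ref, b)
    fmt.Println(a.color == grey, b.color == grey) // true true
}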
References:

https://github.com/golang/go
https://making.pusher.com/golangs-real-time-gc-in-theory-and-practice
https://github.com/golang/proposal/blob/master/design/17503-eliminate-rescan.md
https://golang.org/s/go15gcpacing
https://golang.org/ref/mem
https://talks.golang.org/2015/go-gc.pdf
https://docs.google.com/document/d/1ETuA2IOmnaQ4j81AtTGT40Y4_Jr6_IDASEKg0t0dBR8/edit#heading=h.x4kziklnb8fr
https://go-review.googlesource.com/c/go/+/21503
http://www.cnblogs.com/diegodu/p/5803202.html
http://legendtkl.com/2017/04/28/golang-gc
https://lengzzz.com/note/gc-in-golang
Comparing Golang's GC with CoreCLR's GC

Since I have previously analyzed CoreCLR's GC (see this article and this article), I can briefly compare the CoreCLR and Go implementations here:

- CoreCLR objects carry type information; Go objects do not, and instead use a bitmap area to record which words hold pointers.
- CoreCLR allocates objects noticeably faster; to allocate, Go must search a span and write to the bitmap area.
- CoreCLR's collector has much more work to do than Go's:
  - CoreCLR stores objects of all sizes in the same segment and can only scan it linearly;
  - CoreCLR must consult type information to find references, while Go only reads the bitmap;
  - when sweeping, CoreCLR must mark freed objects one by one, while Go merely swaps in the allocBits.
- CoreCLR's pauses are longer than Go's:
  - CoreCLR supports concurrent GC, but less thoroughly than Go, which does not even need a full stop to scan the root objects.
- CoreCLR supports generational GC:
  - although CoreCLR is less efficient than Go at a full GC, most of its collections only scan generation 0 and 1 objects;
  - thanks to generational GC, CoreCLR typically spends less CPU time on GC than Go does.

In short, CoreCLR's allocator and collector are generally more efficient, so CoreCLR delivers higher throughput, but its maximum pause time is longer, because Go's entire GC design centers on minimizing pause time. With distributed computing and horizontal scaling increasingly popular, pursuing low latency and letting the distributed layer solve throughput is clearly wiser than chasing single-machine throughput, and this design goal makes Go better suited than most other languages for writing network services.
