前言
上篇文章提到channel send/recv使用形式及场景
,其中 select单个send/recv case
与send/recv单独使用
时一致,因此本篇先从这些简单场景出发关注send
、recv
的具体处理。
更多内容分享,欢迎关注公众号:Go开发笔记
send
- 单独使用
c <- x
- select单个send case
select {
case c <- x :
}
以上两种场景send处理方式一致,底层对应func为chansend1
。
chansend1具体实现
// chansend1 is the entry point for a plain `c <- x` send from compiled code.
// It always calls chansend with block=true, so the send blocks until it can
// complete (or panics if the channel is closed).
//go:nosplit
func chansend1(c *hchan, elem unsafe.Pointer) {
chansend(c, elem, true, getcallerpc())
}
注意:此时block
默认为true
。
chansend
具体实现(block为true
)
以下源码中省略了block
为false
的逻辑及部分debug及race的逻辑。
/*
* generic single channel send/recv
* If block is not nil,
* then the protocol will not
* sleep but return if it could
* not complete.
*
* sleep can wake up with g.param == nil
* when a channel involved in the sleep has
* been closed. it is easiest to loop and re-run
* the operation; we'll see that it's now closed.
*/
// chansend sends the value at ep on channel c.
// With block=true (the `c <- x` case shown here) it does not return until the
// send completes; it panics if c is (or becomes) closed.
func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
if c == nil {
...
gopark(nil, nil, waitReasonChanSendNilChan, traceEvGoStop, 2) // sending on a nil channel parks the current goroutine forever
throw("unreachable")
}
...
var t0 int64
if blockprofilerate > 0 {
t0 = cputicks()
}
lock(&c.lock) // acquire the channel lock
if c.closed != 0 { // sending on a closed channel: release the lock and panic
unlock(&c.lock)
panic(plainError("send on closed channel"))
}
if sg := c.recvq.dequeue(); sg != nil {// a receiver is already waiting: hand the value to it directly
// Found a waiting receiver. We pass the value we want to send
// directly to the receiver, bypassing the channel buffer (if any).
send(c, sg, ep, func() { unlock(&c.lock) }, 3)
return true
}
// No waiting receiver from here on.
if c.qcount < c.dataqsiz {// buffer has free space: enqueue the element
// Space is available in the channel buffer. Enqueue the element to send.
qp := chanbuf(c, c.sendx)// address of the buffer slot to write
if raceenabled {
racenotify(c, c.sendx, nil)
}
typedmemmove(c.elemtype, qp, ep)// copy the value into the buffer slot
c.sendx++// advance the send index
if c.sendx == c.dataqsiz {// wrap around at the end of the ring buffer
c.sendx = 0
}
c.qcount++ // one more buffered element
unlock(&c.lock)// release the lock
return true // send completed
}
...
// Block on the channel. Some receiver will complete our operation for us.
// Unbuffered channel, or buffer full: block the current goroutine.
gp := getg()// current goroutine
mysg := acquireSudog()// sudog recording this goroutine on the wait queue
mysg.releasetime = 0
if t0 != 0 {
mysg.releasetime = -1
}
// No stack splits between assigning elem and enqueuing mysg
// on gp.waiting where copystack can find it.
mysg.elem = ep // value being sent
mysg.waitlink = nil
mysg.g = gp // goroutine waiting to send
mysg.isSelect = false // not part of a select
mysg.c = c // channel being waited on
gp.waiting = mysg // link goroutine and sudog to each other
gp.param = nil // clear the wakeup parameter
c.sendq.enqueue(mysg) // park this sudog on the channel's send wait queue
// Signal to anyone trying to shrink our stack that we're about
// to park on a channel. The window between when this G's status
// changes and when we set gp.activeStackChans is not safe for
// stack shrinking.
atomic.Store8(&gp.parkingOnChan, 1)// mark this g as parking on a channel
gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanSend, traceEvGoBlockSend, 2)// block, recording the wait reason
// Ensure the value being sent is kept alive until the
// receiver copies it out. The sudog has a pointer to the
// stack object, but sudogs aren't considered as roots of the
// stack tracer.
KeepAlive(ep)// keep ep alive until the receiver has copied it out
// Someone woke us up.
if mysg != gp.waiting {
throw("G waiting list is corrupted")
}
gp.waiting = nil // no longer waiting
gp.activeStackChans = false
closed := !mysg.success // a wakeup without success means the channel was closed
gp.param = nil // clear the wakeup parameter
if mysg.releasetime > 0 {
blockevent(mysg.releasetime-t0, 2)
}
mysg.c = nil // detach the sudog from the channel
releaseSudog(mysg) // return the sudog to the pool
if closed { // woken because the channel was closed: panic
if c.closed == 0 {
throw("chansend: spurious wakeup")
}
panic(plainError("send on closed channel"))
}
return true // send completed (a receiver took the value)
}
// send processes a send operation on an empty channel c.
// The value ep sent by the sender is copied to the receiver sg.
// The receiver is then woken up to go on its merry way.
// Channel c must be empty and locked. send unlocks c with unlockf.
// sg must already be dequeued from c.
// ep must be non-nil and point to the heap or the caller's stack.
func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
if raceenabled {
if c.dataqsiz == 0 {
racesync(c, sg)
} else {
// Pretend we go through the buffer, even though
// we copy directly. Note that we need to increment
// the head/tail locations only when raceenabled.
racenotify(c, c.recvx, nil)
racenotify(c, c.recvx, sg)
c.recvx++
if c.recvx == c.dataqsiz {
c.recvx = 0
}
c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
}
}
if sg.elem != nil {// the receiver supplied a destination address
sendDirect(c.elemtype, sg, ep)// copy the value straight into the receiver's destination
sg.elem = nil // drop the reference once the value has been delivered
}
gp := sg.g // the receiving goroutine
unlockf()
gp.param = unsafe.Pointer(sg) // wakeup parameter: the sudog that was satisfied
sg.success = true // the communication succeeded (not a close)
if sg.releasetime != 0 {
sg.releasetime = cputicks()
}
goready(gp, skip+1) // make the receiving goroutine runnable
}
// Sends and receives on unbuffered or empty-buffered channels are the
// only operations where one running goroutine writes to the stack of
// another running goroutine. The GC assumes that stack writes only
// happen when the goroutine is running and are only done by that
// goroutine. Using a write barrier is sufficient to make up for
// violating that assumption, but the write barrier has to work.
// typedmemmove will call bulkBarrierPreWrite, but the target bytes
// are not in the heap, so that will not help. We arrange to call
// memmove and typeBitsBulkBarrier instead.
func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) {
// src is on our stack, dst is a slot on another stack (cross-stack copy).
// Once we read sg.elem out of sg, it will no longer
// be updated if the destination's stack gets copied (shrunk).
// So make sure that no preemption points can happen between read & use.
dst := sg.elem
typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)// write barrier before the copy; no preemption between read and use of dst
// No need for cgo write barrier checks because dst is always
// Go memory.
memmove(dst, src, t.size) // copy the value into dst, i.e. sg.elem (the receiver's destination)
}
此场景下使用send时,需要注意:
- block为true
- 向nil的chan send会造成当前goroutine永久阻塞
- 向closed的chan send会引发panic
- 若已经有等待的接收者,会直接发送至接收者(直接拷贝数据到接收者)
- 若缓存未满,则继续缓存
- 若缓存已满,则会将当前goroutine封装添加到等待发送列表,然后阻塞当前goroutine,等待唤醒
- recv/close操作均可以唤醒阻塞的send goroutine,区别在于:close后当前goroutine会panic,recv后返回true
recv
- 单独使用
x = <- c // recv1 x可忽略
x,ok = <- c // recv2 x/ok可忽略
- select单个recv case
select{
case x = <- c: // recv1 x可忽略
}
select{
case x,ok = <- c: // recv2 x/ok可忽略
}
以上两种场景recv处理方式一致,recv1
、recv2
分别对应底层func chanrecv1
、 chanrecv2
。
chanrecv1和chanrecv2具体实现
// chanrecv1 is the entry point for `<- c` / `x = <- c` from compiled code
// (the received-ok flag is not requested). block is always true.
//go:nosplit
func chanrecv1(c *hchan, elem unsafe.Pointer) {
chanrecv(c, elem, true)
}
// chanrecv2 is the entry point for `x, ok = <- c` from compiled code:
// identical to chanrecv1 except it also returns whether a value was
// received (false when the channel is closed and drained). block is true.
//go:nosplit
func chanrecv2(c *hchan, elem unsafe.Pointer) (received bool) {
_, received = chanrecv(c, elem, true)
return
}
chanrecv1
、chanrecv2
二者均调用了chanrecv
,区别在于是否有received
,注意block
的默认值为true
。
chanrecv
具体实现(block为true
)
以下源码中省略了block
为false
的逻辑及部分debug及race的逻辑。
// chanrecv receives on channel c and writes the received data to ep.
// ep may be nil, in which case received data is ignored.
// If block == false and no elements are available, returns (false, false).
// Otherwise, if c is closed, zeros *ep and returns (true, false).
// Otherwise, fills in *ep with an element and returns (true, true).
// A non-nil ep must point to the heap or the caller's stack.
// chanrecv receives on channel c and writes the received data to ep.
// ep may be nil, in which case the received value is discarded.
// With block=true (the cases shown here) it does not return until a value
// is received or the channel is closed and drained.
func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
...
if c == nil {
...
gopark(nil, nil, waitReasonChanReceiveNilChan, traceEvGoStop, 2)// receiving from a nil channel parks the current goroutine forever
throw("unreachable")
}
...
var t0 int64
if blockprofilerate > 0 {
t0 = cputicks()
}
lock(&c.lock)
if c.closed != 0 && c.qcount == 0 { // closed and drained: receive the element type's zero value
...
unlock(&c.lock)
if ep != nil {
typedmemclr(c.elemtype, ep)
}
return true, false
}
if sg := c.sendq.dequeue(); sg != nil {// a sender is already waiting: take its value (via the buffer head when buffered)
// Found a waiting sender. If buffer is size 0, receive value
// directly from sender. Otherwise, receive from head of queue
// and add sender's value to the tail of the queue (both map to
// the same buffer slot because the queue is full).
recv(c, sg, ep, func() { unlock(&c.lock) }, 3)
return true, true
}
if c.qcount > 0 {// buffered data is available
// Receive directly from queue
qp := chanbuf(c, c.recvx)// address of the buffer slot to read
if raceenabled {
racenotify(c, c.recvx, nil)
}
if ep != nil {// a destination exists: copy the buffered value into ep
typedmemmove(c.elemtype, ep, qp)
}
typedmemclr(c.elemtype, qp) // clear the vacated buffer slot
c.recvx++ // advance the receive index
if c.recvx == c.dataqsiz {
c.recvx = 0
}
c.qcount-- // one fewer buffered element
unlock(&c.lock)
return true, true
}
...
// no sender available: block on this channel.
gp := getg() // current goroutine
mysg := acquireSudog()// sudog recording this goroutine on the wait queue
mysg.releasetime = 0
if t0 != 0 {
mysg.releasetime = -1
}
// No stack splits between assigning elem and enqueuing mysg
// on gp.waiting where copystack can find it.
mysg.elem = ep // destination for the received value
mysg.waitlink = nil
gp.waiting = mysg // link goroutine and sudog to each other
mysg.g = gp // goroutine waiting to receive
mysg.isSelect = false // not part of a select
mysg.c = c // channel being waited on
gp.param = nil // clear the wakeup parameter
c.recvq.enqueue(mysg) // park this sudog on the channel's receive wait queue
// Signal to anyone trying to shrink our stack that we're about
// to park on a channel. The window between when this G's status
// changes and when we set gp.activeStackChans is not safe for
// stack shrinking.
atomic.Store8(&gp.parkingOnChan, 1)// mark this g as parking on a channel
gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanReceive, traceEvGoBlockRecv, 2) // block, recording the wait reason
// Someone woke us up.
if mysg != gp.waiting {
throw("G waiting list is corrupted")
}
// Reset wait state.
gp.waiting = nil
gp.activeStackChans = false
if mysg.releasetime > 0 {
blockevent(mysg.releasetime-t0, 2)
}
success := mysg.success // true if a sender delivered a value; false if the channel was closed
gp.param = nil
mysg.c = nil
releaseSudog(mysg) // return the sudog to the pool
return true, success // selected; received reflects whether a real value arrived
}
// recv processes a receive operation on a full channel c.
// There are 2 parts:
// 1) The value sent by the sender sg is put into the channel
// and the sender is woken up to go on its merry way.
// 2) The value received by the receiver (the current G) is
// written to ep.
// For synchronous channels, both values are the same.
// For asynchronous channels, the receiver gets its data from
// the channel buffer and the sender's data is put in the
// channel buffer.
// Channel c must be full and locked. recv unlocks c with unlockf.
// sg must already be dequeued from c.
// A non-nil ep must point to the heap or the caller's stack.
func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
if c.dataqsiz == 0 {
if raceenabled {
racesync(c, sg)
}
if ep != nil { // unbuffered channel: copy the sender's value straight into the destination
// copy data from sender
recvDirect(c.elemtype, sg, ep)
}
} else {
// Queue is full. Take the item at the
// head of the queue. Make the sender enqueue
// its item at the tail of the queue. Since the
// queue is full, those are both the same slot.
qp := chanbuf(c, c.recvx) // address of the buffer slot at the head of the queue
if raceenabled {
racenotify(c, c.recvx, nil)
racenotify(c, c.recvx, sg)
}
// copy data from queue to receiver
if ep != nil { // a destination exists: copy the buffered value into ep
typedmemmove(c.elemtype, ep, qp)
}
// copy data from sender to queue
typedmemmove(c.elemtype, qp, sg.elem) // the waiting sender's value takes the vacated slot
c.recvx++ // advance the receive index
if c.recvx == c.dataqsiz {
c.recvx = 0
}
// Queue is still full, so the send index tracks the receive index.
c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
}
sg.elem = nil // release the sender's data pointer
gp := sg.g // the sending goroutine
unlockf() // release the channel lock
gp.param = unsafe.Pointer(sg)
sg.success = true // the send succeeded
if sg.releasetime != 0 {
sg.releasetime = cputicks()
}
goready(gp, skip+1) // make the sending goroutine runnable
}
// recvDirect copies the value directly from the waiting sender's location
// (sg.elem, on another goroutine's stack) into the receiver's destination dst.
// Mirror of sendDirect: the cross-stack copy needs an explicit write barrier.
func recvDirect(t *_type, sg *sudog, dst unsafe.Pointer) {
// dst is on our stack or the heap, src is on another stack.
// The channel is locked, so src will not move during this
// operation.
src := sg.elem
typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
memmove(dst, src, t.size) // copy the value into dst
}
单独使用recv时,需要注意:
- block为true
- 从nil的chan recv会造成当前goroutine永久阻塞
- 从closed且缓存为空的chan recv会获取到chan类型的零值
- 若已经有等待的发送者:
  - 若是无缓存chan,会直接从发送者接收数据(从发送者直接拷贝数据到接收者)
  - 否则,取待接收位置的缓存,并将发送者的数据存储至待发送位置的缓存
- 若有缓存,则取待接收位置的缓存
- 若没有缓存,则会将当前goroutine封装添加到等待接收列表,然后阻塞当前goroutine,等待唤醒
- send/close操作均可以唤醒阻塞的recv goroutine,区别在于:close后,若有缓存,recv会返回缓存,若没有缓存,recv会返回零值;send后返回true, true
总结
最后以一张图总结单独使用send
/recv
的处理逻辑: