Source file src/runtime/chan.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  // This file contains the implementation of Go channels.
     8  
     9  // Invariants:
    10  //  At least one of c.sendq and c.recvq is empty,
    11  //  except for the case of an unbuffered channel with a single goroutine
    12  //  blocked on it for both sending and receiving using a select statement,
    13  //  in which case the length of c.sendq and c.recvq is limited only by the
    14  //  size of the select statement.
    15  //
    16  // For buffered channels, also:
    17  //  c.qcount > 0 implies that c.recvq is empty.
    18  //  c.qcount < c.dataqsiz implies that c.sendq is empty.
    19  
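As a concrete illustration of the exception above, here is a minimal user-level sketch (not part of this file): a single goroutine select-ing on both directions of one unbuffered channel is parked on both queues at once, with one sudog on c.sendq and one on c.recvq.

	package main

	func main() {
		c := make(chan int) // unbuffered
		done := make(chan struct{})
		go func() {
			// One goroutine blocked on c in both directions: the runtime
			// enqueues a sudog on c.sendq and another on c.recvq.
			select {
			case c <- 1:
			case <-c:
			}
			close(done)
		}()
		c <- 2 // pairs with the receive case above
		<-done
	}
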
    20  import (
    21  	"runtime/internal/atomic"
    22  	"runtime/internal/math"
    23  	"unsafe"
    24  )
    25  
    26  const (
    27  	maxAlign  = 8
    28  	hchanSize = unsafe.Sizeof(hchan{}) + uintptr(-int(unsafe.Sizeof(hchan{}))&(maxAlign-1))
    29  	debugChan = false
    30  )
    31  
    32  type hchan struct {
    33  	qcount   uint           // total data in the queue
    34  	dataqsiz uint           // size of the circular queue
    35  	buf      unsafe.Pointer // points to an array of dataqsiz elements
    36  	elemsize uint16
    37  	closed   uint32
    38  	elemtype *_type // element type
    39  	sendx    uint   // send index
    40  	recvx    uint   // receive index
    41  	recvq    waitq  // list of recv waiters
    42  	sendq    waitq  // list of send waiters
    43  
    44  	// lock protects all fields in hchan, as well as several
    45  	// fields in sudogs blocked on this channel.
    46  	//
    47  	// Do not change another G's status while holding this lock
    48  	// (in particular, do not ready a G), as this can deadlock
    49  	// with stack shrinking.
    50  	lock mutex
    51  }
    52  
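The len and cap built-ins on a channel read qcount and dataqsiz respectively (see reflect_chanlen and reflect_chancap below). A quick sketch:

	package main

	func main() {
		c := make(chan byte, 4)
		c <- 1
		c <- 2
		println(len(c), cap(c)) // 2 4: len reports c.qcount, cap reports c.dataqsiz
	}
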
    53  type waitq struct {
    54  	first *sudog
    55  	last  *sudog
    56  }
    57  
    58  //go:linkname reflect_makechan reflect.makechan
    59  func reflect_makechan(t *chantype, size int) *hchan {
    60  	return makechan(t, size)
    61  }
    62  
    63  func makechan64(t *chantype, size int64) *hchan {
    64  	if int64(int(size)) != size {
    65  		panic(plainError("makechan: size out of range"))
    66  	}
    67  
    68  	return makechan(t, int(size))
    69  }
    70  
    71  func makechan(t *chantype, size int) *hchan {
    72  	elem := t.elem
    73  
    74  	// compiler checks this but be safe.
    75  	if elem.size >= 1<<16 {
    76  		throw("makechan: invalid channel element type")
    77  	}
    78  	if hchanSize%maxAlign != 0 || elem.align > maxAlign {
    79  		throw("makechan: bad alignment")
    80  	}
    81  
    82  	mem, overflow := math.MulUintptr(elem.size, uintptr(size))
    83  	if overflow || mem > maxAlloc-hchanSize || size < 0 {
    84  		panic(plainError("makechan: size out of range"))
    85  	}
    86  
    87  	// Hchan does not contain pointers interesting for GC when elements stored in buf do not contain pointers.
    88  	// buf points into the same allocation, elemtype is persistent.
    89  	// SudoG's are referenced from their owning thread so they can't be collected.
    90  	// TODO(dvyukov,rlh): Rethink when collector can move allocated objects.
    91  	var c *hchan
    92  	switch {
    93  	case mem == 0:
    94  		// Queue or element size is zero.
    95  		c = (*hchan)(mallocgc(hchanSize, nil, true))
    96  		// Race detector uses this location for synchronization.
    97  		c.buf = c.raceaddr()
    98  	case elem.ptrdata == 0:
    99  		// Elements do not contain pointers.
   100  		// Allocate hchan and buf in one call.
   101  		c = (*hchan)(mallocgc(hchanSize+mem, nil, true))
   102  		c.buf = add(unsafe.Pointer(c), hchanSize)
   103  	default:
   104  		// Elements contain pointers.
   105  		c = new(hchan)
   106  		c.buf = mallocgc(mem, elem, true)
   107  	}
   108  
   109  	c.elemsize = uint16(elem.size)
   110  	c.elemtype = elem
   111  	c.dataqsiz = uint(size)
   112  	lockInit(&c.lock, lockRankHchan)
   113  
   114  	if debugChan {
   115  		print("makechan: chan=", c, "; elemsize=", elem.size, "; dataqsiz=", size, "\n")
   116  	}
   117  	return c
   118  }
   119  
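The switch above selects among three allocation layouts. A hedged sketch of which user-level make call lands in which case:

	package main

	func main() {
		_ = make(chan struct{}, 8) // mem == 0: zero-size elements; buf is only a race-detector address
		_ = make(chan int)         // mem == 0: unbuffered; the same single hchan allocation
		_ = make(chan int, 8)      // elem.ptrdata == 0: hchan and buf in one pointer-free allocation
		_ = make(chan *int, 8)     // default: buf allocated separately so the GC scans its elements
	}
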
   120  // chanbuf(c, i) is a pointer to the i'th slot in the buffer.
   121  func chanbuf(c *hchan, i uint) unsafe.Pointer {
   122  	return add(c.buf, uintptr(i)*uintptr(c.elemsize))
   123  }
   124  
   125  // full reports whether a send on c would block (that is, the channel is full).
   126  // It uses a single word-sized read of mutable state, so although
   127  // the answer is true at the instant it is read, the correct answer
   128  // may have changed by the time the calling function receives the return value.
   129  func full(c *hchan) bool {
   130  	// c.dataqsiz is immutable (never written after the channel is created)
   131  	// so it is safe to read at any time during channel operation.
   132  	if c.dataqsiz == 0 {
   133  		// Assumes that a pointer read is relaxed-atomic.
   134  		return c.recvq.first == nil
   135  	}
   136  	// Assumes that a uint read is relaxed-atomic.
   137  	return c.qcount == c.dataqsiz
   138  }
   139  
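full backs the failed-send fast path in chansend below. At the language level, the same momentary check is what a send in a select with a default case expresses; trySend is a hypothetical helper, not something defined in this file:

	// trySend reports whether v was sent without blocking.
	func trySend(c chan int, v int) bool {
		select {
		case c <- v:
			return true
		default:
			// The send would block: the buffer is full, or the channel is
			// unbuffered with no receiver waiting (exactly what full reports).
			return false
		}
	}
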
   140  // entry point for c <- x from compiled code
   141  //go:nosplit
   142  func chansend1(c *hchan, elem unsafe.Pointer) {
   143  	chansend(c, elem, true, getcallerpc())
   144  }
   145  
   146  /*
   147   * generic single channel send/recv
   148   * If block is false,
   149   * then the protocol will not
   150   * sleep but return if it could
   151   * not complete.
   152   *
   153   * sleep can wake up with g.param == nil
   154   * when a channel involved in the sleep has
   155   * been closed.  it is easiest to loop and re-run
   156   * the operation; we'll see that it's now closed.
   157   */
   158  func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
   159  	if c == nil {
   160  		if !block {
   161  			return false
   162  		}
   163  		gopark(nil, nil, waitReasonChanSendNilChan, traceEvGoStop, 2)
   164  		throw("unreachable")
   165  	}
   166  
   167  	if debugChan {
   168  		print("chansend: chan=", c, "\n")
   169  	}
   170  
   171  	if raceenabled {
   172  		racereadpc(c.raceaddr(), callerpc, funcPC(chansend))
   173  	}
   174  
   175  	// Fast path: check for failed non-blocking operation without acquiring the lock.
   176  	//
   177  	// After observing that the channel is not closed, we observe that the channel is
   178  	// not ready for sending. Each of these observations is a single word-sized read
   179  	// (first c.closed and second full()).
   180  	// Because a closed channel cannot transition from 'ready for sending' to
   181  	// 'not ready for sending', even if the channel is closed between the two observations,
   182  	// they imply a moment between the two when the channel was both not yet closed
   183  	// and not ready for sending. We behave as if we observed the channel at that moment,
   184  	// and report that the send cannot proceed.
   185  	//
   186  	// It is okay if the reads are reordered here: if we observe that the channel is not
   187  	// ready for sending and then observe that it is not closed, that implies that the
   188  	// channel wasn't closed during the first observation. However, nothing here
   189  	// guarantees forward progress. We rely on the side effects of lock release in
   190  	// chanrecv() and closechan() to update this thread's view of c.closed and full().
   191  	if !block && c.closed == 0 && full(c) {
   192  		return false
   193  	}
   194  
   195  	var t0 int64
   196  	if blockprofilerate > 0 {
   197  		t0 = cputicks()
   198  	}
   199  
   200  	lock(&c.lock)
   201  
   202  	if c.closed != 0 {
   203  		unlock(&c.lock)
   204  		panic(plainError("send on closed channel"))
   205  	}
   206  
   207  	if sg := c.recvq.dequeue(); sg != nil {
   208  		// Found a waiting receiver. We pass the value we want to send
   209  		// directly to the receiver, bypassing the channel buffer (if any).
   210  		send(c, sg, ep, func() { unlock(&c.lock) }, 3)
   211  		return true
   212  	}
   213  
   214  	if c.qcount < c.dataqsiz {
   215  		// Space is available in the channel buffer. Enqueue the element to send.
   216  		qp := chanbuf(c, c.sendx)
   217  		if raceenabled {
   218  			raceacquire(qp)
   219  			racerelease(qp)
   220  		}
   221  		typedmemmove(c.elemtype, qp, ep)
   222  		c.sendx++
   223  		if c.sendx == c.dataqsiz {
   224  			c.sendx = 0
   225  		}
   226  		c.qcount++
   227  		unlock(&c.lock)
   228  		return true
   229  	}
   230  
   231  	if !block {
   232  		unlock(&c.lock)
   233  		return false
   234  	}
   235  
   236  	// Block on the channel. Some receiver will complete our operation for us.
   237  	gp := getg()
   238  	mysg := acquireSudog()
   239  	mysg.releasetime = 0
   240  	if t0 != 0 {
   241  		mysg.releasetime = -1
   242  	}
   243  	// No stack splits between assigning elem and enqueuing mysg
   244  	// on gp.waiting where copystack can find it.
   245  	mysg.elem = ep
   246  	mysg.waitlink = nil
   247  	mysg.g = gp
   248  	mysg.isSelect = false
   249  	mysg.c = c
   250  	gp.waiting = mysg
   251  	gp.param = nil
   252  	c.sendq.enqueue(mysg)
   253  	// Signal to anyone trying to shrink our stack that we're about
   254  	// to park on a channel. The window between when this G's status
   255  	// changes and when we set gp.activeStackChans is not safe for
   256  	// stack shrinking.
   257  	atomic.Store8(&gp.parkingOnChan, 1)
   258  	gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanSend, traceEvGoBlockSend, 2)
   259  	// Ensure the value being sent is kept alive until the
   260  	// receiver copies it out. The sudog has a pointer to the
   261  	// stack object, but sudogs aren't considered as roots of the
   262  	// stack tracer.
   263  	KeepAlive(ep)
   264  
   265  	// someone woke us up.
   266  	if mysg != gp.waiting {
   267  		throw("G waiting list is corrupted")
   268  	}
   269  	gp.waiting = nil
   270  	gp.activeStackChans = false
   271  	if gp.param == nil {
   272  		if c.closed == 0 {
   273  			throw("chansend: spurious wakeup")
   274  		}
   275  		panic(plainError("send on closed channel"))
   276  	}
   277  	gp.param = nil
   278  	if mysg.releasetime > 0 {
   279  		blockevent(mysg.releasetime-t0, 2)
   280  	}
   281  	mysg.c = nil
   282  	releaseSudog(mysg)
   283  	return true
   284  }
   285  
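chansend thus takes one of three paths: hand the value directly to a parked receiver, copy it into the buffer, or park the sender. A small sketch exercising all three (whether the second send actually parks depends on scheduling, as hedged in the comments):

	package main

	import "fmt"

	func main() {
		c := make(chan int, 1)
		c <- 1                 // buffer path: space available, value copied into c.buf
		go func() { c <- 2 }() // blocking path: buffer full, so this sender typically parks on c.sendq
		fmt.Println(<-c, <-c)  // 1 2: each receive unblocks or makes room for the parked sender

		u := make(chan int)
		done := make(chan struct{})
		go func() { fmt.Println(<-u); close(done) }()
		u <- 3 // direct path: a parked receiver gets the value without touching any buffer
		<-done
	}
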
   286  // send processes a send operation on an empty channel c.
   287  // The value ep sent by the sender is copied to the receiver sg.
   288  // The receiver is then woken up to go on its merry way.
   289  // Channel c must be empty and locked.  send unlocks c with unlockf.
   290  // sg must already be dequeued from c.
   291  // ep must be non-nil and point to the heap or the caller's stack.
   292  func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
   293  	if raceenabled {
   294  		if c.dataqsiz == 0 {
   295  			racesync(c, sg)
   296  		} else {
   297  			// Pretend we go through the buffer, even though
   298  			// we copy directly. Note that we need to increment
   299  			// the head/tail locations only when raceenabled.
   300  			qp := chanbuf(c, c.recvx)
   301  			raceacquire(qp)
   302  			racerelease(qp)
   303  			raceacquireg(sg.g, qp)
   304  			racereleaseg(sg.g, qp)
   305  			c.recvx++
   306  			if c.recvx == c.dataqsiz {
   307  				c.recvx = 0
   308  			}
   309  			c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
   310  		}
   311  	}
   312  	if sg.elem != nil {
   313  		sendDirect(c.elemtype, sg, ep)
   314  		sg.elem = nil
   315  	}
   316  	gp := sg.g
   317  	unlockf()
   318  	gp.param = unsafe.Pointer(sg)
   319  	if sg.releasetime != 0 {
   320  		sg.releasetime = cputicks()
   321  	}
   322  	goready(gp, skip+1)
   323  }
   324  
   325  // Sends and receives on unbuffered or empty-buffered channels are the
   326  // only operations where one running goroutine writes to the stack of
   327  // another running goroutine. The GC assumes that stack writes only
   328  // happen when the goroutine is running and are only done by that
   329  // goroutine. Using a write barrier is sufficient to make up for
   330  // violating that assumption, but the write barrier has to work.
   331  // typedmemmove will call bulkBarrierPreWrite, but the target bytes
   332  // are not in the heap, so that will not help. We arrange to call
   333  // memmove and typeBitsBulkBarrier instead.
   334  
   335  func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) {
   336  	// src is on our stack, dst is a slot on another stack.
   337  
   338  	// Once we read sg.elem out of sg, it will no longer
   339  	// be updated if the destination's stack gets copied (shrunk).
   340  	// So make sure that no preemption points can happen between read & use.
   341  	dst := sg.elem
   342  	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
   343  	// No need for cgo write barrier checks because dst is always
   344  	// Go memory.
   345  	memmove(dst, src, t.size)
   346  }
   347  
   348  func recvDirect(t *_type, sg *sudog, dst unsafe.Pointer) {
   349  	// dst is on our stack or the heap, src is on another stack.
   350  	// The channel is locked, so src will not move during this
   351  	// operation.
   352  	src := sg.elem
   353  	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
   354  	memmove(dst, src, t.size)
   355  }
   356  
   357  func closechan(c *hchan) {
   358  	if c == nil {
   359  		panic(plainError("close of nil channel"))
   360  	}
   361  
   362  	lock(&c.lock)
   363  	if c.closed != 0 {
   364  		unlock(&c.lock)
   365  		panic(plainError("close of closed channel"))
   366  	}
   367  
   368  	if raceenabled {
   369  		callerpc := getcallerpc()
   370  		racewritepc(c.raceaddr(), callerpc, funcPC(closechan))
   371  		racerelease(c.raceaddr())
   372  	}
   373  
   374  	c.closed = 1
   375  
   376  	var glist gList
   377  
   378  	// release all readers
   379  	for {
   380  		sg := c.recvq.dequeue()
   381  		if sg == nil {
   382  			break
   383  		}
   384  		if sg.elem != nil {
   385  			typedmemclr(c.elemtype, sg.elem)
   386  			sg.elem = nil
   387  		}
   388  		if sg.releasetime != 0 {
   389  			sg.releasetime = cputicks()
   390  		}
   391  		gp := sg.g
   392  		gp.param = nil
   393  		if raceenabled {
   394  			raceacquireg(gp, c.raceaddr())
   395  		}
   396  		glist.push(gp)
   397  	}
   398  
   399  	// release all writers (they will panic)
   400  	for {
   401  		sg := c.sendq.dequeue()
   402  		if sg == nil {
   403  			break
   404  		}
   405  		sg.elem = nil
   406  		if sg.releasetime != 0 {
   407  			sg.releasetime = cputicks()
   408  		}
   409  		gp := sg.g
   410  		gp.param = nil
   411  		if raceenabled {
   412  			raceacquireg(gp, c.raceaddr())
   413  		}
   414  		glist.push(gp)
   415  	}
   416  	unlock(&c.lock)
   417  
   418  	// Ready all Gs now that we've dropped the channel lock.
   419  	for !glist.empty() {
   420  		gp := glist.pop()
   421  		gp.schedlink = 0
   422  		goready(gp, 3)
   423  	}
   424  }
   425  
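In user-level terms closechan means: parked receivers wake with the zero value, parked senders wake into a panic, data already in the buffer remains receivable, and closing again panics. A minimal sketch:

	package main

	import "fmt"

	func main() {
		c := make(chan int, 2)
		c <- 1
		c <- 2
		close(c) // readies all parked receivers and senders (the latter panic)

		for i := 0; i < 3; i++ {
			v, ok := <-c
			fmt.Println(v, ok) // 1 true, 2 true, then 0 false once drained
		}

		// close(c) // would panic: close of closed channel (checked above)
		// c <- 3   // would panic: send on closed channel
	}
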
   426  // empty reports whether a read from c would block (that is, the channel is
   427  // empty).  It uses a single atomic read of mutable state.
   428  func empty(c *hchan) bool {
   429  	// c.dataqsiz is immutable.
   430  	if c.dataqsiz == 0 {
   431  		return atomic.Loadp(unsafe.Pointer(&c.sendq.first)) == nil
   432  	}
   433  	return atomic.Loaduint(&c.qcount) == 0
   434  }
   435  
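empty backs the failed-receive fast path in chanrecv below; at the language level it corresponds to a receive in a select with a default case. tryRecv is a hypothetical helper, not part of this file:

	// tryRecv mirrors chanrecv's non-blocking contract: selected is false
	// when the receive would block.
	func tryRecv(c chan int) (v int, ok, selected bool) {
		select {
		case v, ok = <-c:
			return v, ok, true // got data, or the channel is closed and drained (ok == false)
		default:
			return 0, false, false // open and empty: a receive would block
		}
	}
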
   436  // entry points for <- c from compiled code
   437  //go:nosplit
   438  func chanrecv1(c *hchan, elem unsafe.Pointer) {
   439  	chanrecv(c, elem, true)
   440  }
   441  
   442  //go:nosplit
   443  func chanrecv2(c *hchan, elem unsafe.Pointer) (received bool) {
   444  	_, received = chanrecv(c, elem, true)
   445  	return
   446  }
   447  
   448  // chanrecv receives on channel c and writes the received data to ep.
   449  // ep may be nil, in which case received data is ignored.
   450  // If block == false and no elements are available, returns (false, false).
   451  // Otherwise, if c is closed, zeros *ep and returns (true, false).
   452  // Otherwise, fills in *ep with an element and returns (true, true).
   453  // A non-nil ep must point to the heap or the caller's stack.
   454  func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
   455  	// raceenabled: don't need to check ep, as it is always on the stack
   456  	// or is new memory allocated by reflect.
   457  
   458  	if debugChan {
   459  		print("chanrecv: chan=", c, "\n")
   460  	}
   461  
   462  	if c == nil {
   463  		if !block {
   464  			return
   465  		}
   466  		gopark(nil, nil, waitReasonChanReceiveNilChan, traceEvGoStop, 2)
   467  		throw("unreachable")
   468  	}
   469  
   470  	// Fast path: check for failed non-blocking operation without acquiring the lock.
   471  	if !block && empty(c) {
   472  		// After observing that the channel is not ready for receiving, we observe whether the
   473  		// channel is closed.
   474  		//
   475  		// Reordering of these checks could lead to incorrect behavior when racing with a close.
   476  		// For example, if the channel was open and not empty, was closed, and then drained,
   477  		// reordered reads could incorrectly indicate "open and empty". To prevent reordering,
   478  		// we use atomic loads for both checks, and rely on emptying and closing to happen in
   479  		// separate critical sections under the same lock.  This assumption fails when closing
   480  		// an unbuffered channel with a blocked send, but that is an error condition anyway.
   481  		if atomic.Load(&c.closed) == 0 {
   482  			// Because a channel cannot be reopened, the later observation of the channel
   483  			// being not closed implies that it was also not closed at the moment of the
   484  			// first observation. We behave as if we observed the channel at that moment
   485  			// and report that the receive cannot proceed.
   486  			return
   487  		}
   488  		// The channel is irreversibly closed. Re-check whether the channel has any pending data
   489  		// to receive, which could have arrived between the empty and closed checks above.
   490  		// Sequential consistency is also required here, when racing with such a send.
   491  		if empty(c) {
   492  			// The channel is irreversibly closed and empty.
   493  			if raceenabled {
   494  				raceacquire(c.raceaddr())
   495  			}
   496  			if ep != nil {
   497  				typedmemclr(c.elemtype, ep)
   498  			}
   499  			return true, false
   500  		}
   501  	}
   502  
   503  	var t0 int64
   504  	if blockprofilerate > 0 {
   505  		t0 = cputicks()
   506  	}
   507  
   508  	lock(&c.lock)
   509  
   510  	if c.closed != 0 && c.qcount == 0 {
   511  		if raceenabled {
   512  			raceacquire(c.raceaddr())
   513  		}
   514  		unlock(&c.lock)
   515  		if ep != nil {
   516  			typedmemclr(c.elemtype, ep)
   517  		}
   518  		return true, false
   519  	}
   520  
   521  	if sg := c.sendq.dequeue(); sg != nil {
   522  		// Found a waiting sender. If buffer is size 0, receive value
   523  		// directly from sender. Otherwise, receive from head of queue
   524  		// and add sender's value to the tail of the queue (both map to
   525  		// the same buffer slot because the queue is full).
   526  		recv(c, sg, ep, func() { unlock(&c.lock) }, 3)
   527  		return true, true
   528  	}
   529  
   530  	if c.qcount > 0 {
   531  		// Receive directly from queue
   532  		qp := chanbuf(c, c.recvx)
   533  		if raceenabled {
   534  			raceacquire(qp)
   535  			racerelease(qp)
   536  		}
   537  		if ep != nil {
   538  			typedmemmove(c.elemtype, ep, qp)
   539  		}
   540  		typedmemclr(c.elemtype, qp)
   541  		c.recvx++
   542  		if c.recvx == c.dataqsiz {
   543  			c.recvx = 0
   544  		}
   545  		c.qcount--
   546  		unlock(&c.lock)
   547  		return true, true
   548  	}
   549  
   550  	if !block {
   551  		unlock(&c.lock)
   552  		return false, false
   553  	}
   554  
   555  	// no sender available: block on this channel.
   556  	gp := getg()
   557  	mysg := acquireSudog()
   558  	mysg.releasetime = 0
   559  	if t0 != 0 {
   560  		mysg.releasetime = -1
   561  	}
   562  	// No stack splits between assigning elem and enqueuing mysg
   563  	// on gp.waiting where copystack can find it.
   564  	mysg.elem = ep
   565  	mysg.waitlink = nil
   566  	gp.waiting = mysg
   567  	mysg.g = gp
   568  	mysg.isSelect = false
   569  	mysg.c = c
   570  	gp.param = nil
   571  	c.recvq.enqueue(mysg)
   572  	// Signal to anyone trying to shrink our stack that we're about
   573  	// to park on a channel. The window between when this G's status
   574  	// changes and when we set gp.activeStackChans is not safe for
   575  	// stack shrinking.
   576  	atomic.Store8(&gp.parkingOnChan, 1)
   577  	gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanReceive, traceEvGoBlockRecv, 2)
   578  
   579  	// someone woke us up
   580  	if mysg != gp.waiting {
   581  		throw("G waiting list is corrupted")
   582  	}
   583  	gp.waiting = nil
   584  	gp.activeStackChans = false
   585  	if mysg.releasetime > 0 {
   586  		blockevent(mysg.releasetime-t0, 2)
   587  	}
   588  	closed := gp.param == nil
   589  	gp.param = nil
   590  	mysg.c = nil
   591  	releaseSudog(mysg)
   592  	return true, !closed
   593  }
   594  
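The (selected, received) pair surfaces in the language as the comma-ok receive form, which the compiler lowers through chanrecv2 (see the entry points above). A minimal sketch:

	package main

	import "fmt"

	func main() {
		c := make(chan int, 1)
		c <- 7

		v, ok := <-c       // roughly chanrecv2(c, &v): data available, so received == true
		fmt.Println(v, ok) // 7 true

		close(c)
		v, ok = <-c        // closed and drained: *ep is zeroed and received == false
		fmt.Println(v, ok) // 0 false
	}
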
   595  // recv processes a receive operation on a full channel c.
   596  // There are 2 parts:
   597  // 1) The value sent by the sender sg is put into the channel
   598  //    and the sender is woken up to go on its merry way.
   599  // 2) The value received by the receiver (the current G) is
   600  //    written to ep.
   601  // For synchronous channels, both values are the same.
   602  // For asynchronous channels, the receiver gets its data from
   603  // the channel buffer and the sender's data is put in the
   604  // channel buffer.
   605  // Channel c must be full and locked. recv unlocks c with unlockf.
   606  // sg must already be dequeued from c.
   607  // A non-nil ep must point to the heap or the caller's stack.
   608  func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
   609  	if c.dataqsiz == 0 {
   610  		if raceenabled {
   611  			racesync(c, sg)
   612  		}
   613  		if ep != nil {
   614  			// copy data from sender
   615  			recvDirect(c.elemtype, sg, ep)
   616  		}
   617  	} else {
   618  		// Queue is full. Take the item at the
   619  		// head of the queue. Make the sender enqueue
   620  		// its item at the tail of the queue. Since the
   621  		// queue is full, those are both the same slot.
   622  		qp := chanbuf(c, c.recvx)
   623  		if raceenabled {
   624  			raceacquire(qp)
   625  			racerelease(qp)
   626  			raceacquireg(sg.g, qp)
   627  			racereleaseg(sg.g, qp)
   628  		}
   629  		// copy data from queue to receiver
   630  		if ep != nil {
   631  			typedmemmove(c.elemtype, ep, qp)
   632  		}
   633  		// copy data from sender to queue
   634  		typedmemmove(c.elemtype, qp, sg.elem)
   635  		c.recvx++
   636  		if c.recvx == c.dataqsiz {
   637  			c.recvx = 0
   638  		}
   639  		c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
   640  	}
   641  	sg.elem = nil
   642  	gp := sg.g
   643  	unlockf()
   644  	gp.param = unsafe.Pointer(sg)
   645  	if sg.releasetime != 0 {
   646  		sg.releasetime = cputicks()
   647  	}
   648  	goready(gp, skip+1)
   649  }
   650  
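The buffered branch of recv preserves FIFO order: the receiver takes the head slot and the parked sender's value lands in that same slot, which becomes the new tail. A sketch (whether the second send parks or later finds buffer space is timing-dependent, but the observed order is the same):

	package main

	import "fmt"

	func main() {
		c := make(chan string, 1)
		c <- "head" // buffer now full

		sent := make(chan struct{})
		go func() {
			c <- "tail" // typically parks on c.sendq because the buffer is full
			close(sent)
		}()

		fmt.Println(<-c) // head: recv drains the head slot and refills it from the parked sender
		<-sent
		fmt.Println(<-c) // tail
	}
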
   651  func chanparkcommit(gp *g, chanLock unsafe.Pointer) bool {
   652  	// There are unlocked sudogs that point into gp's stack. Stack
   653  	// copying must lock the channels of those sudogs.
   654  	// Set activeStackChans here instead of before we try parking
   655  	// because we could self-deadlock in stack growth on the
   656  	// channel lock.
   657  	gp.activeStackChans = true
   658  	// Mark that it's safe for stack shrinking to occur now,
   659  	// because any thread acquiring this G's stack for shrinking
   660  	// is guaranteed to observe activeStackChans after this store.
   661  	atomic.Store8(&gp.parkingOnChan, 0)
   662  	// Make sure we unlock after setting activeStackChans and
   663  	// unsetting parkingOnChan. The moment we unlock chanLock
   664  	// we risk gp getting readied by a channel operation and
   665  	// so gp could continue running before everything before
   666  	// the unlock is visible (even to gp itself).
   667  	unlock((*mutex)(chanLock))
   668  	return true
   669  }
   670  
   671  // compiler implements
   672  //
   673  //	select {
   674  //	case c <- v:
   675  //		... foo
   676  //	default:
   677  //		... bar
   678  //	}
   679  //
   680  // as
   681  //
   682  //	if selectnbsend(c, v) {
   683  //		... foo
   684  //	} else {
   685  //		... bar
   686  //	}
   687  //
   688  func selectnbsend(c *hchan, elem unsafe.Pointer) (selected bool) {
   689  	return chansend(c, elem, false, getcallerpc())
   690  }
   691  
   692  // compiler implements
   693  //
   694  //	select {
   695  //	case v = <-c:
   696  //		... foo
   697  //	default:
   698  //		... bar
   699  //	}
   700  //
   701  // as
   702  //
   703  //	if selectnbrecv(&v, c) {
   704  //		... foo
   705  //	} else {
   706  //		... bar
   707  //	}
   708  //
   709  func selectnbrecv(elem unsafe.Pointer, c *hchan) (selected bool) {
   710  	selected, _ = chanrecv(c, elem, false)
   711  	return
   712  }
   713  
   714  // compiler implements
   715  //
   716  //	select {
   717  //	case v, ok = <-c:
   718  //		... foo
   719  //	default:
   720  //		... bar
   721  //	}
   722  //
   723  // as
   724  //
   725  //	if c != nil && selectnbrecv2(&v, &ok, c) {
   726  //		... foo
   727  //	} else {
   728  //		... bar
   729  //	}
   730  //
   731  func selectnbrecv2(elem unsafe.Pointer, received *bool, c *hchan) (selected bool) {
   732  	// TODO(khr): just return 2 values from this function, now that it is in Go.
   733  	selected, *received = chanrecv(c, elem, false)
   734  	return
   735  }
   736  
   737  //go:linkname reflect_chansend reflect.chansend
   738  func reflect_chansend(c *hchan, elem unsafe.Pointer, nb bool) (selected bool) {
   739  	return chansend(c, elem, !nb, getcallerpc())
   740  }
   741  
   742  //go:linkname reflect_chanrecv reflect.chanrecv
   743  func reflect_chanrecv(c *hchan, nb bool, elem unsafe.Pointer) (selected bool, received bool) {
   744  	return chanrecv(c, elem, !nb)
   745  }
   746  
   747  //go:linkname reflect_chanlen reflect.chanlen
   748  func reflect_chanlen(c *hchan) int {
   749  	if c == nil {
   750  		return 0
   751  	}
   752  	return int(c.qcount)
   753  }
   754  
   755  //go:linkname reflectlite_chanlen internal/reflectlite.chanlen
   756  func reflectlite_chanlen(c *hchan) int {
   757  	if c == nil {
   758  		return 0
   759  	}
   760  	return int(c.qcount)
   761  }
   762  
   763  //go:linkname reflect_chancap reflect.chancap
   764  func reflect_chancap(c *hchan) int {
   765  	if c == nil {
   766  		return 0
   767  	}
   768  	return int(c.dataqsiz)
   769  }
   770  
   771  //go:linkname reflect_chanclose reflect.chanclose
   772  func reflect_chanclose(c *hchan) {
   773  	closechan(c)
   774  }
   775  
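These linknamed functions are the runtime entry points behind the reflect package's channel operations. A brief sketch of the corresponding reflect.Value methods, annotated with the entry point each one reaches:

	package main

	import (
		"fmt"
		"reflect"
	)

	func main() {
		v := reflect.ValueOf(make(chan int, 2))
		v.Send(reflect.ValueOf(1))          // reflect_chansend, nb == false
		ok := v.TrySend(reflect.ValueOf(2)) // reflect_chansend, nb == true
		fmt.Println(ok, v.Len(), v.Cap())   // true 2 2: reflect_chanlen, reflect_chancap
		x, ok := v.TryRecv()                // reflect_chanrecv, nb == true
		fmt.Println(x.Int(), ok)            // 1 true
		v.Close()                           // reflect_chanclose
	}
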
   776  func (q *waitq) enqueue(sgp *sudog) {
   777  	sgp.next = nil
   778  	x := q.last
   779  	if x == nil {
   780  		sgp.prev = nil
   781  		q.first = sgp
   782  		q.last = sgp
   783  		return
   784  	}
   785  	sgp.prev = x
   786  	x.next = sgp
   787  	q.last = sgp
   788  }
   789  
   790  func (q *waitq) dequeue() *sudog {
   791  	for {
   792  		sgp := q.first
   793  		if sgp == nil {
   794  			return nil
   795  		}
   796  		y := sgp.next
   797  		if y == nil {
   798  			q.first = nil
   799  			q.last = nil
   800  		} else {
   801  			y.prev = nil
   802  			q.first = y
   803  			sgp.next = nil // mark as removed (see dequeueSudog)
   804  		}
   805  
   806  		// if a goroutine was put on this queue because of a
   807  		// select, there is a small window between the goroutine
   808  		// being woken up by a different case and it grabbing the
   809  		// channel locks. Once it has the locks
   810  		// it removes itself from the queue, so we won't see it after that.
   811  		// We use a flag in the G struct to tell us when someone
   812  		// else has won the race to signal this goroutine but the goroutine
   813  		// hasn't removed itself from the queue yet.
   814  		if sgp.isSelect && !atomic.Cas(&sgp.g.selectDone, 0, 1) {
   815  			continue
   816  		}
   817  
   818  		return sgp
   819  	}
   820  }
   821  
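Because waitq is a plain FIFO list, parked goroutines are dequeued in arrival order. The language spec does not promise any wakeup order, so the sketch below only encourages that ordering with sleeps; it is illustrative, not a guarantee:

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		c := make(chan int)
		for i := 0; i < 3; i++ {
			i := i
			go func() {
				v := <-c // each goroutine parks a sudog on c.recvq
				fmt.Println("receiver", i, "got", v)
			}()
			time.Sleep(10 * time.Millisecond) // encourage FIFO park order (timing only)
		}
		for v := 0; v < 3; v++ {
			c <- v // each send dequeues the oldest waiter from c.recvq
		}
		time.Sleep(100 * time.Millisecond) // let the receivers print
	}
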
   822  func (c *hchan) raceaddr() unsafe.Pointer {
   823  	// Treat read-like and write-like operations on the channel as
   824  	// happening at this address. Avoid using the address of qcount
   825  	// or dataqsiz, because the len() and cap() builtins read
   826  	// those addresses, and we don't want them racing with
   827  	// operations like close().
   828  	return unsafe.Pointer(&c.buf)
   829  }
   830  
   831  func racesync(c *hchan, sg *sudog) {
   832  	racerelease(chanbuf(c, 0))
   833  	raceacquireg(sg.g, chanbuf(c, 0))
   834  	racereleaseg(sg.g, chanbuf(c, 0))
   835  	raceacquire(chanbuf(c, 0))
   836  }
   837  
