...
Run Format

Source file src/runtime/chan.go

     1	// Copyright 2014 The Go Authors. All rights reserved.
     2	// Use of this source code is governed by a BSD-style
     3	// license that can be found in the LICENSE file.
     4	
     5	package runtime
     6	
     7	// This file contains the implementation of Go channels.
     8	
     9	// Invariants:
    10	//  At least one of c.sendq and c.recvq is empty,
    11	//  except for the case of an unbuffered channel with a single goroutine
    12	//  blocked on it for both sending and receiving using a select statement,
    13	//  in which case the length of c.sendq and c.recvq is limited only by the
    14	//  size of the select statement.
    15	//
    16	// For buffered channels, also:
    17	//  c.qcount > 0 implies that c.recvq is empty.
    18	//  c.qcount < c.dataqsiz implies that c.sendq is empty.
    19	
    20	import (
    21		"runtime/internal/atomic"
    22		"unsafe"
    23	)
    24	
const (
	// maxAlign is the strictest element alignment a channel supports;
	// makechan throws if elem.align exceeds it.
	maxAlign  = 8
	// hchanSize is unsafe.Sizeof(hchan{}) rounded up to a multiple of
	// maxAlign, so a buffer allocated immediately after the hchan
	// header (see makechan) is suitably aligned for any element type.
	hchanSize = unsafe.Sizeof(hchan{}) + uintptr(-int(unsafe.Sizeof(hchan{}))&(maxAlign-1))
	// debugChan enables debug prints in makechan/chansend/chanrecv.
	debugChan = false
)
    30	
// hchan is the runtime representation of a Go channel.
// Field order matters: makechan may place the element buffer directly
// after this struct in the same allocation (see hchanSize).
type hchan struct {
	qcount   uint           // total data in the queue
	dataqsiz uint           // size of the circular queue (0 for unbuffered channels)
	buf      unsafe.Pointer // points to an array of dataqsiz elements
	elemsize uint16         // size of one element (makechan guarantees < 1<<16)
	closed   uint32         // nonzero once the channel has been closed; never reset
	elemtype *_type // element type
	sendx    uint   // send index into buf (next slot to fill)
	recvx    uint   // receive index into buf (next slot to drain)
	recvq    waitq  // list of recv waiters
	sendq    waitq  // list of send waiters

	// lock protects all fields in hchan, as well as several
	// fields in sudogs blocked on this channel.
	//
	// Do not change another G's status while holding this lock
	// (in particular, do not ready a G), as this can deadlock
	// with stack shrinking.
	lock mutex
}
    51	
// waitq is a doubly-linked FIFO list of sudogs (blocked goroutines)
// waiting to send to or receive from a channel. It is protected by
// the owning hchan's lock.
type waitq struct {
	first *sudog // head: next sudog to be dequeued
	last  *sudog // tail: most recently enqueued sudog
}
    56	
// reflect_makechan is the implementation of reflect.MakeChan,
// linked into package reflect via go:linkname.
//go:linkname reflect_makechan reflect.makechan
func reflect_makechan(t *chantype, size int64) *hchan {
	return makechan(t, size)
}
    61	
// makechan creates and returns a new channel of type t with buffer
// capacity size. It is called by the compiler for make(chan T, n) and
// by reflect via reflect_makechan. It throws on invalid element types
// or alignment and panics if size is negative or overflows the
// allocator limit.
func makechan(t *chantype, size int64) *hchan {
	elem := t.elem

	// compiler checks this but be safe.
	if elem.size >= 1<<16 {
		throw("makechan: invalid channel element type")
	}
	if hchanSize%maxAlign != 0 || elem.align > maxAlign {
		throw("makechan: bad alignment")
	}
	// Reject negative sizes, sizes that don't fit in uintptr, and
	// buffers whose total byte count would exceed the allocation limit.
	if size < 0 || int64(uintptr(size)) != size || (elem.size > 0 && uintptr(size) > (_MaxMem-hchanSize)/elem.size) {
		panic(plainError("makechan: size out of range"))
	}

	var c *hchan
	if elem.kind&kindNoPointers != 0 || size == 0 {
		// Allocate memory in one call.
		// Hchan does not contain pointers interesting for GC in this case:
		// buf points into the same allocation, elemtype is persistent.
		// SudoG's are referenced from their owning thread so they can't be collected.
		// TODO(dvyukov,rlh): Rethink when collector can move allocated objects.
		c = (*hchan)(mallocgc(hchanSize+uintptr(size)*elem.size, nil, true))
		if size > 0 && elem.size != 0 {
			c.buf = add(unsafe.Pointer(c), hchanSize)
		} else {
			// race detector uses this location for synchronization
			// Also prevents us from pointing beyond the allocation (see issue 9401).
			c.buf = unsafe.Pointer(c)
		}
	} else {
		// Element type contains pointers: allocate the buffer separately
		// so the GC scans it as an array of elem.
		c = new(hchan)
		c.buf = newarray(elem, int(size))
	}
	c.elemsize = uint16(elem.size)
	c.elemtype = elem
	c.dataqsiz = uint(size)

	if debugChan {
		print("makechan: chan=", c, "; elemsize=", elem.size, "; elemalg=", elem.alg, "; dataqsiz=", size, "\n")
	}
	return c
}
   104	
// chanbuf(c, i) is pointer to the i'th slot in the buffer.
// The caller is responsible for i < c.dataqsiz; no bounds check is done.
func chanbuf(c *hchan, i uint) unsafe.Pointer {
	return add(c.buf, uintptr(i)*uintptr(c.elemsize))
}
   109	
// entry point for c <- x from compiled code
// nosplit: getcallerpc takes the address of the first argument, which
// must not move due to a stack split before it is read.
//go:nosplit
func chansend1(t *chantype, c *hchan, elem unsafe.Pointer) {
	chansend(t, c, elem, true, getcallerpc(unsafe.Pointer(&t)))
}
   115	
// chansend implements the generic single channel send operation
// c <- ep, where ep points to the value being sent.
//
// If block is false, the operation does not sleep: it returns false
// when the send cannot complete immediately, true when it did complete.
// If block is true, chansend returns true once the value has been
// delivered, sleeping if necessary, or panics if the channel is closed.
// (The original comment said "If block is not nil" — block is a bool.)
//
// A blocked sender can wake up with g.param == nil when a channel
// involved in the sleep has been closed; that is how a close is
// distinguished from a completed send below.
func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
	if raceenabled {
		raceReadObjectPC(t.elem, ep, callerpc, funcPC(chansend))
	}
	if msanenabled {
		msanread(ep, t.elem.size)
	}

	// Send on a nil channel: fail immediately if non-blocking,
	// otherwise block forever.
	if c == nil {
		if !block {
			return false
		}
		gopark(nil, nil, "chan send (nil chan)", traceEvGoStop, 2)
		throw("unreachable")
	}

	if debugChan {
		print("chansend: chan=", c, "\n")
	}

	if raceenabled {
		racereadpc(unsafe.Pointer(c), callerpc, funcPC(chansend))
	}

	// Fast path: check for failed non-blocking operation without acquiring the lock.
	//
	// After observing that the channel is not closed, we observe that the channel is
	// not ready for sending. Each of these observations is a single word-sized read
	// (first c.closed and second c.recvq.first or c.qcount depending on kind of channel).
	// Because a closed channel cannot transition from 'ready for sending' to
	// 'not ready for sending', even if the channel is closed between the two observations,
	// they imply a moment between the two when the channel was both not yet closed
	// and not ready for sending. We behave as if we observed the channel at that moment,
	// and report that the send cannot proceed.
	//
	// It is okay if the reads are reordered here: if we observe that the channel is not
	// ready for sending and then observe that it is not closed, that implies that the
	// channel wasn't closed during the first observation.
	if !block && c.closed == 0 && ((c.dataqsiz == 0 && c.recvq.first == nil) ||
		(c.dataqsiz > 0 && c.qcount == c.dataqsiz)) {
		return false
	}

	// Timestamp for block profiling, taken before we might sleep.
	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
	}

	lock(&c.lock)

	if c.closed != 0 {
		unlock(&c.lock)
		panic(plainError("send on closed channel"))
	}

	if sg := c.recvq.dequeue(); sg != nil {
		// Found a waiting receiver. We pass the value we want to send
		// directly to the receiver, bypassing the channel buffer (if any).
		send(c, sg, ep, func() { unlock(&c.lock) })
		return true
	}

	if c.qcount < c.dataqsiz {
		// Space is available in the channel buffer. Enqueue the element to send.
		qp := chanbuf(c, c.sendx)
		if raceenabled {
			raceacquire(qp)
			racerelease(qp)
		}
		typedmemmove(c.elemtype, qp, ep)
		c.sendx++
		if c.sendx == c.dataqsiz {
			c.sendx = 0 // wrap around the circular buffer
		}
		c.qcount++
		unlock(&c.lock)
		return true
	}

	if !block {
		unlock(&c.lock)
		return false
	}

	// Block on the channel. Some receiver will complete our operation for us.
	gp := getg()
	mysg := acquireSudog()
	mysg.releasetime = 0
	if t0 != 0 {
		mysg.releasetime = -1
	}
	// No stack splits between assigning elem and enqueuing mysg
	// on gp.waiting where copystack can find it.
	mysg.elem = ep
	mysg.waitlink = nil
	mysg.g = gp
	mysg.selectdone = nil
	mysg.c = c
	gp.waiting = mysg
	gp.param = nil
	c.sendq.enqueue(mysg)
	goparkunlock(&c.lock, "chan send", traceEvGoBlockSend, 3)

	// someone woke us up.
	if mysg != gp.waiting {
		throw("G waiting list is corrupted")
	}
	gp.waiting = nil
	if gp.param == nil {
		// A nil param with a woken sender means the channel was closed
		// while we slept (closechan sets gp.param = nil).
		if c.closed == 0 {
			throw("chansend: spurious wakeup")
		}
		panic(plainError("send on closed channel"))
	}
	gp.param = nil
	if mysg.releasetime > 0 {
		blockevent(mysg.releasetime-t0, 2)
	}
	mysg.c = nil
	releaseSudog(mysg)
	return true
}
   250	
// send processes a send operation on an empty channel c.
// The value ep sent by the sender is copied to the receiver sg.
// The receiver is then woken up to go on its merry way.
// Channel c must be empty and locked.  send unlocks c with unlockf.
// sg must already be dequeued from c.
// ep must be non-nil and point to the heap or the caller's stack.
func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func()) {
	if raceenabled {
		if c.dataqsiz == 0 {
			racesync(c, sg)
		} else {
			// Pretend we go through the buffer, even though
			// we copy directly. Note that we need to increment
			// the head/tail locations only when raceenabled.
			qp := chanbuf(c, c.recvx)
			raceacquire(qp)
			racerelease(qp)
			raceacquireg(sg.g, qp)
			racereleaseg(sg.g, qp)
			c.recvx++
			if c.recvx == c.dataqsiz {
				c.recvx = 0
			}
			c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
		}
	}
	// sg.elem may be nil when the receiver discards the value (_ = <-c
	// via a select compiled with a nil destination).
	if sg.elem != nil {
		sendDirect(c.elemtype, sg, ep)
		sg.elem = nil
	}
	gp := sg.g
	unlockf()
	// A non-nil param tells the woken receiver the wakeup was a real
	// send, not a close (see the gp.param check in chanrecv).
	gp.param = unsafe.Pointer(sg)
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp, 4)
}
   289	
// Sends and receives on unbuffered or empty-buffered channels are the
// only operations where one running goroutine writes to the stack of
// another running goroutine. The GC assumes that stack writes only
// happen when the goroutine is running and are only done by that
// goroutine. Using a write barrier is sufficient to make up for
// violating that assumption, but the write barrier has to work.
// typedmemmove will call bulkBarrierPreWrite, but the target bytes
// are not in the heap, so that will not help. We arrange to call
// memmove and typeBitsBulkBarrier instead.

// sendDirect copies the value at src (the sender's stack or heap) into
// the blocked receiver's sg.elem slot on another goroutine's stack.
func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) {
	// src is on our stack, dst is a slot on another stack.

	// Once we read sg.elem out of sg, it will no longer
	// be updated if the destination's stack gets copied (shrunk).
	// So make sure that no preemption points can happen between read & use.
	dst := sg.elem
	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
	memmove(dst, src, t.size)
}
   310	
// recvDirect copies the blocked sender's value (sg.elem, on another
// goroutine's stack) into dst. Counterpart of sendDirect.
func recvDirect(t *_type, sg *sudog, dst unsafe.Pointer) {
	// dst is on our stack or the heap, src is on another stack.
	// The channel is locked, so src will not move during this
	// operation.
	src := sg.elem
	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
	memmove(dst, src, t.size)
}
   319	
// closechan closes channel c. It panics on a nil or already-closed
// channel. All goroutines blocked on c are released: receivers get the
// element type's zero value, and senders will panic when they wake and
// see gp.param == nil with c.closed set (see chansend).
func closechan(c *hchan) {
	if c == nil {
		panic(plainError("close of nil channel"))
	}

	lock(&c.lock)
	if c.closed != 0 {
		unlock(&c.lock)
		panic(plainError("close of closed channel"))
	}

	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&c))
		racewritepc(unsafe.Pointer(c), callerpc, funcPC(closechan))
		racerelease(unsafe.Pointer(c))
	}

	c.closed = 1

	// Collect woken Gs on a local list; they are made runnable only
	// after the channel lock is dropped (readying a G while holding
	// c.lock can deadlock with stack shrinking — see hchan.lock doc).
	var glist *g

	// release all readers
	for {
		sg := c.recvq.dequeue()
		if sg == nil {
			break
		}
		if sg.elem != nil {
			// Receiver gets the zero value from a closed channel.
			typedmemclr(c.elemtype, sg.elem)
			sg.elem = nil
		}
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		gp := sg.g
		gp.param = nil // nil param signals "woken by close"
		if raceenabled {
			raceacquireg(gp, unsafe.Pointer(c))
		}
		gp.schedlink.set(glist)
		glist = gp
	}

	// release all writers (they will panic)
	for {
		sg := c.sendq.dequeue()
		if sg == nil {
			break
		}
		sg.elem = nil
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		gp := sg.g
		gp.param = nil // nil param signals "woken by close"
		if raceenabled {
			raceacquireg(gp, unsafe.Pointer(c))
		}
		gp.schedlink.set(glist)
		glist = gp
	}
	unlock(&c.lock)

	// Ready all Gs now that we've dropped the channel lock.
	for glist != nil {
		gp := glist
		glist = glist.schedlink.ptr()
		gp.schedlink = 0
		goready(gp, 3)
	}
}
   391	
// entry points for <- c from compiled code
// nosplit: keeps the tiny wrapper frame fixed; see chansend1.
//go:nosplit
func chanrecv1(t *chantype, c *hchan, elem unsafe.Pointer) {
	chanrecv(t, c, elem, true)
}
   397	
// chanrecv2 is the entry point for v, ok := <-c from compiled code;
// received reports whether the value came from a send (false if the
// channel is closed and drained).
//go:nosplit
func chanrecv2(t *chantype, c *hchan, elem unsafe.Pointer) (received bool) {
	_, received = chanrecv(t, c, elem, true)
	return
}
   403	
// chanrecv receives on channel c and writes the received data to ep.
// ep may be nil, in which case received data is ignored.
// If block == false and no elements are available, returns (false, false).
// Otherwise, if c is closed, zeros *ep and returns (true, false).
// Otherwise, fills in *ep with an element and returns (true, true).
// A non-nil ep must point to the heap or the caller's stack.
func chanrecv(t *chantype, c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
	// raceenabled: don't need to check ep, as it is always on the stack
	// or is new memory allocated by reflect.

	if debugChan {
		print("chanrecv: chan=", c, "\n")
	}

	// Receive from a nil channel: fail immediately if non-blocking,
	// otherwise block forever.
	if c == nil {
		if !block {
			return
		}
		gopark(nil, nil, "chan receive (nil chan)", traceEvGoStop, 2)
		throw("unreachable")
	}

	// Fast path: check for failed non-blocking operation without acquiring the lock.
	//
	// After observing that the channel is not ready for receiving, we observe that the
	// channel is not closed. Each of these observations is a single word-sized read
	// (first c.sendq.first or c.qcount, and second c.closed).
	// Because a channel cannot be reopened, the later observation of the channel
	// being not closed implies that it was also not closed at the moment of the
	// first observation. We behave as if we observed the channel at that moment
	// and report that the receive cannot proceed.
	//
	// The order of operations is important here: reversing the operations can lead to
	// incorrect behavior when racing with a close.
	if !block && (c.dataqsiz == 0 && c.sendq.first == nil ||
		c.dataqsiz > 0 && atomic.Loaduint(&c.qcount) == 0) &&
		atomic.Load(&c.closed) == 0 {
		return
	}

	// Timestamp for block profiling, taken before we might sleep.
	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
	}

	lock(&c.lock)

	// Closed and drained: deliver the zero value, (true, false).
	if c.closed != 0 && c.qcount == 0 {
		if raceenabled {
			raceacquire(unsafe.Pointer(c))
		}
		unlock(&c.lock)
		if ep != nil {
			typedmemclr(c.elemtype, ep)
		}
		return true, false
	}

	if sg := c.sendq.dequeue(); sg != nil {
		// Found a waiting sender. If buffer is size 0, receive value
		// directly from sender. Otherwise, receive from head of queue
		// and add sender's value to the tail of the queue (both map to
		// the same buffer slot because the queue is full).
		recv(c, sg, ep, func() { unlock(&c.lock) })
		return true, true
	}

	if c.qcount > 0 {
		// Receive directly from queue
		qp := chanbuf(c, c.recvx)
		if raceenabled {
			raceacquire(qp)
			racerelease(qp)
		}
		if ep != nil {
			typedmemmove(c.elemtype, ep, qp)
		}
		// Clear the slot so the buffer does not keep the element's
		// pointers alive for the GC.
		typedmemclr(c.elemtype, qp)
		c.recvx++
		if c.recvx == c.dataqsiz {
			c.recvx = 0 // wrap around the circular buffer
		}
		c.qcount--
		unlock(&c.lock)
		return true, true
	}

	if !block {
		unlock(&c.lock)
		return false, false
	}

	// no sender available: block on this channel.
	gp := getg()
	mysg := acquireSudog()
	mysg.releasetime = 0
	if t0 != 0 {
		mysg.releasetime = -1
	}
	// No stack splits between assigning elem and enqueuing mysg
	// on gp.waiting where copystack can find it.
	mysg.elem = ep
	mysg.waitlink = nil
	gp.waiting = mysg
	mysg.g = gp
	mysg.selectdone = nil
	mysg.c = c
	gp.param = nil
	c.recvq.enqueue(mysg)
	goparkunlock(&c.lock, "chan receive", traceEvGoBlockRecv, 3)

	// someone woke us up
	if mysg != gp.waiting {
		throw("G waiting list is corrupted")
	}
	gp.waiting = nil
	if mysg.releasetime > 0 {
		blockevent(mysg.releasetime-t0, 2)
	}
	// closechan wakes us with gp.param == nil; a real sender sets it
	// to the sudog (see send/recv).
	closed := gp.param == nil
	gp.param = nil
	mysg.c = nil
	releaseSudog(mysg)
	return true, !closed
}
   529	
// recv processes a receive operation on a full channel c.
// There are 2 parts:
// 1) The value sent by the sender sg is put into the channel
//    and the sender is woken up to go on its merry way.
// 2) The value received by the receiver (the current G) is
//    written to ep.
// For synchronous channels, both values are the same.
// For asynchronous channels, the receiver gets its data from
// the channel buffer and the sender's data is put in the
// channel buffer.
// Channel c must be full and locked. recv unlocks c with unlockf.
// sg must already be dequeued from c.
// A non-nil ep must point to the heap or the caller's stack.
func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func()) {
	if c.dataqsiz == 0 {
		if raceenabled {
			racesync(c, sg)
		}
		if ep != nil {
			// copy data from sender
			recvDirect(c.elemtype, sg, ep)
		}
	} else {
		// Queue is full. Take the item at the
		// head of the queue. Make the sender enqueue
		// its item at the tail of the queue. Since the
		// queue is full, those are both the same slot.
		qp := chanbuf(c, c.recvx)
		if raceenabled {
			raceacquire(qp)
			racerelease(qp)
			raceacquireg(sg.g, qp)
			racereleaseg(sg.g, qp)
		}
		// copy data from queue to receiver
		if ep != nil {
			typedmemmove(c.elemtype, ep, qp)
		}
		// copy data from sender to queue
		typedmemmove(c.elemtype, qp, sg.elem)
		c.recvx++
		if c.recvx == c.dataqsiz {
			c.recvx = 0
		}
		c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
	}
	sg.elem = nil
	gp := sg.g
	unlockf()
	// A non-nil param tells the woken sender the wakeup was a real
	// receive, not a close (see the gp.param check in chansend).
	gp.param = unsafe.Pointer(sg)
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp, 4)
}
   585	
// compiler implements
//
//	select {
//	case c <- v:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if selectnbsend(c, v) {
//		... foo
//	} else {
//		... bar
//	}
//
func selectnbsend(t *chantype, c *hchan, elem unsafe.Pointer) (selected bool) {
	// Non-blocking send: block=false makes chansend return false
	// instead of parking when the send cannot proceed.
	return chansend(t, c, elem, false, getcallerpc(unsafe.Pointer(&t)))
}
   606	
// compiler implements
//
//	select {
//	case v = <-c:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if selectnbrecv(&v, c) {
//		... foo
//	} else {
//		... bar
//	}
//
func selectnbrecv(t *chantype, elem unsafe.Pointer, c *hchan) (selected bool) {
	// Non-blocking receive; the "received" result is discarded here
	// because this form of select has no ok variable.
	selected, _ = chanrecv(t, c, elem, false)
	return
}
   628	
// compiler implements
//
//	select {
//	case v, ok = <-c:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if c != nil && selectnbrecv2(&v, &ok, c) {
//		... foo
//	} else {
//		... bar
//	}
//
func selectnbrecv2(t *chantype, elem unsafe.Pointer, received *bool, c *hchan) (selected bool) {
	// TODO(khr): just return 2 values from this function, now that it is in Go.
	selected, *received = chanrecv(t, c, elem, false)
	return
}
   651	
// reflect_chansend implements reflect.Value.Send/TrySend via
// go:linkname. nb ("non-blocking") is inverted into chansend's block.
//go:linkname reflect_chansend reflect.chansend
func reflect_chansend(t *chantype, c *hchan, elem unsafe.Pointer, nb bool) (selected bool) {
	return chansend(t, c, elem, !nb, getcallerpc(unsafe.Pointer(&t)))
}
   656	
// reflect_chanrecv implements reflect.Value.Recv/TryRecv via
// go:linkname. nb ("non-blocking") is inverted into chanrecv's block.
//go:linkname reflect_chanrecv reflect.chanrecv
func reflect_chanrecv(t *chantype, c *hchan, nb bool, elem unsafe.Pointer) (selected bool, received bool) {
	return chanrecv(t, c, elem, !nb)
}
   661	
   662	//go:linkname reflect_chanlen reflect.chanlen
   663	func reflect_chanlen(c *hchan) int {
   664		if c == nil {
   665			return 0
   666		}
   667		return int(c.qcount)
   668	}
   669	
   670	//go:linkname reflect_chancap reflect.chancap
   671	func reflect_chancap(c *hchan) int {
   672		if c == nil {
   673			return 0
   674		}
   675		return int(c.dataqsiz)
   676	}
   677	
// reflect_chanclose implements reflect.Value.Close via go:linkname;
// it shares closechan's panics (nil channel, double close).
//go:linkname reflect_chanclose reflect.chanclose
func reflect_chanclose(c *hchan) {
	closechan(c)
}
   682	
   683	func (q *waitq) enqueue(sgp *sudog) {
   684		sgp.next = nil
   685		x := q.last
   686		if x == nil {
   687			sgp.prev = nil
   688			q.first = sgp
   689			q.last = sgp
   690			return
   691		}
   692		sgp.prev = x
   693		x.next = sgp
   694		q.last = sgp
   695	}
   696	
// dequeue removes and returns the sudog at the head of q, or nil if
// the queue is empty. Sudogs that belong to a select whose winner has
// already been signaled are skipped. Must be called with the owning
// channel's lock held.
func (q *waitq) dequeue() *sudog {
	for {
		sgp := q.first
		if sgp == nil {
			return nil
		}
		y := sgp.next
		if y == nil {
			q.first = nil
			q.last = nil
		} else {
			y.prev = nil
			q.first = y
			sgp.next = nil // mark as removed (see dequeueSudog)
		}

		// if sgp participates in a select and is already signaled, ignore it
		if sgp.selectdone != nil {
			// claim the right to signal
			// The CAS races with other channels' dequeues of the same
			// select's sudogs; losing means another case already won.
			if *sgp.selectdone != 0 || !atomic.Cas(sgp.selectdone, 0, 1) {
				continue
			}
		}

		return sgp
	}
}
   724	
// racesync records race-detector synchronization between the current
// goroutine and sg.g for an unbuffered channel hand-off, using the
// channel's slot 0 (c.buf points at the hchan itself when dataqsiz==0,
// see makechan) as the synchronization address.
func racesync(c *hchan, sg *sudog) {
	racerelease(chanbuf(c, 0))
	raceacquireg(sg.g, chanbuf(c, 0))
	racereleaseg(sg.g, chanbuf(c, 0))
	raceacquire(chanbuf(c, 0))
}
   731	

View as plain text