...
Run Format

Source file src/runtime/atomic_pointer.go

Documentation: runtime

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"runtime/internal/atomic"
     9  	"unsafe"
    10  )
    11  
    12  // These functions cannot have go:noescape annotations,
    13  // because while ptr does not escape, new does.
    14  // If new is marked as not escaping, the compiler will make incorrect
    15  // escape analysis decisions about the pointer value being stored.
    16  // Instead, these are wrappers around the actual atomics (casp1 and so on)
    17  // that use noescape to convey which arguments do not escape.
    18  
// atomicwb performs a write barrier before an atomic pointer write.
// The caller should guard the call with "if writeBarrier.enabled".
//
// ptr is the slot about to be written; new is the pointer value that
// will be stored into it.
//
//go:nosplit
func atomicwb(ptr *unsafe.Pointer, new unsafe.Pointer) {
	// Reinterpret the pointer slot as a *uintptr: the write barrier
	// buffer records (slot, value) pairs as raw words.
	slot := (*uintptr)(unsafe.Pointer(ptr))
	// Fast path: append the (old value, new value) pair to this M's
	// per-P write barrier buffer. If putFast reports failure
	// (presumably the buffer is full — confirm in wbBuf), take the
	// slow path, which flushes the buffer and records this entry.
	if !getg().m.p.ptr().wbBuf.putFast(*slot, uintptr(new)) {
		wbBufFlush(slot, uintptr(new))
	}
}
    29  
// atomicstorep performs *ptr = new atomically and invokes a write barrier.
//
//go:nosplit
func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
	// The barrier must happen before the store so the GC learns about
	// the pointer before it becomes reachable through *ptr.
	if writeBarrier.enabled {
		atomicwb((*unsafe.Pointer)(ptr), new)
	}
	// noescape(ptr) tells escape analysis that the slot address does
	// not escape; only new may escape (see the comment at the top of
	// this file for why these wrappers exist instead of go:noescape).
	atomic.StorepNoWB(noescape(ptr), new)
}
    39  
// casp performs a compare-and-swap of the pointer slot *ptr from old
// to new, with a write barrier. It reports whether the swap was done.
//
//go:nosplit
func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
	// The write barrier is only necessary if the CAS succeeds,
	// but since it needs to happen before the write becomes
	// public, we have to do it conservatively all the time.
	if writeBarrier.enabled {
		atomicwb(ptr, new)
	}
	// ptr and old are wrapped in noescape so only new is treated as
	// escaping by the compiler (per the file header comment).
	return atomic.Casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), noescape(old), new)
}
    50  
    51  // Like above, but implement in terms of sync/atomic's uintptr operations.
    52  // We cannot just call the runtime routines, because the race detector expects
    53  // to be able to intercept the sync/atomic forms but not the runtime forms.
    54  
// sync_atomic_StoreUintptr atomically stores new into *ptr.
// Body-less declaration: the implementation is sync/atomic.StoreUintptr,
// bound via the linkname below (see the comment above about why the
// runtime calls the sync/atomic forms here).
//
//go:linkname sync_atomic_StoreUintptr sync/atomic.StoreUintptr
func sync_atomic_StoreUintptr(ptr *uintptr, new uintptr)
    57  
// sync_atomic_StorePointer is the runtime side of sync/atomic.StorePointer:
// it adds the write barrier that the raw uintptr store cannot perform,
// then delegates to the (race-detector-interceptable) uintptr form.
//
//go:linkname sync_atomic_StorePointer sync/atomic.StorePointer
//go:nosplit
func sync_atomic_StorePointer(ptr *unsafe.Pointer, new unsafe.Pointer) {
	// Barrier before the store, so the GC sees new before it is published.
	if writeBarrier.enabled {
		atomicwb(ptr, new)
	}
	sync_atomic_StoreUintptr((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
}
    66  
// sync_atomic_SwapUintptr atomically stores new into *ptr and returns
// the previous value. Body-less declaration: the implementation is
// sync/atomic.SwapUintptr, bound via the linkname below.
//
//go:linkname sync_atomic_SwapUintptr sync/atomic.SwapUintptr
func sync_atomic_SwapUintptr(ptr *uintptr, new uintptr) uintptr
    69  
// sync_atomic_SwapPointer is the runtime side of sync/atomic.SwapPointer:
// write barrier for the incoming pointer, then an atomic swap via the
// uintptr form. Returns the pointer previously stored in *ptr.
//
//go:linkname sync_atomic_SwapPointer sync/atomic.SwapPointer
//go:nosplit
func sync_atomic_SwapPointer(ptr *unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer {
	// Barrier before the swap, so the GC sees new before it is published.
	if writeBarrier.enabled {
		atomicwb(ptr, new)
	}
	// noescape keeps the slot address from being treated as escaping;
	// only new may escape (see the file header comment).
	old := unsafe.Pointer(sync_atomic_SwapUintptr((*uintptr)(noescape(unsafe.Pointer(ptr))), uintptr(new)))
	return old
}
    79  
// sync_atomic_CompareAndSwapUintptr atomically compares *ptr with old
// and, if equal, stores new, reporting whether the swap occurred.
// Body-less declaration: the implementation is
// sync/atomic.CompareAndSwapUintptr, bound via the linkname below.
//
//go:linkname sync_atomic_CompareAndSwapUintptr sync/atomic.CompareAndSwapUintptr
func sync_atomic_CompareAndSwapUintptr(ptr *uintptr, old, new uintptr) bool
    82  
// sync_atomic_CompareAndSwapPointer is the runtime side of
// sync/atomic.CompareAndSwapPointer: write barrier for new, then the
// CAS via the uintptr form. Reports whether the swap occurred.
//
//go:linkname sync_atomic_CompareAndSwapPointer sync/atomic.CompareAndSwapPointer
//go:nosplit
func sync_atomic_CompareAndSwapPointer(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
	// As in casp above, the barrier is issued unconditionally: it must
	// precede a (possibly) successful publish, even though it is only
	// strictly needed when the CAS succeeds.
	if writeBarrier.enabled {
		atomicwb(ptr, new)
	}
	// noescape hides the slot address from escape analysis; only new
	// may escape (see the file header comment).
	return sync_atomic_CompareAndSwapUintptr((*uintptr)(noescape(unsafe.Pointer(ptr))), uintptr(old), uintptr(new))
}
    91  

View as plain text