...
Run Format

Source file src/runtime/race.go

Documentation: runtime

     1  // Copyright 2012 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // +build race
     6  
     7  package runtime
     8  
     9  import (
    10  	"unsafe"
    11  )
    12  
// Public race detection API, present iff built with -race.
//
// The four functions below have no Go bodies: they are implemented in
// assembly (presumably race_amd64.s, which the imports further down
// reference) and call directly into the TSan runtime.

// RaceRead reports to the race detector a read of the byte at addr.
func RaceRead(addr unsafe.Pointer)

// RaceWrite reports to the race detector a write of the byte at addr.
func RaceWrite(addr unsafe.Pointer)

// RaceReadRange reports to the race detector a read of the len bytes at addr.
func RaceReadRange(addr unsafe.Pointer, len int)

// RaceWriteRange reports to the race detector a write of the len bytes at addr.
func RaceWriteRange(addr unsafe.Pointer, len int)
    19  
    20  func RaceErrors() int {
    21  	var n uint64
    22  	racecall(&__tsan_report_count, uintptr(unsafe.Pointer(&n)), 0, 0, 0)
    23  	return int(n)
    24  }
    25  
//go:nosplit

// RaceAcquire/RaceRelease/RaceReleaseMerge establish happens-before relations
// between goroutines. These inform the race detector about actual synchronization
// that it can't see for some reason (e.g. synchronization within RaceDisable/RaceEnable
// sections of code).
// RaceAcquire establishes a happens-before relation with the preceding
// RaceReleaseMerge on addr up to and including the last RaceRelease on addr.
// In terms of the C memory model (C11 §5.1.2.4, §7.17.3),
// RaceAcquire is equivalent to atomic_load(memory_order_acquire).
//
// NOTE(review): the //go:nosplit above is separated from the func by this
// doc comment; the compiler still attaches accumulated pragmas to the next
// function declaration, but confirm this pattern against the compiler docs.
func RaceAcquire(addr unsafe.Pointer) {
	raceacquire(addr)
}
    39  
//go:nosplit

// RaceRelease performs a release operation on addr that
// can synchronize with a later RaceAcquire on addr.
//
// In terms of the C memory model, RaceRelease is equivalent to
// atomic_store(memory_order_release).
func RaceRelease(addr unsafe.Pointer) {
	// Delegates to the runtime-private helper, which also checks
	// raceignore and that addr has shadow memory.
	racerelease(addr)
}
    50  
//go:nosplit

// RaceReleaseMerge is like RaceRelease, but also establishes a happens-before
// relation with the preceding RaceRelease or RaceReleaseMerge on addr.
//
// In terms of the C memory model, RaceReleaseMerge is equivalent to
// atomic_exchange(memory_order_release).
func RaceReleaseMerge(addr unsafe.Pointer) {
	// Delegates to the runtime-private helper, which also checks
	// raceignore and that addr has shadow memory.
	racereleasemerge(addr)
}
    61  
    62  //go:nosplit
    63  
    64  // RaceDisable disables handling of race synchronization events in the current goroutine.
    65  // Handling is re-enabled with RaceEnable. RaceDisable/RaceEnable can be nested.
    66  // Non-synchronization events (memory accesses, function entry/exit) still affect
    67  // the race detector.
    68  func RaceDisable() {
    69  	_g_ := getg()
    70  	if _g_.raceignore == 0 {
    71  		racecall(&__tsan_go_ignore_sync_begin, _g_.racectx, 0, 0, 0)
    72  	}
    73  	_g_.raceignore++
    74  }
    75  
    76  //go:nosplit
    77  
    78  // RaceEnable re-enables handling of race events in the current goroutine.
    79  func RaceEnable() {
    80  	_g_ := getg()
    81  	_g_.raceignore--
    82  	if _g_.raceignore == 0 {
    83  		racecall(&__tsan_go_ignore_sync_end, _g_.racectx, 0, 0, 0)
    84  	}
    85  }
    86  
// Private interface for the runtime.

// raceenabled tells the rest of the runtime that this build includes the
// race detector (this file is guarded by the race build tag; the non-race
// build presumably defines raceenabled = false elsewhere).
const raceenabled = true
    90  
    91  // For all functions accepting callerpc and pc,
    92  // callerpc is a return PC of the function that calls this function,
    93  // pc is start PC of the function that calls this function.
    94  func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
    95  	kind := t.kind & kindMask
    96  	if kind == kindArray || kind == kindStruct {
    97  		// for composite objects we have to read every address
    98  		// because a write might happen to any subobject.
    99  		racereadrangepc(addr, t.size, callerpc, pc)
   100  	} else {
   101  		// for non-composite objects we can read just the start
   102  		// address, as any write must write the first byte.
   103  		racereadpc(addr, callerpc, pc)
   104  	}
   105  }
   106  
   107  func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
   108  	kind := t.kind & kindMask
   109  	if kind == kindArray || kind == kindStruct {
   110  		// for composite objects we have to write every address
   111  		// because a write might happen to any subobject.
   112  		racewriterangepc(addr, t.size, callerpc, pc)
   113  	} else {
   114  		// for non-composite objects we can write just the start
   115  		// address, as any write must write the first byte.
   116  		racewritepc(addr, callerpc, pc)
   117  	}
   118  }
   119  
// racereadpc reports a read of addr at the given call/function PCs.
// No Go body: implemented outside Go (presumably in assembly).
//go:noescape
func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)

// racewritepc reports a write of addr at the given call/function PCs.
// No Go body: implemented outside Go (presumably in assembly).
//go:noescape
func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)
   125  
// symbolizeCodeContext is the request/response record for
// raceSymbolizeCodeCmd callbacks from the race runtime.
// NOTE(review): the field order presumably mirrors a C-side struct in the
// TSan runtime — do not reorder; confirm against the race runtime sources.
type symbolizeCodeContext struct {
	pc   uintptr // input: program counter to symbolize
	fn   *byte   // output: NUL-terminated function name
	file *byte   // output: NUL-terminated file name
	line uintptr // output: line number
	off  uintptr // output: pc offset from function entry
	res  uintptr // output: set to 1 when the request was handled
}

// Fallback NUL-terminated C strings used when a PC cannot be symbolized.
var qq = [...]byte{'?', '?', 0}
var dash = [...]byte{'-', 0}
   137  
// Callback command codes passed from the race runtime to racecallback.
// NOTE(review): the values presumably must match the C side of the race
// runtime — do not renumber; confirm against the race runtime sources.
const (
	raceGetProcCmd = iota
	raceSymbolizeCodeCmd
	raceSymbolizeDataCmd
)
   143  
   144  // Callback from C into Go, runs on g0.
   145  func racecallback(cmd uintptr, ctx unsafe.Pointer) {
   146  	switch cmd {
   147  	case raceGetProcCmd:
   148  		throw("should have been handled by racecallbackthunk")
   149  	case raceSymbolizeCodeCmd:
   150  		raceSymbolizeCode((*symbolizeCodeContext)(ctx))
   151  	case raceSymbolizeDataCmd:
   152  		raceSymbolizeData((*symbolizeDataContext)(ctx))
   153  	default:
   154  		throw("unknown command")
   155  	}
   156  }
   157  
// raceSymbolizeCode resolves ctx.pc to function name, file, line and
// offset for a race report. When the PC cannot be resolved it fills in
// "??"/"-" placeholders instead. ctx.res is always set to 1: the request
// is considered handled either way.
func raceSymbolizeCode(ctx *symbolizeCodeContext) {
	f := FuncForPC(ctx.pc)
	if f != nil {
		file, line := f.FileLine(ctx.pc)
		if line != 0 {
			ctx.fn = cfuncname(f.funcInfo()) // function name as a C string (*byte)
			ctx.line = uintptr(line)
			ctx.file = &bytes(file)[0] // assume NUL-terminated
			ctx.off = ctx.pc - f.Entry()
			ctx.res = 1
			return
		}
	}
	// Unknown PC or no line info: report placeholders and the raw PC.
	ctx.fn = &qq[0]
	ctx.file = &dash[0]
	ctx.line = 0
	ctx.off = ctx.pc
	ctx.res = 1
}
   177  
// symbolizeDataContext is the request/response record for
// raceSymbolizeDataCmd callbacks from the race runtime.
// NOTE(review): the field order presumably mirrors a C-side struct in the
// TSan runtime — do not reorder; confirm against the race runtime sources.
type symbolizeDataContext struct {
	addr  uintptr // input: address to symbolize
	heap  uintptr // output: 1 if addr is a heap address
	start uintptr // output: base address of the containing object
	size  uintptr // output: size of the containing object
	name  *byte   // output: unused here (left zero)
	file  *byte   // output: unused here (left zero)
	line  uintptr // output: unused here (left zero)
	res   uintptr // output: set to 1 when the request was handled
}
   188  
   189  func raceSymbolizeData(ctx *symbolizeDataContext) {
   190  	if base, span, _ := findObject(ctx.addr, 0, 0); base != 0 {
   191  		ctx.heap = 1
   192  		ctx.start = base
   193  		ctx.size = span.elemsize
   194  		ctx.res = 1
   195  	}
   196  }
   197  
// Race runtime functions called via runtime·racecall.
//
// Each var below is a placeholder byte whose address is passed to
// racecall as the entry point of the same-named C function in the TSan
// runtime; //go:linkname binds the Go symbol name to the C symbol.
//go:linkname __tsan_init __tsan_init
var __tsan_init byte

//go:linkname __tsan_fini __tsan_fini
var __tsan_fini byte

//go:linkname __tsan_proc_create __tsan_proc_create
var __tsan_proc_create byte

//go:linkname __tsan_proc_destroy __tsan_proc_destroy
var __tsan_proc_destroy byte

//go:linkname __tsan_map_shadow __tsan_map_shadow
var __tsan_map_shadow byte

//go:linkname __tsan_finalizer_goroutine __tsan_finalizer_goroutine
var __tsan_finalizer_goroutine byte

//go:linkname __tsan_go_start __tsan_go_start
var __tsan_go_start byte

//go:linkname __tsan_go_end __tsan_go_end
var __tsan_go_end byte

//go:linkname __tsan_malloc __tsan_malloc
var __tsan_malloc byte

//go:linkname __tsan_free __tsan_free
var __tsan_free byte

//go:linkname __tsan_acquire __tsan_acquire
var __tsan_acquire byte

//go:linkname __tsan_release __tsan_release
var __tsan_release byte

//go:linkname __tsan_release_merge __tsan_release_merge
var __tsan_release_merge byte

//go:linkname __tsan_go_ignore_sync_begin __tsan_go_ignore_sync_begin
var __tsan_go_ignore_sync_begin byte

//go:linkname __tsan_go_ignore_sync_end __tsan_go_ignore_sync_end
var __tsan_go_ignore_sync_end byte

//go:linkname __tsan_report_count __tsan_report_count
var __tsan_report_count byte

// Mimic what cmd/cgo would do.
// These directives make the linker pull the C symbols out of the
// statically linked race runtime archive.
//go:cgo_import_static __tsan_init
//go:cgo_import_static __tsan_fini
//go:cgo_import_static __tsan_proc_create
//go:cgo_import_static __tsan_proc_destroy
//go:cgo_import_static __tsan_map_shadow
//go:cgo_import_static __tsan_finalizer_goroutine
//go:cgo_import_static __tsan_go_start
//go:cgo_import_static __tsan_go_end
//go:cgo_import_static __tsan_malloc
//go:cgo_import_static __tsan_free
//go:cgo_import_static __tsan_acquire
//go:cgo_import_static __tsan_release
//go:cgo_import_static __tsan_release_merge
//go:cgo_import_static __tsan_go_ignore_sync_begin
//go:cgo_import_static __tsan_go_ignore_sync_end
//go:cgo_import_static __tsan_report_count

// These are called from race_amd64.s.
//go:cgo_import_static __tsan_read
//go:cgo_import_static __tsan_read_pc
//go:cgo_import_static __tsan_read_range
//go:cgo_import_static __tsan_write
//go:cgo_import_static __tsan_write_pc
//go:cgo_import_static __tsan_write_range
//go:cgo_import_static __tsan_func_enter
//go:cgo_import_static __tsan_func_exit

//go:cgo_import_static __tsan_go_atomic32_load
//go:cgo_import_static __tsan_go_atomic64_load
//go:cgo_import_static __tsan_go_atomic32_store
//go:cgo_import_static __tsan_go_atomic64_store
//go:cgo_import_static __tsan_go_atomic32_exchange
//go:cgo_import_static __tsan_go_atomic64_exchange
//go:cgo_import_static __tsan_go_atomic32_fetch_add
//go:cgo_import_static __tsan_go_atomic64_fetch_add
//go:cgo_import_static __tsan_go_atomic32_compare_exchange
//go:cgo_import_static __tsan_go_atomic64_compare_exchange
   285  
// start/end of global data (data+bss).
// Set once by raceinit (page-rounded); read by isvalidaddr.
var racedatastart uintptr
var racedataend uintptr

// start/end of heap for race_amd64.s
// Grown by racemapshadow as the heap arena expands; read by isvalidaddr.
var racearenastart uintptr
var racearenaend uintptr

// The following functions have no Go bodies; they are implemented
// outside Go (presumably in race_amd64.s, which the cgo imports above
// reference).
func racefuncenter(uintptr)
func racefuncenterfp()
func racefuncexit()
func racereadrangepc1(uintptr, uintptr, uintptr)
func racewriterangepc1(uintptr, uintptr, uintptr)
func racecallbackthunk(uintptr)

// racecall allows calling an arbitrary function f from C race runtime
// with up to 4 uintptr arguments.
func racecall(*byte, uintptr, uintptr, uintptr, uintptr)
   304  
   305  // checks if the address has shadow (i.e. heap or data/bss)
   306  //go:nosplit
   307  func isvalidaddr(addr unsafe.Pointer) bool {
   308  	return racearenastart <= uintptr(addr) && uintptr(addr) < racearenaend ||
   309  		racedatastart <= uintptr(addr) && uintptr(addr) < racedataend
   310  }
   311  
// raceinit initializes the TSan runtime and returns the race contexts
// for the initial goroutine (gctx) and the initial proc (pctx), then
// maps shadow memory for the global data segments.
//go:nosplit
func raceinit() (gctx, pctx uintptr) {
	// cgo is required to initialize libc, which is used by race runtime
	if !iscgo {
		throw("raceinit: race build must use cgo")
	}

	racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), funcPC(racecallbackthunk), 0)

	// Round data segment to page boundaries, because it's used in mmap().
	// start/end are seeded with the extreme values so that the min/max
	// comparisons below select the lowest start and highest end across
	// the data, noptrdata, bss and noptrbss sections.
	start := ^uintptr(0) // max uintptr: every section start is smaller
	end := uintptr(0)
	if start > firstmoduledata.noptrdata {
		start = firstmoduledata.noptrdata
	}
	if start > firstmoduledata.data {
		start = firstmoduledata.data
	}
	if start > firstmoduledata.noptrbss {
		start = firstmoduledata.noptrbss
	}
	if start > firstmoduledata.bss {
		start = firstmoduledata.bss
	}
	if end < firstmoduledata.enoptrdata {
		end = firstmoduledata.enoptrdata
	}
	if end < firstmoduledata.edata {
		end = firstmoduledata.edata
	}
	if end < firstmoduledata.enoptrbss {
		end = firstmoduledata.enoptrbss
	}
	if end < firstmoduledata.ebss {
		end = firstmoduledata.ebss
	}
	size := round(end-start, _PageSize)
	// Map shadow memory for the global data range and remember the
	// bounds for isvalidaddr.
	racecall(&__tsan_map_shadow, start, size, 0, 0)
	racedatastart = start
	racedataend = start + size

	return
}
   355  
// raceFiniLock serializes racefini; it is intentionally never unlocked
// (see the comment in racefini).
var raceFiniLock mutex

// racefini flushes the race detector and exits the process via the
// TSan runtime. It does not return.
//go:nosplit
func racefini() {
	// racefini() can only be called once to avoid races.
	// This eventually (via __tsan_fini) calls C.exit which has
	// undefined behavior if called more than once. If the lock is
	// already held it's assumed that the first caller exits the program
	// so other calls can hang forever without an issue.
	lock(&raceFiniLock)
	racecall(&__tsan_fini, 0, 0, 0, 0)
}
   368  
   369  //go:nosplit
   370  func raceproccreate() uintptr {
   371  	var ctx uintptr
   372  	racecall(&__tsan_proc_create, uintptr(unsafe.Pointer(&ctx)), 0, 0, 0)
   373  	return ctx
   374  }
   375  
// raceprocdestroy releases a proc race context previously obtained
// from raceproccreate.
//go:nosplit
func raceprocdestroy(ctx uintptr) {
	racecall(&__tsan_proc_destroy, ctx, 0, 0, 0)
}
   380  
// racemapshadow tells the race detector to create shadow memory for a
// newly mapped chunk of heap [addr, addr+size) and widens the arena
// bounds consulted by isvalidaddr.
//go:nosplit
func racemapshadow(addr unsafe.Pointer, size uintptr) {
	// First mapping establishes the arena start.
	if racearenastart == 0 {
		racearenastart = uintptr(addr)
	}
	if racearenaend < uintptr(addr)+size {
		racearenaend = uintptr(addr) + size
	}
	racecall(&__tsan_map_shadow, uintptr(addr), size, 0, 0)
}
   391  
// racemalloc informs the race detector of a new allocation [p, p+sz).
//go:nosplit
func racemalloc(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_malloc, 0, 0, uintptr(p), sz)
}
   396  
// racefree informs the race detector that [p, p+sz) has been freed.
//go:nosplit
func racefree(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_free, uintptr(p), sz, 0, 0)
}
   401  
   402  //go:nosplit
   403  func racegostart(pc uintptr) uintptr {
   404  	_g_ := getg()
   405  	var spawng *g
   406  	if _g_.m.curg != nil {
   407  		spawng = _g_.m.curg
   408  	} else {
   409  		spawng = _g_
   410  	}
   411  
   412  	var racectx uintptr
   413  	racecall(&__tsan_go_start, spawng.racectx, uintptr(unsafe.Pointer(&racectx)), pc, 0)
   414  	return racectx
   415  }
   416  
// racegoend registers the end of the current goroutine with the race
// detector.
//go:nosplit
func racegoend() {
	racecall(&__tsan_go_end, getg().racectx, 0, 0, 0)
}
   421  
   422  //go:nosplit
   423  func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
   424  	_g_ := getg()
   425  	if _g_ != _g_.m.curg {
   426  		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
   427  		// Not interesting.
   428  		return
   429  	}
   430  	if callpc != 0 {
   431  		racefuncenter(callpc)
   432  	}
   433  	racewriterangepc1(uintptr(addr), sz, pc)
   434  	if callpc != 0 {
   435  		racefuncexit()
   436  	}
   437  }
   438  
   439  //go:nosplit
   440  func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
   441  	_g_ := getg()
   442  	if _g_ != _g_.m.curg {
   443  		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
   444  		// Not interesting.
   445  		return
   446  	}
   447  	if callpc != 0 {
   448  		racefuncenter(callpc)
   449  	}
   450  	racereadrangepc1(uintptr(addr), sz, pc)
   451  	if callpc != 0 {
   452  		racefuncexit()
   453  	}
   454  }
   455  
// raceacquire records an acquire of addr for the current goroutine.
//go:nosplit
func raceacquire(addr unsafe.Pointer) {
	raceacquireg(getg(), addr)
}

// raceacquireg records an acquire of addr on gp's race context. It is a
// no-op while the current goroutine has sync handling disabled
// (RaceDisable) or when addr has no shadow memory.
//go:nosplit
func raceacquireg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_acquire, gp.racectx, uintptr(addr), 0, 0)
}
   468  
// racerelease records a release of addr for the current goroutine.
//go:nosplit
func racerelease(addr unsafe.Pointer) {
	racereleaseg(getg(), addr)
}

// racereleaseg records a release of addr on gp's race context. It is a
// no-op while the current goroutine has sync handling disabled
// (RaceDisable) or when addr has no shadow memory.
//go:nosplit
func racereleaseg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release, gp.racectx, uintptr(addr), 0, 0)
}
   481  
// racereleasemerge records a release-merge of addr for the current
// goroutine (see RaceReleaseMerge for semantics).
//go:nosplit
func racereleasemerge(addr unsafe.Pointer) {
	racereleasemergeg(getg(), addr)
}

// racereleasemergeg records a release-merge of addr on gp's race
// context. It is a no-op while the current goroutine has sync handling
// disabled (RaceDisable) or when addr has no shadow memory.
//go:nosplit
func racereleasemergeg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release_merge, gp.racectx, uintptr(addr), 0, 0)
}
   494  
// racefingo marks the current goroutine as the finalizer goroutine for
// the race detector.
//go:nosplit
func racefingo() {
	racecall(&__tsan_finalizer_goroutine, getg().racectx, 0, 0, 0)
}
   499  

View as plain text