Black Lives Matter. Support the Equal Justice Initiative.

Source file src/runtime/runtime1.go

Documentation: runtime

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"internal/bytealg"
     9  	"runtime/internal/atomic"
    10  	"runtime/internal/sys"
    11  	"unsafe"
    12  )
    13  
    14  // Keep a cached value to make gotraceback fast,
    15  // since we call it on every call to gentraceback.
    16  // The cached value is a uint32 in which the low bits
    17  // are the "crash" and "all" settings and the remaining
    18  // bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota // bit 0: the "crash" setting (core dump after traceback)
	tracebackAll               // bit 1: the "all" setting (print every goroutine)
	tracebackShift = iota      // bits at and above this position hold the numeric traceback level
)

// traceback_cache is the packed GOTRACEBACK settings read atomically by
// gotraceback. It starts at level 2 (include runtime frames) so that
// crashes before parsedebugvars/setTraceback run still show full detail.
var traceback_cache uint32 = 2 << tracebackShift

// traceback_env holds the bits that came from the GOTRACEBACK environment
// variable; setTraceback ORs them back in, so runtime/debug.SetTraceback
// can raise but never lower the environment-configured settings.
var traceback_env uint32
    27  
// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	_g_ := getg()
	// Single atomic load of the packed settings; this runs on every
	// call to gentraceback, so it must stay cheap (hence the cache).
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	// While this m is throwing, always print all goroutines,
	// regardless of the configured "all" bit.
	all = _g_.m.throwing > 0 || t&tracebackAll != 0
	if _g_.m.traceback != 0 {
		// A nonzero per-m traceback level overrides the cached
		// environment setting.
		level = int32(_g_.m.traceback)
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}
    49  
var (
	// argc and argv record the raw C-style argument vector handed to
	// the process at startup (stored by args); goargs and goenvs_unix
	// read them to build argslice and envs.
	argc int32
	argv **byte
)
    54  
    55  // nosplit for use in linux startup sysargs
    56  //go:nosplit
    57  func argv_index(argv **byte, i int32) *byte {
    58  	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
    59  }
    60  
// args records the OS-provided argument count and vector for later use
// by goargs and goenvs_unix, then gives the OS-specific sysargs hook a
// chance to examine them.
func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}
    66  
    67  func goargs() {
    68  	if GOOS == "windows" {
    69  		return
    70  	}
    71  	argslice = make([]string, argc)
    72  	for i := int32(0); i < argc; i++ {
    73  		argslice[i] = gostringnocopy(argv_index(argv, i))
    74  	}
    75  }
    76  
    77  func goenvs_unix() {
    78  	// TODO(austin): ppc64 in dynamic linking mode doesn't
    79  	// guarantee env[] will immediately follow argv. Might cause
    80  	// problems.
    81  	n := int32(0)
    82  	for argv_index(argv, argc+1+n) != nil {
    83  		n++
    84  	}
    85  
    86  	envs = make([]string, n)
    87  	for i := int32(0); i < n; i++ {
    88  		envs[i] = gostring(argv_index(argv, argc+1+i))
    89  	}
    90  }
    91  
// environ returns the runtime's cached copy of the process environment
// (the envs slice built at startup, e.g. by goenvs_unix).
func environ() []string {
	return envs
}
    95  
// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
// test_z64 is the target of the atomic operations in testAtomic64;
// test_x64 supplies the comparison value for its Cas64 checks.
var test_z64, test_x64 uint64
    99  
// testAtomic64 sanity-checks the 64-bit atomic operations (Cas64,
// Load64, Store64, Xadd64, Xchg64), using values above 2^32 to catch
// implementations that only handle the low 32 bits. Called from check
// at startup; any failure is fatal.
func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	// Cas64 with a non-matching old value must fail and leave both
	// operands unchanged.
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	// Cas64 with the matching old value must succeed and store 1.
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	// Xadd64 returns the new (post-add) value.
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	// Xchg64 returns the old (pre-swap) value.
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}
   136  
   137  func check() {
   138  	var (
   139  		a     int8
   140  		b     uint8
   141  		c     int16
   142  		d     uint16
   143  		e     int32
   144  		f     uint32
   145  		g     int64
   146  		h     uint64
   147  		i, i1 float32
   148  		j, j1 float64
   149  		k     unsafe.Pointer
   150  		l     *uint16
   151  		m     [4]byte
   152  	)
   153  	type x1t struct {
   154  		x uint8
   155  	}
   156  	type y1t struct {
   157  		x1 x1t
   158  		y  uint8
   159  	}
   160  	var x1 x1t
   161  	var y1 y1t
   162  
   163  	if unsafe.Sizeof(a) != 1 {
   164  		throw("bad a")
   165  	}
   166  	if unsafe.Sizeof(b) != 1 {
   167  		throw("bad b")
   168  	}
   169  	if unsafe.Sizeof(c) != 2 {
   170  		throw("bad c")
   171  	}
   172  	if unsafe.Sizeof(d) != 2 {
   173  		throw("bad d")
   174  	}
   175  	if unsafe.Sizeof(e) != 4 {
   176  		throw("bad e")
   177  	}
   178  	if unsafe.Sizeof(f) != 4 {
   179  		throw("bad f")
   180  	}
   181  	if unsafe.Sizeof(g) != 8 {
   182  		throw("bad g")
   183  	}
   184  	if unsafe.Sizeof(h) != 8 {
   185  		throw("bad h")
   186  	}
   187  	if unsafe.Sizeof(i) != 4 {
   188  		throw("bad i")
   189  	}
   190  	if unsafe.Sizeof(j) != 8 {
   191  		throw("bad j")
   192  	}
   193  	if unsafe.Sizeof(k) != sys.PtrSize {
   194  		throw("bad k")
   195  	}
   196  	if unsafe.Sizeof(l) != sys.PtrSize {
   197  		throw("bad l")
   198  	}
   199  	if unsafe.Sizeof(x1) != 1 {
   200  		throw("bad unsafe.Sizeof x1")
   201  	}
   202  	if unsafe.Offsetof(y1.y) != 1 {
   203  		throw("bad offsetof y1.y")
   204  	}
   205  	if unsafe.Sizeof(y1) != 2 {
   206  		throw("bad unsafe.Sizeof y1")
   207  	}
   208  
   209  	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
   210  		throw("bad timediv")
   211  	}
   212  
   213  	var z uint32
   214  	z = 1
   215  	if !atomic.Cas(&z, 1, 2) {
   216  		throw("cas1")
   217  	}
   218  	if z != 2 {
   219  		throw("cas2")
   220  	}
   221  
   222  	z = 4
   223  	if atomic.Cas(&z, 5, 6) {
   224  		throw("cas3")
   225  	}
   226  	if z != 4 {
   227  		throw("cas4")
   228  	}
   229  
   230  	z = 0xffffffff
   231  	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
   232  		throw("cas5")
   233  	}
   234  	if z != 0xfffffffe {
   235  		throw("cas6")
   236  	}
   237  
   238  	m = [4]byte{1, 1, 1, 1}
   239  	atomic.Or8(&m[1], 0xf0)
   240  	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
   241  		throw("atomicor8")
   242  	}
   243  
   244  	m = [4]byte{0xff, 0xff, 0xff, 0xff}
   245  	atomic.And8(&m[1], 0x1)
   246  	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
   247  		throw("atomicand8")
   248  	}
   249  
   250  	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
   251  	if j == j {
   252  		throw("float64nan")
   253  	}
   254  	if !(j != j) {
   255  		throw("float64nan1")
   256  	}
   257  
   258  	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
   259  	if j == j1 {
   260  		throw("float64nan2")
   261  	}
   262  	if !(j != j1) {
   263  		throw("float64nan3")
   264  	}
   265  
   266  	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
   267  	if i == i {
   268  		throw("float32nan")
   269  	}
   270  	if i == i {
   271  		throw("float32nan1")
   272  	}
   273  
   274  	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
   275  	if i == i1 {
   276  		throw("float32nan2")
   277  	}
   278  	if i == i1 {
   279  		throw("float32nan3")
   280  	}
   281  
   282  	testAtomic64()
   283  
   284  	if _FixedStack != round2(_FixedStack) {
   285  		throw("FixedStack is not power-of-2")
   286  	}
   287  
   288  	if !checkASM() {
   289  		throw("assembly checks failed")
   290  	}
   291  }
   292  
// dbgVar describes one GODEBUG option: its name as it appears in the
// GODEBUG environment variable, and a pointer to the int32 that
// receives its parsed value (see dbgvars and parsedebugvars).
type dbgVar struct {
	name  string
	value *int32
}
   297  
// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
//
// Each int32 field is filled in by parsedebugvars from the GODEBUG
// name=value pair registered for it in dbgvars.
var debug struct {
	cgocheck           int32
	clobberfree        int32
	efence             int32
	gccheckmark        int32
	gcpacertrace       int32
	gcshrinkstackoff   int32
	gcstoptheworld     int32
	gctrace            int32
	invalidptr         int32
	madvdontneed       int32 // for Linux; issue 28466
	scavenge           int32
	scavtrace          int32
	scheddetail        int32
	schedtrace         int32
	tracebackancestors int32
	asyncpreemptoff    int32

	// debug.malloc is used as a combined debug check
	// in the malloc function and should be set
	// if any of the below debug options is != 0.
	malloc         bool
	allocfreetrace int32
	inittrace      int32
	sbrk           int32
}
   328  
   329  var dbgvars = []dbgVar{
   330  	{"allocfreetrace", &debug.allocfreetrace},
   331  	{"clobberfree", &debug.clobberfree},
   332  	{"cgocheck", &debug.cgocheck},
   333  	{"efence", &debug.efence},
   334  	{"gccheckmark", &debug.gccheckmark},
   335  	{"gcpacertrace", &debug.gcpacertrace},
   336  	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
   337  	{"gcstoptheworld", &debug.gcstoptheworld},
   338  	{"gctrace", &debug.gctrace},
   339  	{"invalidptr", &debug.invalidptr},
   340  	{"madvdontneed", &debug.madvdontneed},
   341  	{"sbrk", &debug.sbrk},
   342  	{"scavenge", &debug.scavenge},
   343  	{"scavtrace", &debug.scavtrace},
   344  	{"scheddetail", &debug.scheddetail},
   345  	{"schedtrace", &debug.schedtrace},
   346  	{"tracebackancestors", &debug.tracebackancestors},
   347  	{"asyncpreemptoff", &debug.asyncpreemptoff},
   348  	{"inittrace", &debug.inittrace},
   349  }
   350  
   351  func parsedebugvars() {
   352  	// defaults
   353  	debug.cgocheck = 1
   354  	debug.invalidptr = 1
   355  	if GOOS == "linux" {
   356  		// On Linux, MADV_FREE is faster than MADV_DONTNEED,
   357  		// but doesn't affect many of the statistics that
   358  		// MADV_DONTNEED does until the memory is actually
   359  		// reclaimed. This generally leads to poor user
   360  		// experience, like confusing stats in top and other
   361  		// monitoring tools; and bad integration with
   362  		// management systems that respond to memory usage.
   363  		// Hence, default to MADV_DONTNEED.
   364  		debug.madvdontneed = 1
   365  	}
   366  
   367  	for p := gogetenv("GODEBUG"); p != ""; {
   368  		field := ""
   369  		i := bytealg.IndexByteString(p, ',')
   370  		if i < 0 {
   371  			field, p = p, ""
   372  		} else {
   373  			field, p = p[:i], p[i+1:]
   374  		}
   375  		i = bytealg.IndexByteString(field, '=')
   376  		if i < 0 {
   377  			continue
   378  		}
   379  		key, value := field[:i], field[i+1:]
   380  
   381  		// Update MemProfileRate directly here since it
   382  		// is int, not int32, and should only be updated
   383  		// if specified in GODEBUG.
   384  		if key == "memprofilerate" {
   385  			if n, ok := atoi(value); ok {
   386  				MemProfileRate = n
   387  			}
   388  		} else {
   389  			for _, v := range dbgvars {
   390  				if v.name == key {
   391  					if n, ok := atoi32(value); ok {
   392  						*v.value = n
   393  					}
   394  				}
   395  			}
   396  		}
   397  	}
   398  
   399  	debug.malloc = (debug.allocfreetrace | debug.inittrace | debug.sbrk) != 0
   400  
   401  	setTraceback(gogetenv("GOTRACEBACK"))
   402  	traceback_env = traceback_cache
   403  }
   404  
   405  //go:linkname setTraceback runtime/debug.SetTraceback
   406  func setTraceback(level string) {
   407  	var t uint32
   408  	switch level {
   409  	case "none":
   410  		t = 0
   411  	case "single", "":
   412  		t = 1 << tracebackShift
   413  	case "all":
   414  		t = 1<<tracebackShift | tracebackAll
   415  	case "system":
   416  		t = 2<<tracebackShift | tracebackAll
   417  	case "crash":
   418  		t = 2<<tracebackShift | tracebackAll | tracebackCrash
   419  	default:
   420  		t = tracebackAll
   421  		if n, ok := atoi(level); ok && n == int(uint32(n)) {
   422  			t |= uint32(n) << tracebackShift
   423  		}
   424  	}
   425  	// when C owns the process, simply exit'ing the process on fatal errors
   426  	// and panics is surprising. Be louder and abort instead.
   427  	if islibrary || isarchive {
   428  		t |= tracebackCrash
   429  	}
   430  
   431  	t |= traceback_env
   432  
   433  	atomic.Store(&traceback_cache, t)
   434  }
   435  
   436  // Poor mans 64-bit division.
   437  // This is a very special function, do not use it if you are not sure what you are doing.
   438  // int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
   439  // Handles overflow in a time-specific manner.
   440  // This keeps us within no-split stack limits on 32-bit processors.
   441  //go:nosplit
   442  func timediv(v int64, div int32, rem *int32) int32 {
   443  	res := int32(0)
   444  	for bit := 30; bit >= 0; bit-- {
   445  		if v >= int64(div)<<uint(bit) {
   446  			v = v - (int64(div) << uint(bit))
   447  			// Before this for loop, res was 0, thus all these
   448  			// power of 2 increments are now just bitsets.
   449  			res |= 1 << uint(bit)
   450  		}
   451  	}
   452  	if v >= int64(div) {
   453  		if rem != nil {
   454  			*rem = 0
   455  		}
   456  		return 0x7fffffff
   457  	}
   458  	if rem != nil {
   459  		*rem = int32(v)
   460  	}
   461  	return res
   462  }
   463  
   464  // Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.
   465  
   466  //go:nosplit
   467  func acquirem() *m {
   468  	_g_ := getg()
   469  	_g_.m.locks++
   470  	return _g_.m
   471  }
   472  
   473  //go:nosplit
   474  func releasem(mp *m) {
   475  	_g_ := getg()
   476  	mp.locks--
   477  	if mp.locks == 0 && _g_.preempt {
   478  		// restore the preemption request in case we've cleared it in newstack
   479  		_g_.stackguard0 = stackPreempt
   480  	}
   481  }
   482  
   483  //go:linkname reflect_typelinks reflect.typelinks
   484  func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
   485  	modules := activeModules()
   486  	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
   487  	ret := [][]int32{modules[0].typelinks}
   488  	for _, md := range modules[1:] {
   489  		sections = append(sections, unsafe.Pointer(md.types))
   490  		ret = append(ret, md.typelinks)
   491  	}
   492  	return sections, ret
   493  }
   494  
   495  // reflect_resolveNameOff resolves a name offset from a base pointer.
   496  //go:linkname reflect_resolveNameOff reflect.resolveNameOff
   497  func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
   498  	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
   499  }
   500  
   501  // reflect_resolveTypeOff resolves an *rtype offset from a base type.
   502  //go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
   503  func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
   504  	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
   505  }
   506  
   507  // reflect_resolveTextOff resolves a function pointer offset from a base type.
   508  //go:linkname reflect_resolveTextOff reflect.resolveTextOff
   509  func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
   510  	return (*_type)(rtype).textOff(textOff(off))
   511  
   512  }
   513  
   514  // reflectlite_resolveNameOff resolves a name offset from a base pointer.
   515  //go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff
   516  func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
   517  	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
   518  }
   519  
   520  // reflectlite_resolveTypeOff resolves an *rtype offset from a base type.
   521  //go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff
   522  func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
   523  	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
   524  }
   525  
   526  // reflect_addReflectOff adds a pointer to the reflection offset lookup map.
   527  //go:linkname reflect_addReflectOff reflect.addReflectOff
   528  func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
   529  	reflectOffsLock()
   530  	if reflectOffs.m == nil {
   531  		reflectOffs.m = make(map[int32]unsafe.Pointer)
   532  		reflectOffs.minv = make(map[unsafe.Pointer]int32)
   533  		reflectOffs.next = -1
   534  	}
   535  	id, found := reflectOffs.minv[ptr]
   536  	if !found {
   537  		id = reflectOffs.next
   538  		reflectOffs.next-- // use negative offsets as IDs to aid debugging
   539  		reflectOffs.m[id] = ptr
   540  		reflectOffs.minv[ptr] = id
   541  	}
   542  	reflectOffsUnlock()
   543  	return id
   544  }
   545  

View as plain text