Source file src/runtime/runtime1.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32

// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	_g_ := getg()
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = _g_.m.throwing > 0 || t&tracebackAll != 0
	if _g_.m.traceback != 0 {
		level = int32(_g_.m.traceback)
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}
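
// Illustration, not part of the original file: how the bit layout described
// above packs a setting into traceback_cache. With tracebackCrash == 1,
// tracebackAll == 2, and tracebackShift == 2, the "crash" configuration
// (level 2, all goroutines, core dump) encodes as:
//
//	t := uint32(2)<<tracebackShift | tracebackAll | tracebackCrash // 0b1011 == 11
//	level := int32(t >> tracebackShift)                            // 2
//	all := t&tracebackAll != 0                                     // true
//	crash := t&tracebackCrash != 0                                 // true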

var (
	argc int32
	argv **byte
)

// nosplit for use in linux startup sysargs
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
}

func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

func goargs() {
	if GOOS == "windows" {
		return
	}
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}

func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// problems.
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}
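
// Illustration, not part of the original file: the vector layout that
// argv_index and goenvs_unix above assume (modulo the ppc64 TODO). On Unix
// the environment pointers follow the nil that terminates argv, so for
// argc == 2:
//
//	index:  0        1        2    3       4       ...
//	value:  argv[0]  argv[1]  nil  env[0]  env[1]  ... nil
//
// which is why env[i] is read at index argc+1+i.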

func environ() []string {
	return envs
}

// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64

func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 || test_z64 != 42 {
		throw("cas64 failed")
	}
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}

func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k     unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != sys.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != sys.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	m = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&m[1], 0x1)
	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
		throw("atomicand8")
	}

	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if !(i != i) {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if !(i != i1) {
		throw("float32nan3")
	}

	testAtomic64()

	if _FixedStack != round2(_FixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}

type dbgVar struct {
	name  string
	value *int32
}

// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
var debug struct {
	allocfreetrace     int32
	cgocheck           int32
	clobberfree        int32
	efence             int32
	gccheckmark        int32
	gcpacertrace       int32
	gcshrinkstackoff   int32
	gcstoptheworld     int32
	gctrace            int32
	invalidptr         int32
	madvdontneed       int32 // for Linux; issue 28466
	sbrk               int32
	scavenge           int32
	scavtrace          int32
	scheddetail        int32
	schedtrace         int32
	tracebackancestors int32
	asyncpreemptoff    int32
}

var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"clobberfree", &debug.clobberfree},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"madvdontneed", &debug.madvdontneed},
	{"sbrk", &debug.sbrk},
	{"scavenge", &debug.scavenge},
	{"scavtrace", &debug.scavtrace},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
	{"tracebackancestors", &debug.tracebackancestors},
	{"asyncpreemptoff", &debug.asyncpreemptoff},
}

func parsedebugvars() {
	// defaults
	debug.cgocheck = 1
	debug.invalidptr = 1

	for p := gogetenv("GODEBUG"); p != ""; {
		field := ""
		i := index(p, ",")
		if i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i = index(field, "=")
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						*v.value = n
					}
				}
			}
		}
	}

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache
}
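
// Illustration, not part of the original file: parsedebugvars splits GODEBUG
// on commas, then on the first "=" in each field, so a setting such as
//
//	GODEBUG=gctrace=1,schedtrace=1000,memprofilerate=0
//
// leaves debug.gctrace == 1 and debug.schedtrace == 1000 via the dbgvars
// table, while memprofilerate is routed specially to the exported int
// variable runtime.MemProfileRate.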

//go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	default:
		t = tracebackAll
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// When C owns the process, simply exiting the process on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}
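
// Usage sketch, not part of the original file: the go:linkname directive
// above makes this function the implementation behind the public API in
// runtime/debug, so user code can raise the traceback level at run time:
//
//	import "runtime/debug"
//
//	debug.SetTraceback("crash") // comparable to running with GOTRACEBACK=crash
//
// Note the "t |= traceback_env" above: a runtime call cannot lower the
// level below what the GOTRACEBACK environment variable requested.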

// Poor man's 64-bit division.
// This is a very special function; do not use it if you are not sure what you are doing.
// int64 division is lowered into a _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
// This keeps us within no-split stack limits on 32-bit processors.
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			// Before this for loop, res was 0, thus all these
			// power of 2 increments are now just bitsets.
			res |= 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}
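
// Usage sketch, not part of the original file: check() above exercises
// timediv in exactly this way, splitting a nanosecond count into whole
// seconds and a remainder without a 64-bit divide:
//
//	var rem int32
//	sec := timediv(12345*1000000000+54321, 1000000000, &rem)
//	// sec == 12345, rem == 54321
//
// If the true quotient does not fit in 31 bits, timediv returns 0x7fffffff
// and sets *rem to 0.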

// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

//go:nosplit
func acquirem() *m {
	_g_ := getg()
	_g_.m.locks++
	return _g_.m
}

//go:nosplit
func releasem(mp *m) {
	_g_ := getg()
	mp.locks--
	if mp.locks == 0 && _g_.preempt {
		// restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}
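
// Usage sketch, not part of the original file: runtime code pairs these
// helpers to pin the current goroutine to its M (m.locks > 0 disables
// preemption) for a short critical section:
//
//	mp := acquirem()
//	// ... work that must not be preempted or migrate to another M ...
//	releasem(mp)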

//go:nosplit
func gomcache() *mcache {
	return getg().m.mcache
}

//go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	modules := activeModules()
	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
	ret := [][]int32{modules[0].typelinks}
	for _, md := range modules[1:] {
		sections = append(sections, unsafe.Pointer(md.types))
		ret = append(ret, md.typelinks)
	}
	return sections, ret
}

// reflect_resolveNameOff resolves a name offset from a base pointer.
//go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
}

// reflect_resolveTypeOff resolves an *rtype offset from a base type.
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}

// reflect_resolveTextOff resolves a function pointer offset from a base type.
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return (*_type)(rtype).textOff(textOff(off))
}

// reflectlite_resolveNameOff resolves a name offset from a base pointer.
//go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff
func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
}

// reflectlite_resolveTypeOff resolves an *rtype offset from a base type.
//go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff
func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}

// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
//go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		id = reflectOffs.next
		reflectOffs.next-- // use negative offsets as IDs to aid debugging
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}
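
// Illustration, not part of the original file: reflect_addReflectOff hands
// out negative IDs in registration order and is idempotent per pointer, so
// a sequence of calls behaves like:
//
//	reflect_addReflectOff(p1) // -1 (first registration)
//	reflect_addReflectOff(p2) // -2
//	reflect_addReflectOff(p1) // -1 again (already in reflectOffs.minv)
//
// The negative range keeps these runtime-assigned IDs visually distinct
// from linker-generated offsets, which is the debugging aid the inline
// comment refers to.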