
Source file src/runtime/runtime1.go

Documentation: runtime

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
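// For example, GOTRACEBACK=crash is cached as
// 2<<tracebackShift | tracebackAll | tracebackCrash (see setTraceback below).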
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32

// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
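// For example, with GOTRACEBACK=system this returns level=2, all=true,
// crash=false (assuming the M is not in the middle of a throw).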
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	_g_ := getg()
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = _g_.m.throwing > 0 || t&tracebackAll != 0
	if _g_.m.traceback != 0 {
		level = int32(_g_.m.traceback)
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}

var (
	argc int32
	argv **byte
)

// nosplit for use in linux startup sysargs
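// argv_index returns argv[i], via C-style pointer arithmetic on the **byte.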
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
}

func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

func goargs() {
	if GOOS == "windows" {
		return
	}
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}

func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// problems.
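	//
	// The expected layout is argv[0..argc-1], nil, env[0..n-1], nil,
	// so env[i] lives at argv_index(argv, argc+1+i).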
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}

func environ() []string {
	return envs
}

// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
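// (64-bit atomics require 8-byte alignment there, and package-level
// uint64 variables can be relied upon to be suitably aligned.)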
var test_z64, test_x64 uint64

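// testAtomic64 sanity-checks the 64-bit atomic operations. Test values
// above 1<<32, such as (1<<40)+1, exercise the upper half of the word,
// which matters on 32-bit platforms.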
func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}

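// check verifies basic assumptions the runtime depends on: primitive type
// sizes, atomic and compare-and-swap behavior, NaN comparison semantics,
// and assembly-level invariants (checkASM).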
func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k, k1 unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != sys.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != sys.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	k = unsafe.Pointer(uintptr(0xfedcb123))
	if sys.PtrSize == 8 {
		k = unsafe.Pointer(uintptr(k) << 10)
	}
	if casp(&k, nil, nil) {
		throw("casp1")
	}
	k1 = add(k, 1)
	if !casp(&k, k, k1) {
		throw("casp2")
	}
	if k != k1 {
		throw("casp3")
	}

	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	m = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&m[1], 0x1)
	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
		throw("atomicand8")
	}

	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if !(i != i) {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if !(i != i1) {
		throw("float32nan3")
	}

	testAtomic64()

	if _FixedStack != round2(_FixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}

type dbgVar struct {
	name  string
	value *int32
}

// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
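//
// For example, GODEBUG=gctrace=1,schedtrace=1000 sets gctrace to 1 and
// schedtrace to 1000; unrecognized keys are ignored (see parsedebugvars).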
var debug struct {
	allocfreetrace     int32
	cgocheck           int32
	efence             int32
	gccheckmark        int32
	gcpacertrace       int32
	gcshrinkstackoff   int32
	gcrescanstacks     int32
	gcstoptheworld     int32
	gctrace            int32
	invalidptr         int32
	sbrk               int32
	scavenge           int32
	scheddetail        int32
	schedtrace         int32
	tracebackancestors int32
}

var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcrescanstacks", &debug.gcrescanstacks},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"sbrk", &debug.sbrk},
	{"scavenge", &debug.scavenge},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
	{"tracebackancestors", &debug.tracebackancestors},
}

func parsedebugvars() {
	// defaults
	debug.cgocheck = 1
	debug.invalidptr = 1

	for p := gogetenv("GODEBUG"); p != ""; {
		field := ""
		i := index(p, ",")
		if i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i = index(field, "=")
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						*v.value = n
					}
				}
			}
		}
	}

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache
}

//go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	default:
		t = tracebackAll
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// When C owns the process, simply exiting the process on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

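	// OR in the bits latched from the GOTRACEBACK environment variable
	// (traceback_env, set in parsedebugvars): runtime/debug.SetTraceback
	// cannot lower the level below the environment setting.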
	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}

// Poor man's 64-bit division.
// This is a very special function; do not use it unless you are sure what you are doing.
// int64 division is lowered into a _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
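// For example, timediv(5500000000, 1000000000, &rem) returns 5 with
// rem = 500000000; on overflow, it returns 0x7fffffff with rem set to 0.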
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			res += 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}

// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

//go:nosplit
func acquirem() *m {
	_g_ := getg()
	_g_.m.locks++
	return _g_.m
}

//go:nosplit
func releasem(mp *m) {
	_g_ := getg()
	mp.locks--
	if mp.locks == 0 && _g_.preempt {
		// restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}
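// acquirem and releasem pair to disable and re-enable preemption: while
// m.locks > 0 the goroutine cannot be preempted off its M.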

//go:nosplit
func gomcache() *mcache {
	return getg().m.mcache
}

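// reflect_typelinks returns, for each active module, a pointer to its
// types section and the typelink offsets within that section.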
//go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	modules := activeModules()
	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
	ret := [][]int32{modules[0].typelinks}
	for _, md := range modules[1:] {
		sections = append(sections, unsafe.Pointer(md.types))
		ret = append(ret, md.typelinks)
	}
	return sections, ret
}

// reflect_resolveNameOff resolves a name offset from a base pointer.
//go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
}

// reflect_resolveTypeOff resolves an *rtype offset from a base type.
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}

// reflect_resolveTextOff resolves a function pointer offset from a base type.
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return (*_type)(rtype).textOff(textOff(off))
}

// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
//go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		id = reflectOffs.next
		reflectOffs.next-- // use negative offsets as IDs to aid debugging
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}
