
Source file src/runtime/runtime1.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32

// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	_g_ := getg()
	all = _g_.m.throwing > 0
	if _g_.m.traceback != 0 {
		level = int32(_g_.m.traceback)
		return
	}
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = all || t&tracebackAll != 0
	level = int32(t >> tracebackShift)
	return
}
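
To make the cache encoding concrete, here is a standalone sketch (not part of runtime1.go) that packs and unpacks a value the same way gotraceback does: the crash and all flags live in the low two bits, and the level occupies the bits from tracebackShift up.

package main

import "fmt"

// Same constants as in runtime1.go: crash is bit 0, all is bit 1,
// and the traceback level starts at bit 2.
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

func main() {
	// GOTRACEBACK=crash packs level 2 plus both flags.
	t := uint32(2<<tracebackShift | tracebackAll | tracebackCrash)
	fmt.Printf("packed: %04b\n", t)              // 1011
	fmt.Println("level:", t>>tracebackShift)     // 2
	fmt.Println("all:  ", t&tracebackAll != 0)   // true
	fmt.Println("crash:", t&tracebackCrash != 0) // true
}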

var (
	argc int32
	argv **byte
)

// nosplit for use in linux startup sysargs
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
}

func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

func goargs() {
	if GOOS == "windows" {
		return
	}
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}

func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// problems.
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}

func environ() []string {
	return envs
}
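
goenvs_unix relies on the Unix process startup layout: argv[0..argc-1], a nil terminator, then the environment strings, then another nil, so the environment begins at index argc+1. The following standalone sketch (hypothetical names argvIndex and cstr; not part of runtime1.go) builds such a block in a Go slice and walks it the same way:

package main

import (
	"fmt"
	"unsafe"
)

// argvIndex mimics argv_index: treat argv as the base of an array of
// byte pointers and read entry i.
func argvIndex(argv **byte, i int32) *byte {
	return *(**byte)(unsafe.Pointer(uintptr(unsafe.Pointer(argv)) + uintptr(i)*unsafe.Sizeof(argv)))
}

// cstr makes a NUL-terminated byte string, as the kernel provides.
func cstr(s string) *byte {
	b := append([]byte(s), 0)
	return &b[0]
}

func main() {
	// argv entries, nil terminator, env entries, nil terminator.
	block := []*byte{cstr("prog"), cstr("-v"), nil, cstr("HOME=/root"), cstr("TERM=xterm"), nil}
	argv, argc := &block[0], int32(2)

	// Count env entries exactly as goenvs_unix does: start just past
	// the nil that terminates argv.
	n := int32(0)
	for argvIndex(argv, argc+1+n) != nil {
		n++
	}
	fmt.Println("env entries:", n) // 2
}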

// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64

func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	prefetcht0(uintptr(unsafe.Pointer(&test_z64)))
	prefetcht1(uintptr(unsafe.Pointer(&test_z64)))
	prefetcht2(uintptr(unsafe.Pointer(&test_z64)))
	prefetchnta(uintptr(unsafe.Pointer(&test_z64)))
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}
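
The same 64-bit invariants testAtomic64 asserts can be expressed with the public sync/atomic package; this standalone sketch (not part of runtime1.go) exercises the analogous CAS, store, add, swap, and load operations:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var z uint64 = 42

	// CAS fails when the old value does not match.
	if atomic.CompareAndSwapUint64(&z, 0, 1) {
		panic("cas64 should fail: z is 42, not 0")
	}
	// CAS succeeds when it does.
	if !atomic.CompareAndSwapUint64(&z, 42, 1) {
		panic("cas64 should succeed")
	}
	atomic.StoreUint64(&z, (1<<40)+1)
	// AddUint64 returns the new value.
	if atomic.AddUint64(&z, (1<<40)+1) != (2<<40)+2 {
		panic("add64 failed")
	}
	// SwapUint64 returns the old value.
	if atomic.SwapUint64(&z, (3<<40)+3) != (2<<40)+2 {
		panic("swap64 failed")
	}
	fmt.Println("final:", atomic.LoadUint64(&z)) // (3<<40)+3
}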

func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k, k1 unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != sys.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != sys.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	k = unsafe.Pointer(uintptr(0xfedcb123))
	if sys.PtrSize == 8 {
		k = unsafe.Pointer(uintptr(k) << 10)
	}
	if casp(&k, nil, nil) {
		throw("casp1")
	}
	k1 = add(k, 1)
	if !casp(&k, k, k1) {
		throw("casp2")
	}
	if k != k1 {
		throw("casp3")
	}

	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	m = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&m[1], 0x1)
	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
		throw("atomicand8")
	}

	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if !(i != i) {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if !(i != i1) {
		throw("float32nan3")
	}

	testAtomic64()

	if _FixedStack != round2(_FixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}
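
The NaN checks in check above store all-ones bit patterns into floats through unsafe pointers; an all-ones exponent with a nonzero mantissa is a NaN, and a NaN compares unequal to everything, including itself (so both the == and != code paths get exercised). The standard library can show the same thing without unsafe, as in this standalone sketch (not part of runtime1.go):

package main

import (
	"fmt"
	"math"
)

func main() {
	j := math.Float64frombits(^uint64(0))  // all ones: a NaN
	j1 := math.Float64frombits(^uint64(1)) // all ones except bit 0: also a NaN
	fmt.Println(j == j)                        // false
	fmt.Println(j != j)                        // true
	fmt.Println(j == j1)                       // false
	fmt.Println(math.IsNaN(j), math.IsNaN(j1)) // true true
}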

type dbgVar struct {
	name  string
	value *int32
}

// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
var debug struct {
	allocfreetrace    int32
	cgocheck          int32
	efence            int32
	gccheckmark       int32
	gcpacertrace      int32
	gcshrinkstackoff  int32
	gcstackbarrieroff int32
	gcstackbarrierall int32
	gcrescanstacks    int32
	gcstoptheworld    int32
	gctrace           int32
	invalidptr        int32
	sbrk              int32
	scavenge          int32
	scheddetail       int32
	schedtrace        int32
	wbshadow          int32
}

var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcstackbarrieroff", &debug.gcstackbarrieroff},
	{"gcstackbarrierall", &debug.gcstackbarrierall},
	{"gcrescanstacks", &debug.gcrescanstacks},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"sbrk", &debug.sbrk},
	{"scavenge", &debug.scavenge},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
	{"wbshadow", &debug.wbshadow},
}

func parsedebugvars() {
	// defaults
	debug.cgocheck = 1
	debug.invalidptr = 1

	for p := gogetenv("GODEBUG"); p != ""; {
		field := ""
		i := index(p, ",")
		if i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i = index(field, "=")
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						*v.value = n
					}
				}
			}
		}
	}

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache

	if debug.gcrescanstacks == 0 {
		// Without rescanning, there's no need for stack
		// barriers.
		debug.gcstackbarrieroff = 1
		debug.gcstackbarrierall = 0
	}

	if debug.gcstackbarrierall > 0 {
		firstStackBarrierOffset = 0
	}

	// For cgocheck > 1, we turn on the write barrier at all times
	// and check all pointer writes.
	if debug.cgocheck > 1 {
		writeBarrier.cgo = true
		writeBarrier.enabled = true
	}
}
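
GODEBUG is a comma-separated list of key=value pairs, and parsedebugvars tokenizes it by hand because the runtime cannot use the strings package. In ordinary Go the same loop looks like this standalone sketch (not part of runtime1.go), with strings.Index standing in for the runtime's index helper:

package main

import (
	"fmt"
	"strings"
)

func main() {
	p := "gctrace=1,schedtrace=1000,bogus"
	for p != "" {
		var field string
		if i := strings.Index(p, ","); i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i := strings.Index(field, "=")
		if i < 0 {
			continue // entries without '=' are skipped, as in parsedebugvars
		}
		key, value := field[:i], field[i+1:]
		fmt.Printf("%s -> %s\n", key, value) // gctrace -> 1, schedtrace -> 1000
	}
}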

//go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	default:
		t = tracebackAll
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// When C owns the process, simply exiting on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}
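
Enumerating the packed values setTraceback produces makes the switch easier to read; this standalone sketch (not part of runtime1.go) prints them using the same constants defined at the top of this file:

package main

import "fmt"

func main() {
	const (
		tracebackCrash = 1 << iota
		tracebackAll
		tracebackShift = iota
	)
	for _, lv := range []struct {
		name string
		t    uint32
	}{
		{"none", 0},                                               // 0000
		{"single", 1 << tracebackShift},                           // 0100
		{"all", 1<<tracebackShift | tracebackAll},                 // 0110
		{"system", 2<<tracebackShift | tracebackAll},              // 1010
		{"crash", 2<<tracebackShift | tracebackAll | tracebackCrash}, // 1011
	} {
		fmt.Printf("%-6s -> %04b\n", lv.name, lv.t)
	}
}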

// Poor man's 64-bit division.
// This is a very special function; do not use it unless you know exactly what you are doing.
// int64 division is lowered into a _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			res += 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}
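
timediv computes the quotient bit by bit, subtracting div<<bit for each bit that fits, and saturates at 0x7fffffff when the true quotient does not fit in an int32. A user-space copy (for illustration only; the runtime version matters because it is nosplit and avoids the lowered 64-bit division on 386) behaves like this:

package main

import "fmt"

// timediv here is a verbatim user-space copy of the runtime function.
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v -= int64(div) << uint(bit)
			res += 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff // quotient overflowed int32: saturate
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}

func main() {
	var rem int32
	// The same case check() asserts: nanoseconds split into seconds and remainder.
	fmt.Println(timediv(12345*1000000000+54321, 1000000000, &rem), rem) // 12345 54321
	// Overflow saturates rather than wrapping.
	fmt.Println(timediv(1<<62, 1, &rem), rem) // 2147483647 0
}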

// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

//go:nosplit
func acquirem() *m {
	_g_ := getg()
	_g_.m.locks++
	return _g_.m
}

//go:nosplit
func releasem(mp *m) {
	_g_ := getg()
	mp.locks--
	if mp.locks == 0 && _g_.preempt {
		// restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}

//go:nosplit
func gomcache() *mcache {
	return getg().m.mcache
}

//go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	modules := activeModules()
	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
	ret := [][]int32{modules[0].typelinks}
	for _, md := range modules[1:] {
		sections = append(sections, unsafe.Pointer(md.types))
		ret = append(ret, md.typelinks)
	}
	return sections, ret
}

// reflect_resolveNameOff resolves a name offset from a base pointer.
//go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
}

// reflect_resolveTypeOff resolves an *rtype offset from a base type.
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}

// reflect_resolveTextOff resolves a function pointer offset from a base type.
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return (*_type)(rtype).textOff(textOff(off))
}

// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
//go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		id = reflectOffs.next
		reflectOffs.next-- // use negative offsets as IDs to aid debugging
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}
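
reflect_addReflectOff interns each pointer under a stable id, handing out negative ids so runtime-generated entries can never collide with real, non-negative offsets. This standalone sketch (not part of runtime1.go; string keys stand in for unsafe.Pointer, and locking is omitted) shows the same two-map interning pattern:

package main

import "fmt"

// offTable mirrors reflectOffs: a forward map from id to key, an
// inverse map from key to id, and a counter that only decrements.
type offTable struct {
	m    map[int32]string
	minv map[string]int32
	next int32
}

func (t *offTable) add(key string) int32 {
	if t.m == nil {
		t.m = make(map[int32]string)
		t.minv = make(map[string]int32)
		t.next = -1
	}
	id, found := t.minv[key]
	if !found {
		id = t.next
		t.next-- // negative ids, as in reflect_addReflectOff
		t.m[id] = key
		t.minv[key] = id
	}
	return id
}

func main() {
	var t offTable
	// Re-adding an existing key returns its original id.
	fmt.Println(t.add("a"), t.add("b"), t.add("a")) // -1 -2 -1
}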