Source file src/runtime/runtime1.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/bytealg"
	"internal/goarch"
	"runtime/internal/atomic"
	"unsafe"
)

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32

// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	gp := getg()
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = gp.m.throwing >= throwTypeUser || t&tracebackAll != 0
	if gp.m.traceback != 0 {
		level = int32(gp.m.traceback)
	} else if gp.m.throwing >= throwTypeRuntime {
		// Always include runtime frames in runtime throws unless
		// otherwise overridden by m.traceback.
		level = 2
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}
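
// Illustrative note (not part of the upstream file): with the encoding above,
// the standard GOTRACEBACK settings correspond to the following cache values
// for an ordinary binary, which is the mapping setTraceback below applies:
//
//	GOTRACEBACK=none   -> 0
//	GOTRACEBACK=single -> 1<<tracebackShift
//	GOTRACEBACK=all    -> 1<<tracebackShift | tracebackAll
//	GOTRACEBACK=system -> 2<<tracebackShift | tracebackAll
//	GOTRACEBACK=crash  -> 2<<tracebackShift | tracebackAll | tracebackCrash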

var (
	argc int32
	argv **byte
)

// nosplit for use in linux startup sysargs.
//
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
}

func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

func goargs() {
	if GOOS == "windows" {
		return
	}
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}

func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// problems.
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}
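
// Illustrative note (not part of the upstream file): on the System V/ELF
// startup path that this code assumes, the argument and environment pointers
// are laid out contiguously on the stack:
//
//	argv[0] ... argv[argc-1], nil, env[0] ... env[m-1], nil, auxv entries...
//
// which is why goenvs_unix starts scanning at index argc+1, i.e. just past
// the nil pointer that terminates argv.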

func environ() []string {
	return envs
}

// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64

func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}

func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k     unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != goarch.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != goarch.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	m = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&m[1], 0x1)
	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
		throw("atomicand8")
	}

	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if !(i != i) {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if !(i != i1) {
		throw("float32nan3")
	}

	testAtomic64()

	if fixedStack != round2(fixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}

type dbgVar struct {
	name   string
	value  *int32        // for variables that can only be set at startup
	atomic *atomic.Int32 // for variables that can be changed during execution
	def    int32         // default value (ideally zero)
}

// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
var debug struct {
	cgocheck                int32
	clobberfree             int32
	disablethp              int32
	dontfreezetheworld      int32
	efence                  int32
	gccheckmark             int32
	gcpacertrace            int32
	gcshrinkstackoff        int32
	gcstoptheworld          int32
	gctrace                 int32
	invalidptr              int32
	madvdontneed            int32 // for Linux; issue 28466
	runtimeContentionStacks atomic.Int32
	scavtrace               int32
	scheddetail             int32
	schedtrace              int32
	tracebackancestors      int32
	asyncpreemptoff         int32
	harddecommit            int32
	adaptivestackstart      int32
	tracefpunwindoff        int32
	traceadvanceperiod      int32

	// debug.malloc is used as a combined debug check
	// in the malloc function and should be set
	// if any of the below debug options is != 0.
	malloc         bool
	allocfreetrace int32
	inittrace      int32
	sbrk           int32

	panicnil atomic.Int32
}

var dbgvars = []*dbgVar{
	{name: "allocfreetrace", value: &debug.allocfreetrace},
	{name: "clobberfree", value: &debug.clobberfree},
	{name: "cgocheck", value: &debug.cgocheck},
	{name: "disablethp", value: &debug.disablethp},
	{name: "dontfreezetheworld", value: &debug.dontfreezetheworld},
	{name: "efence", value: &debug.efence},
	{name: "gccheckmark", value: &debug.gccheckmark},
	{name: "gcpacertrace", value: &debug.gcpacertrace},
	{name: "gcshrinkstackoff", value: &debug.gcshrinkstackoff},
	{name: "gcstoptheworld", value: &debug.gcstoptheworld},
	{name: "gctrace", value: &debug.gctrace},
	{name: "invalidptr", value: &debug.invalidptr},
	{name: "madvdontneed", value: &debug.madvdontneed},
	{name: "runtimecontentionstacks", atomic: &debug.runtimeContentionStacks},
	{name: "sbrk", value: &debug.sbrk},
	{name: "scavtrace", value: &debug.scavtrace},
	{name: "scheddetail", value: &debug.scheddetail},
	{name: "schedtrace", value: &debug.schedtrace},
	{name: "tracebackancestors", value: &debug.tracebackancestors},
	{name: "asyncpreemptoff", value: &debug.asyncpreemptoff},
	{name: "inittrace", value: &debug.inittrace},
	{name: "harddecommit", value: &debug.harddecommit},
	{name: "adaptivestackstart", value: &debug.adaptivestackstart},
	{name: "tracefpunwindoff", value: &debug.tracefpunwindoff},
	{name: "panicnil", atomic: &debug.panicnil},
	{name: "traceadvanceperiod", value: &debug.traceadvanceperiod},
}

func parsedebugvars() {
	// defaults
	debug.cgocheck = 1
	debug.invalidptr = 1
	debug.adaptivestackstart = 1 // set this to 0 to turn larger initial goroutine stacks off
	if GOOS == "linux" {
		// On Linux, MADV_FREE is faster than MADV_DONTNEED,
		// but doesn't affect many of the statistics that
		// MADV_DONTNEED does until the memory is actually
		// reclaimed. This generally leads to poor user
		// experience, like confusing stats in top and other
		// monitoring tools; and bad integration with
		// management systems that respond to memory usage.
		// Hence, default to MADV_DONTNEED.
		debug.madvdontneed = 1
	}
	debug.traceadvanceperiod = defaultTraceAdvancePeriod

	godebug := gogetenv("GODEBUG")

	p := new(string)
	*p = godebug
	godebugEnv.Store(p)

	// apply runtime defaults, if any
	for _, v := range dbgvars {
		if v.def != 0 {
			// Every var should have either v.value or v.atomic set.
			if v.value != nil {
				*v.value = v.def
			} else if v.atomic != nil {
				v.atomic.Store(v.def)
			}
		}
	}

	// apply compile-time GODEBUG settings
	parsegodebug(godebugDefault, nil)

	// apply environment settings
	parsegodebug(godebug, nil)

	debug.malloc = (debug.allocfreetrace | debug.inittrace | debug.sbrk) != 0

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache
}
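
// Illustrative example (not part of the upstream file): starting an ordinary
// binary on Linux with
//
//	GODEBUG=gctrace=1,madvdontneed=0 GOTRACEBACK=crash ./prog
//
// leaves debug.gctrace == 1, overrides the Linux-specific madvdontneed
// default back to 0, and ends with traceback_cache holding
// 2<<tracebackShift | tracebackAll | tracebackCrash.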

// reparsedebugvars reparses the runtime's debug variables
// because the environment variable has been changed to env.
func reparsedebugvars(env string) {
	seen := make(map[string]bool)
	// apply environment settings
	parsegodebug(env, seen)
	// apply compile-time GODEBUG settings for as-yet-unseen variables
	parsegodebug(godebugDefault, seen)
	// apply defaults for as-yet-unseen variables
	for _, v := range dbgvars {
		if v.atomic != nil && !seen[v.name] {
			v.atomic.Store(0)
		}
	}
}

// parsegodebug parses the godebug string, updating variables listed in dbgvars.
// If seen == nil, this is startup time and we process the string left to right
// overwriting older settings with newer ones.
// If seen != nil, $GODEBUG has changed and we are doing an
// incremental update. To avoid flapping in the case where a value is
// set multiple times (perhaps in the default and the environment,
// or perhaps twice in the environment), we process the string right-to-left
// and only change values not already seen. After doing this for both
// the environment and the default settings, the caller must also call
// cleargodebug(seen) to reset any now-unset values back to their defaults.
func parsegodebug(godebug string, seen map[string]bool) {
	for p := godebug; p != ""; {
		var field string
		if seen == nil {
			// startup: process left to right, overwriting older settings with newer
			i := bytealg.IndexByteString(p, ',')
			if i < 0 {
				field, p = p, ""
			} else {
				field, p = p[:i], p[i+1:]
			}
		} else {
			// incremental update: process right to left, updating and skipping seen
			i := len(p) - 1
			for i >= 0 && p[i] != ',' {
				i--
			}
			if i < 0 {
				p, field = "", p
			} else {
				p, field = p[:i], p[i+1:]
			}
		}
		i := bytealg.IndexByteString(field, '=')
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]
		if seen[key] {
			continue
		}
		if seen != nil {
			seen[key] = true
		}

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if seen == nil && key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						if seen == nil && v.value != nil {
							*v.value = n
						} else if v.atomic != nil {
							v.atomic.Store(n)
						}
					}
				}
			}
		}
	}

	if debug.cgocheck > 1 {
		throw("cgocheck > 1 mode is no longer supported at runtime. Use GOEXPERIMENT=cgocheck2 at build time instead.")
	}
}
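
// Illustrative example (not part of the upstream file): given
// GODEBUG="gctrace=1,gctrace=2", the startup pass (seen == nil) walks the
// string left to right and later settings overwrite earlier ones, so
// debug.gctrace ends up 2. During an incremental update (seen != nil) the
// string is walked right to left and only keys not already in seen are
// applied, so the rightmost (newest) setting still wins while older
// duplicates are skipped.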

//go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	case "wer":
		if GOOS == "windows" {
			t = 2<<tracebackShift | tracebackAll | tracebackCrash
			enableWER()
			break
		}
		fallthrough
	default:
		t = tracebackAll
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// when C owns the process, simply exit'ing the process on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}
// Poor man's 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
// This keeps us within no-split stack limits on 32-bit processors.
//
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			// Before this for loop, res was 0, thus all these
			// power of 2 increments are now just bitsets.
			res |= 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}
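
// Illustrative examples (not part of the upstream file), following the
// behavior implemented above:
//
//	var rem int32
//	timediv(2500000000, 1000000000, &rem) // returns 2, rem == 500000000
//	timediv(1<<40, 2, &rem)               // quotient overflows int32: returns 0x7fffffff, rem == 0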

// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

//go:nosplit
func acquirem() *m {
	gp := getg()
	gp.m.locks++
	return gp.m
}

//go:nosplit
func releasem(mp *m) {
	gp := getg()
	mp.locks--
	if mp.locks == 0 && gp.preempt {
		// restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}
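
// Illustrative note (not part of the upstream file): acquirem and releasem
// are used in matched pairs, e.g.
//
//	mp := acquirem()
//	// ... code that must not be preempted ...
//	releasem(mp)
//
// Holding a nonzero m.locks count keeps the goroutine from being preempted;
// releasem re-arms a pending preemption request (via stackguard0) once the
// count drops back to zero.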

//go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	modules := activeModules()
	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
	ret := [][]int32{modules[0].typelinks}
	for _, md := range modules[1:] {
		sections = append(sections, unsafe.Pointer(md.types))
		ret = append(ret, md.typelinks)
	}
	return sections, ret
}

// reflect_resolveNameOff resolves a name offset from a base pointer.
//
//go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
}

// reflect_resolveTypeOff resolves an *rtype offset from a base type.
//
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
}

// reflect_resolveTextOff resolves a function pointer offset from a base type.
//
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return toRType((*_type)(rtype)).textOff(textOff(off))
}

// reflectlite_resolveNameOff resolves a name offset from a base pointer.
//
//go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff
func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
}

// reflectlite_resolveTypeOff resolves an *rtype offset from a base type.
//
//go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff
func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
}

// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
//
//go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		id = reflectOffs.next
		reflectOffs.next-- // use negative offsets as IDs to aid debugging
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}
