Source file src/runtime/iface.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const itabInitSize = 512

var (
	itabLock      mutex                               // lock for accessing itab table
	itabTable     = &itabTableInit                    // pointer to current table
	itabTableInit = itabTableType{size: itabInitSize} // starter table
)

// Note: change the formula in the mallocgc call in itabAdd if you change these fields.
type itabTableType struct {
	size    uintptr             // length of entries array. Always a power of 2.
	count   uintptr             // current number of filled entries.
	entries [itabInitSize]*itab // really [size] large
}
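
// Editorial note: entries is declared with length itabInitSize only so the
// starter table has static storage. Grown tables allocated in itabAdd are
// sized by hand and indexed up to size via pointer arithmetic (see find and
// add below), so the effective length is size, not itabInitSize.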

func itabHashFunc(inter *interfacetype, typ *_type) uintptr {
	// The compiler has provided good hash codes for us.
	return uintptr(inter.Type.Hash ^ typ.Hash)
}

func getitab(inter *interfacetype, typ *_type, canfail bool) *itab {
	if len(inter.Methods) == 0 {
		throw("internal error - misuse of itab")
	}

	// easy case
	if typ.TFlag&abi.TFlagUncommon == 0 {
		if canfail {
			return nil
		}
		name := toRType(&inter.Type).nameOff(inter.Methods[0].Name)
		panic(&TypeAssertionError{nil, typ, &inter.Type, name.Name()})
	}

	var m *itab

	// First, look in the existing table to see if we can find the itab we need.
	// This is by far the most common case, so do it without locks.
	// Use atomic to ensure we see any previous writes done by the thread
	// that updates the itabTable field (with atomic.Storep in itabAdd).
	t := (*itabTableType)(atomic.Loadp(unsafe.Pointer(&itabTable)))
	if m = t.find(inter, typ); m != nil {
		goto finish
	}

	// Not found. Grab the lock and try again.
	lock(&itabLock)
	if m = itabTable.find(inter, typ); m != nil {
		unlock(&itabLock)
		goto finish
	}

	// Entry doesn't exist yet. Make a new entry & add it.
	m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.Methods)-1)*goarch.PtrSize, 0, &memstats.other_sys))
	m.inter = inter
	m._type = typ
	// The hash is used in type switches. However, the compiler statically
	// generates itabs for all interface/type pairs used in switches (which
	// are added to itabTable in itabsinit). Dynamically generated itabs
	// never participate in type switches, so their hash is irrelevant.
	// Note: m.hash is _not_ the hash used for the runtime itabTable hash table.
	m.hash = 0
	m.init()
	itabAdd(m)
	unlock(&itabLock)
finish:
	if m.fun[0] != 0 {
		return m
	}
	if canfail {
		return nil
	}
	// This can only happen if the conversion
	// was already done once using the , ok form
	// and we have a cached negative result.
	// The cached result doesn't record which
	// interface function was missing, so initialize
	// the itab again to get the missing function name.
	panic(&TypeAssertionError{concrete: typ, asserted: &inter.Type, missingMethod: m.init()})
}
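
// For illustration (editorial note, not part of the runtime API): getitab
// is reached when the itab for an interface/type pair cannot be determined
// statically, e.g. for assertions on an interface value x:
//
//	r := x.(io.Reader)      // via assertE2I/typeAssert below: canfail = false, panics on failure
//	r, ok := x.(io.Reader)  // via assertE2I2/typeAssert below: canfail = true, reports failure via ok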

// find finds the given interface/type pair in t.
// Returns nil if the given interface/type pair isn't present.
func (t *itabTableType) find(inter *interfacetype, typ *_type) *itab {
	// Implemented using quadratic probing.
	// Probe sequence is h(i) = h0 + i*(i+1)/2 mod 2^k.
	// We're guaranteed to hit all table entries using this probe sequence.
	mask := t.size - 1
	h := itabHashFunc(inter, typ) & mask
	for i := uintptr(1); ; i++ {
		p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
		// Use atomic read here so if we see m != nil, we also see
		// the initializations of the fields of m.
		// m := *p
		m := (*itab)(atomic.Loadp(unsafe.Pointer(p)))
		if m == nil {
			return nil
		}
		if m.inter == inter && m._type == typ {
			return m
		}
		h += i
		h &= mask
	}
}
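
// Worked example of the probe sequence (illustrative): for a table of size
// 8 (mask 7) and initial slot h0 = 3, increments i = 1, 2, 3, ... visit
//
//	3, 4, 6, 1, 5, 2, 0, 7
//
// i.e. h0 + i*(i+1)/2 mod 8. The triangular numbers i*(i+1)/2 form a
// complete residue system mod any power of 2, so the sequence visits every
// slot exactly once before repeating; find and add therefore terminate on
// any table that still has an empty slot.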

// itabAdd adds the given itab to the itab hash table.
// itabLock must be held.
func itabAdd(m *itab) {
	// Bugs can lead to calling this while mallocing is set,
	// typically because this is called while panicking.
	// Crash reliably, rather than only when we need to grow
	// the hash table.
	if getg().m.mallocing != 0 {
		throw("malloc deadlock")
	}

	t := itabTable
	if t.count >= 3*(t.size/4) { // 75% load factor
		// Grow hash table.
		// t2 = new(itabTableType) + some additional entries
		// We lie and tell malloc we want pointer-free memory because
		// all the pointed-to values are not in the heap.
		t2 := (*itabTableType)(mallocgc((2+2*t.size)*goarch.PtrSize, nil, true))
		t2.size = t.size * 2

		// Copy over entries.
		// Note: while copying, other threads may look for an itab and
		// fail to find it. That's ok, they will then try to get the itab lock
		// and as a consequence wait until this copying is complete.
		iterate_itabs(t2.add)
		if t2.count != t.count {
			throw("mismatched count during itab table copy")
		}
		// Publish new hash table. Use an atomic write: see comment in getitab.
		atomicstorep(unsafe.Pointer(&itabTable), unsafe.Pointer(t2))
		// Adopt the new table as our own.
		t = itabTable
		// Note: the old table can be GC'ed here.
	}
	t.add(m)
}

// add adds the given itab to itab table t.
// itabLock must be held.
func (t *itabTableType) add(m *itab) {
	// See comment in find about the probe sequence.
	// Insert new itab in the first empty spot in the probe sequence.
	mask := t.size - 1
	h := itabHashFunc(m.inter, m._type) & mask
	for i := uintptr(1); ; i++ {
		p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
		m2 := *p
		if m2 == m {
			// A given itab may be used in more than one module,
			// and thanks to the way global symbol resolution works, the
			// pointed-to itab may already have been inserted into the
			// global 'hash' table.
			return
		}
		if m2 == nil {
			// Use atomic write here so if a reader sees m, it also
			// sees the correctly initialized fields of m.
			// NoWB is ok because m is not in heap memory.
			// *p = m
			atomic.StorepNoWB(unsafe.Pointer(p), unsafe.Pointer(m))
			t.count++
			return
		}
		h += i
		h &= mask
	}
}

// init fills in the m.fun array with all the code pointers for
// the m.inter/m._type pair. If the type does not implement the interface,
// it sets m.fun[0] to 0 and returns the name of an interface function that is missing.
// It is ok to call this multiple times on the same m, even concurrently.
func (m *itab) init() string {
	inter := m.inter
	typ := m._type
	x := typ.Uncommon()

	// Both inter and typ have methods sorted by name,
	// and interface names are unique,
	// so we can iterate over both in lock step;
	// the loop is O(ni+nt), not O(ni*nt).
	ni := len(inter.Methods)
	nt := int(x.Mcount)
	xmhdr := (*[1 << 16]abi.Method)(add(unsafe.Pointer(x), uintptr(x.Moff)))[:nt:nt]
	j := 0
	methods := (*[1 << 16]unsafe.Pointer)(unsafe.Pointer(&m.fun[0]))[:ni:ni]
	var fun0 unsafe.Pointer
imethods:
	for k := 0; k < ni; k++ {
		i := &inter.Methods[k]
		itype := toRType(&inter.Type).typeOff(i.Typ)
		name := toRType(&inter.Type).nameOff(i.Name)
		iname := name.Name()
		ipkg := pkgPath(name)
		if ipkg == "" {
			ipkg = inter.PkgPath.Name()
		}
		for ; j < nt; j++ {
			t := &xmhdr[j]
			rtyp := toRType(typ)
			tname := rtyp.nameOff(t.Name)
			if rtyp.typeOff(t.Mtyp) == itype && tname.Name() == iname {
				pkgPath := pkgPath(tname)
				if pkgPath == "" {
					pkgPath = rtyp.nameOff(x.PkgPath).Name()
				}
				if tname.IsExported() || pkgPath == ipkg {
					ifn := rtyp.textOff(t.Ifn)
					if k == 0 {
						fun0 = ifn // we'll set m.fun[0] at the end
					} else {
						methods[k] = ifn
					}
					continue imethods
				}
			}
		}
		// didn't find method
		m.fun[0] = 0
		return iname
	}
	m.fun[0] = uintptr(fun0)
	return ""
}
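
// To see why the merge in init is linear (illustrative): with interface
// methods {Read, Write} and concrete methods {Close, Read, String, Write},
// both sorted by name, the inner loop advances j past Close to match Read,
// then past String to match Write. j never moves backward, so each method
// list is scanned at most once: O(ni+nt) rather than O(ni*nt).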

func itabsinit() {
	lockInit(&itabLock, lockRankItab)
	lock(&itabLock)
	for _, md := range activeModules() {
		for _, i := range md.itablinks {
			itabAdd(i)
		}
	}
	unlock(&itabLock)
}

// panicdottypeE is called when doing an e.(T) conversion and the conversion fails.
// have = the dynamic type we have.
// want = the static type we're trying to convert to.
// iface = the static type we're converting from.
func panicdottypeE(have, want, iface *_type) {
	panic(&TypeAssertionError{iface, have, want, ""})
}

// panicdottypeI is called when doing an i.(T) conversion and the conversion fails.
// Same args as panicdottypeE, but "have" is the dynamic itab we have.
func panicdottypeI(have *itab, want, iface *_type) {
	var t *_type
	if have != nil {
		t = have._type
	}
	panicdottypeE(t, want, iface)
}

// panicnildottype is called when doing an i.(T) conversion and the interface i is nil.
// want = the static type we're trying to convert to.
func panicnildottype(want *_type) {
	panic(&TypeAssertionError{nil, nil, want, ""})
	// TODO: Add the static type we're converting from as well.
	// It might generate a better error message.
	// Just to match other nil conversion errors, we don't for now.
}

// The specialized convTx routines need a type descriptor to use when calling mallocgc.
// We don't need the type to be exact, just to have the correct size, alignment, and pointer-ness.
// However, when debugging, it'd be nice to have some indication in mallocgc of where the types came from,
// so we use named types here.
// We then construct interface values of these types,
// and then extract the type word to use as needed.
type (
	uint16InterfacePtr uint16
	uint32InterfacePtr uint32
	uint64InterfacePtr uint64
	stringInterfacePtr string
	sliceInterfacePtr  []byte
)

var (
	uint16Eface any = uint16InterfacePtr(0)
	uint32Eface any = uint32InterfacePtr(0)
	uint64Eface any = uint64InterfacePtr(0)
	stringEface any = stringInterfacePtr("")
	sliceEface  any = sliceInterfacePtr(nil)

	uint16Type *_type = efaceOf(&uint16Eface)._type
	uint32Type *_type = efaceOf(&uint32Eface)._type
	uint64Type *_type = efaceOf(&uint64Eface)._type
	stringType *_type = efaceOf(&stringEface)._type
	sliceType  *_type = efaceOf(&sliceEface)._type
)

// The conv and assert functions below do very similar things.
// The convXXX functions are guaranteed by the compiler to succeed.
// The assertXXX functions may fail (either panicking or returning false,
// depending on whether they are 1-result or 2-result).
// The convXXX functions succeed on a nil input, whereas the assertXXX
// functions fail on a nil input.

// convT converts a value of type t, which is pointed to by v, to a pointer that can
// be used as the second word of an interface value.
func convT(t *_type, v unsafe.Pointer) unsafe.Pointer {
	if raceenabled {
		raceReadObjectPC(t, v, getcallerpc(), abi.FuncPCABIInternal(convT))
	}
	if msanenabled {
		msanread(v, t.Size_)
	}
	if asanenabled {
		asanread(v, t.Size_)
	}
	x := mallocgc(t.Size_, t, true)
	typedmemmove(t, x, v)
	return x
}
func convTnoptr(t *_type, v unsafe.Pointer) unsafe.Pointer {
	// TODO: maybe take size instead of type?
	if raceenabled {
		raceReadObjectPC(t, v, getcallerpc(), abi.FuncPCABIInternal(convTnoptr))
	}
	if msanenabled {
		msanread(v, t.Size_)
	}
	if asanenabled {
		asanread(v, t.Size_)
	}

	x := mallocgc(t.Size_, t, false)
	memmove(x, v, t.Size_)
	return x
}
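
// Editorial note on the pair above (an assumption from mallocgc's signature,
// mallocgc(size, typ, needzero)): convT requests zeroed memory and copies
// with typedmemmove because t may contain pointers, so the GC must never
// observe stale pointer words and pointer writes need write barriers.
// convTnoptr is used for pointer-free types, where a plain memmove into
// unzeroed memory (needzero = false) is safe: the copy overwrites the whole
// object and the GC never scans it.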

func convT16(val uint16) (x unsafe.Pointer) {
	if val < uint16(len(staticuint64s)) {
		x = unsafe.Pointer(&staticuint64s[val])
		if goarch.BigEndian {
			x = add(x, 6)
		}
	} else {
		x = mallocgc(2, uint16Type, false)
		*(*uint16)(x) = val
	}
	return
}

func convT32(val uint32) (x unsafe.Pointer) {
	if val < uint32(len(staticuint64s)) {
		x = unsafe.Pointer(&staticuint64s[val])
		if goarch.BigEndian {
			x = add(x, 4)
		}
	} else {
		x = mallocgc(4, uint32Type, false)
		*(*uint32)(x) = val
	}
	return
}

func convT64(val uint64) (x unsafe.Pointer) {
	if val < uint64(len(staticuint64s)) {
		x = unsafe.Pointer(&staticuint64s[val])
	} else {
		x = mallocgc(8, uint64Type, false)
		*(*uint64)(x) = val
	}
	return
}
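
// For example (illustrative): converting uint16(7), uint32(7), or uint64(7)
// to an interface yields a data pointer into staticuint64s and performs no
// allocation, while a value >= 256 falls outside the table and allocates.
// On big-endian machines the pointer is offset into the uint64 (by 6 bytes
// for convT16, 4 for convT32) so that it addresses the low-order bytes
// holding the value.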

func convTstring(val string) (x unsafe.Pointer) {
	if val == "" {
		x = unsafe.Pointer(&zeroVal[0])
	} else {
		x = mallocgc(unsafe.Sizeof(val), stringType, true)
		*(*string)(x) = val
	}
	return
}

func convTslice(val []byte) (x unsafe.Pointer) {
	// Note: this must work for any element type, not just byte.
	if (*slice)(unsafe.Pointer(&val)).array == nil {
		x = unsafe.Pointer(&zeroVal[0])
	} else {
		x = mallocgc(unsafe.Sizeof(val), sliceType, true)
		*(*[]byte)(x) = val
	}
	return
}

func assertE2I(inter *interfacetype, t *_type) *itab {
	if t == nil {
		// explicit conversions require non-nil interface value.
		panic(&TypeAssertionError{nil, nil, &inter.Type, ""})
	}
	return getitab(inter, t, false)
}

func assertE2I2(inter *interfacetype, t *_type) *itab {
	if t == nil {
		return nil
	}
	return getitab(inter, t, true)
}

// typeAssert builds an itab for the concrete type t and the
// interface type s.Inter. If the conversion is not possible, it
// panics if s.CanFail is false and returns nil if s.CanFail is true.
func typeAssert(s *abi.TypeAssert, t *_type) *itab {
	var tab *itab
	if t == nil {
		if !s.CanFail {
			panic(&TypeAssertionError{nil, nil, &s.Inter.Type, ""})
		}
	} else {
		tab = getitab(s.Inter, t, s.CanFail)
	}

	if !abi.UseInterfaceSwitchCache(GOARCH) {
		return tab
	}

	// Maybe update the cache, so the next time the generated code
	// doesn't need to call into the runtime.
	if cheaprand()&1023 != 0 {
		// Only bother updating the cache ~1 in 1024 times.
		return tab
	}
	// Load the current cache.
	oldC := (*abi.TypeAssertCache)(atomic.Loadp(unsafe.Pointer(&s.Cache)))

	if cheaprand()&uint32(oldC.Mask) != 0 {
		// As the cache gets larger, choose to update it less often
		// so we can amortize the cost of building a new cache.
		return tab
	}

	// Make a new cache.
	newC := buildTypeAssertCache(oldC, t, tab)

	// Update the cache. Use compare-and-swap so if multiple threads
	// are fighting to update the cache, at least one of their
	// updates will stick.
	atomic_casPointer((*unsafe.Pointer)(unsafe.Pointer(&s.Cache)), unsafe.Pointer(oldC), unsafe.Pointer(newC))

	return tab
}
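
// Sketch of the fast path this cache enables (an assumption about its
// shape: the real lookup is emitted by the compiler and is
// architecture-specific). Before calling typeAssert, generated code can
// probe s.Cache with the dynamic type's hash, calling into the runtime
// only on a miss:
//
//	c := (*abi.TypeAssertCache)(atomic.Loadp(unsafe.Pointer(&s.Cache)))
//	h := uintptr(typ.Hash) & c.Mask
//	for {
//		e := &c.Entries[h]
//		if e.Typ == uintptr(unsafe.Pointer(typ)) {
//			// hit: e.Itab is the cached answer
//			break
//		}
//		if e.Typ == 0 {
//			// miss: fall back to calling typeAssert
//			break
//		}
//		h = (h + 1) & c.Mask
//	}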

func buildTypeAssertCache(oldC *abi.TypeAssertCache, typ *_type, tab *itab) *abi.TypeAssertCache {
	oldEntries := unsafe.Slice(&oldC.Entries[0], oldC.Mask+1)

	// Count the number of entries we need.
	n := 1
	for _, e := range oldEntries {
		if e.Typ != 0 {
			n++
		}
	}

	// Figure out how big a table we need.
	// We need at least one more slot than the number of entries
	// so that we are guaranteed an empty slot (for termination).
	newN := n * 2                         // make it at most 50% full
	newN = 1 << sys.Len64(uint64(newN-1)) // round up to a power of 2

	// Allocate the new table.
	newSize := unsafe.Sizeof(abi.TypeAssertCache{}) + uintptr(newN-1)*unsafe.Sizeof(abi.TypeAssertCacheEntry{})
	newC := (*abi.TypeAssertCache)(mallocgc(newSize, nil, true))
	newC.Mask = uintptr(newN - 1)
	newEntries := unsafe.Slice(&newC.Entries[0], newN)

	// Fill the new table.
	addEntry := func(typ *_type, tab *itab) {
		h := int(typ.Hash) & (newN - 1)
		for {
			if newEntries[h].Typ == 0 {
				newEntries[h].Typ = uintptr(unsafe.Pointer(typ))
				newEntries[h].Itab = uintptr(unsafe.Pointer(tab))
				return
			}
			h = (h + 1) & (newN - 1)
		}
	}
	for _, e := range oldEntries {
		if e.Typ != 0 {
			addEntry((*_type)(unsafe.Pointer(e.Typ)), (*itab)(unsafe.Pointer(e.Itab)))
		}
	}
	addEntry(typ, tab)

	return newC
}
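
// Sizing example (illustrative): with three surviving old entries plus the
// new one, n = 4, so newN = 8 and the table is at most half full; with
// n = 5, newN-1 = 9 needs four bits, so the rounding step yields 16.
// Keeping the table at most 50% full bounds the expected probe length and
// guarantees the empty slot that terminates lookups.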

// Empty type assert cache. Contains one entry with a nil Typ (which
// causes a cache lookup to fail immediately).
var emptyTypeAssertCache = abi.TypeAssertCache{Mask: 0}

// interfaceSwitch compares t against the list of cases in s.
// If t matches case i, interfaceSwitch returns the case index i and
// an itab for the pair <t, s.Cases[i]>.
// If there is no match, it returns N, nil, where N is the number
// of cases.
func interfaceSwitch(s *abi.InterfaceSwitch, t *_type) (int, *itab) {
	cases := unsafe.Slice(&s.Cases[0], s.NCases)

	// Results if we don't find a match.
	case_ := len(cases)
	var tab *itab

	// Look through each case in order.
	for i, c := range cases {
		tab = getitab(c, t, true)
		if tab != nil {
			case_ = i
			break
		}
	}

	if !abi.UseInterfaceSwitchCache(GOARCH) {
		return case_, tab
	}

	// Maybe update the cache, so the next time the generated code
	// doesn't need to call into the runtime.
	if cheaprand()&1023 != 0 {
		// Only bother updating the cache ~1 in 1024 times.
		// This ensures we don't waste memory on switches, or
		// switch arguments, that only happen a few times.
		return case_, tab
	}
	// Load the current cache.
	oldC := (*abi.InterfaceSwitchCache)(atomic.Loadp(unsafe.Pointer(&s.Cache)))

	if cheaprand()&uint32(oldC.Mask) != 0 {
		// As the cache gets larger, choose to update it less often
		// so we can amortize the cost of building a new cache
		// (that cost is linear in oldC.Mask).
		return case_, tab
	}

	// Make a new cache.
	newC := buildInterfaceSwitchCache(oldC, t, case_, tab)

	// Update the cache. Use compare-and-swap so if multiple threads
	// are fighting to update the cache, at least one of their
	// updates will stick.
	atomic_casPointer((*unsafe.Pointer)(unsafe.Pointer(&s.Cache)), unsafe.Pointer(oldC), unsafe.Pointer(newC))

	return case_, tab
}
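
// Illustration (an assumption about the lowering; only the shape is shown):
// for a switch whose cases have interface types,
//
//	switch x.(type) {
//	case io.Reader: ...
//	case io.Writer: ...
//	default: ...
//	}
//
// the compiler arranges for interfaceSwitch(s, t) to return (0, itab) for
// an io.Reader match, (1, itab) for io.Writer, or (len(cases), nil) for the
// default case, where t is x's dynamic type. Cases with concrete types are
// matched by direct type-descriptor comparison and don't need this helper.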

// buildInterfaceSwitchCache constructs an interface switch cache
// containing all the entries from oldC plus the new entry
// (typ, case_, tab).
func buildInterfaceSwitchCache(oldC *abi.InterfaceSwitchCache, typ *_type, case_ int, tab *itab) *abi.InterfaceSwitchCache {
	oldEntries := unsafe.Slice(&oldC.Entries[0], oldC.Mask+1)

	// Count the number of entries we need.
	n := 1
	for _, e := range oldEntries {
		if e.Typ != 0 {
			n++
		}
	}

	// Figure out how big a table we need.
	// We need at least one more slot than the number of entries
	// so that we are guaranteed an empty slot (for termination).
	newN := n * 2                         // make it at most 50% full
	newN = 1 << sys.Len64(uint64(newN-1)) // round up to a power of 2

	// Allocate the new table.
	newSize := unsafe.Sizeof(abi.InterfaceSwitchCache{}) + uintptr(newN-1)*unsafe.Sizeof(abi.InterfaceSwitchCacheEntry{})
	newC := (*abi.InterfaceSwitchCache)(mallocgc(newSize, nil, true))
	newC.Mask = uintptr(newN - 1)
	newEntries := unsafe.Slice(&newC.Entries[0], newN)

	// Fill the new table.
	addEntry := func(typ *_type, case_ int, tab *itab) {
		h := int(typ.Hash) & (newN - 1)
		for {
			if newEntries[h].Typ == 0 {
				newEntries[h].Typ = uintptr(unsafe.Pointer(typ))
				newEntries[h].Case = case_
				newEntries[h].Itab = uintptr(unsafe.Pointer(tab))
				return
			}
			h = (h + 1) & (newN - 1)
		}
	}
	for _, e := range oldEntries {
		if e.Typ != 0 {
			addEntry((*_type)(unsafe.Pointer(e.Typ)), e.Case, (*itab)(unsafe.Pointer(e.Itab)))
		}
	}
	addEntry(typ, case_, tab)

	return newC
}

// Empty interface switch cache. Contains one entry with a nil Typ (which
// causes a cache lookup to fail immediately).
var emptyInterfaceSwitchCache = abi.InterfaceSwitchCache{Mask: 0}

//go:linkname reflect_ifaceE2I reflect.ifaceE2I
func reflect_ifaceE2I(inter *interfacetype, e eface, dst *iface) {
	*dst = iface{assertE2I(inter, e._type), e.data}
}

//go:linkname reflectlite_ifaceE2I internal/reflectlite.ifaceE2I
func reflectlite_ifaceE2I(inter *interfacetype, e eface, dst *iface) {
	*dst = iface{assertE2I(inter, e._type), e.data}
}

func iterate_itabs(fn func(*itab)) {
	// Note: this only runs during stop-the-world or with itabLock held,
	// so no other locks/atomics are needed.
	t := itabTable
	for i := uintptr(0); i < t.size; i++ {
		m := *(**itab)(add(unsafe.Pointer(&t.entries), i*goarch.PtrSize))
		if m != nil {
			fn(m)
		}
	}
}

// staticuint64s is used to avoid allocating in convTx for small integer values.
var staticuint64s = [...]uint64{
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
}

// The linker redirects references to methods that it has determined are
// unreachable to this function, so it throws if one is ever called.
func unreachableMethod() {
	throw("unreachable method called. linker bug?")
}