...

Source file src/runtime/export_test.go

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Export guts for testing.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

var Fadd64 = fadd64
var Fsub64 = fsub64
var Fmul64 = fmul64
var Fdiv64 = fdiv64
var F64to32 = f64to32
var F32to64 = f32to64
var Fcmp64 = fcmp64
var Fintto64 = fintto64
var F64toint = f64toint

var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr

var FuncPC = funcPC

var Fastlog2 = fastlog2

var Atoi = atoi
var Atoi32 = atoi32

type LFNode struct {
	Next    uint64
	Pushcnt uintptr
}

func LFStackPush(head *uint64, node *LFNode) {
	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
}

func LFStackPop(head *uint64) *LFNode {
	return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
}
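
// Illustrative sketch (assumed usage, not part of the original file): inside a
// test func(t *testing.T) in package runtime_test, the lock-free stack can be
// exercised through these wrappers, using a plain uint64 as the stack head:
//
//	var head uint64
//	node := new(runtime.LFNode)
//	runtime.LFStackPush(&head, node)
//	if got := runtime.LFStackPop(&head); got != node {
//		t.Fatal("LFStackPop returned a different node than was pushed")
//	}
//	if runtime.LFStackPop(&head) != nil {
//		t.Fatal("stack should be empty after the only node was popped")
//	}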

func GCMask(x interface{}) (ret []byte) {
	systemstack(func() {
		ret = getgcmask(x)
	})
	return
}
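
// Illustrative sketch (an assumption): GCMask reports, one byte per word, which
// words of the referenced object the garbage collector treats as pointers, so a
// test can inspect a type's pointer bitmap:
//
//	type T struct {
//		p *int
//		n uintptr
//	}
//	mask := runtime.GCMask(new(T)) // typically []byte{1, 0}: a pointer word, then a scalar word
//	_ = mask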

func RunSchedLocalQueueTest() {
	_p_ := new(p)
	gs := make([]g, len(_p_.runq))
	for i := 0; i < len(_p_.runq); i++ {
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty initially")
		}
		for j := 0; j < i; j++ {
			runqput(_p_, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(_p_); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}

func RunSchedLocalQueueStealTest() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}

func RunSchedLocalQueueEmptyTest(iters int) {
	// Test that runq is not spuriously reported as empty.
	// Runq emptiness affects scheduling decisions and spurious emptiness
	// can lead to underutilization (both runnable Gs and idle Ps coexist
	// for an arbitrarily long time).
	done := make(chan bool, 1)
	p := new(p)
	gs := make([]g, 2)
	ready := new(uint32)
	for i := 0; i < iters; i++ {
		*ready = 0
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(p, &gs[0], next0)
		go func() {
			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
			}
			if runqempty(p) {
				println("next:", next0, next1)
				throw("queue is empty")
			}
			done <- true
		}()
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		}
		runqput(p, &gs[1], next1)
		runqget(p)
		<-done
		runqget(p)
	}
}
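
// Illustrative sketch (assumed usage; the test names and iteration count are
// made up): package runtime_test can drive these scheduler run-queue checks
// directly from ordinary tests, for example:
//
//	func TestSchedLocalQueue(t *testing.T)      { runtime.RunSchedLocalQueueTest() }
//	func TestSchedLocalQueueSteal(t *testing.T) { runtime.RunSchedLocalQueueStealTest() }
//	func TestSchedLocalQueueEmpty(t *testing.T) { runtime.RunSchedLocalQueueEmptyTest(1000) }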

var (
	StringHash = stringHash
	BytesHash  = bytesHash
	Int32Hash  = int32Hash
	Int64Hash  = int64Hash
	MemHash    = memhash
	MemHash32  = memhash32
	MemHash64  = memhash64
	EfaceHash  = efaceHash
	IfaceHash  = ifaceHash
)

var UseAeshash = &useAeshash

func MemclrBytes(b []byte) {
	s := (*slice)(unsafe.Pointer(&b))
	memclrNoHeapPointers(s.array, uintptr(s.len))
}

var HashLoad = &hashLoad

// entry point for testing
func GostringW(w []uint16) (s string) {
	systemstack(func() {
		s = gostringw(&w[0])
	})
	return
}

type Uintreg sys.Uintreg

var Open = open
var Close = closefd
var Read = read
var Write = write

func Envs() []string     { return envs }
func SetEnvs(e []string) { envs = e }

var BigEndian = sys.BigEndian

// For benchmarking.

func BenchSetType(n int, x interface{}) {
	e := *efaceOf(&x)
	t := e._type
	var size uintptr
	var p unsafe.Pointer
	switch t.kind & kindMask {
	case kindPtr:
		t = (*ptrtype)(unsafe.Pointer(t)).elem
		size = t.size
		p = e.data
	case kindSlice:
		slice := *(*struct {
			ptr      unsafe.Pointer
			len, cap uintptr
		})(e.data)
		t = (*slicetype)(unsafe.Pointer(t)).elem
		size = t.size * slice.len
		p = slice.ptr
	}
	allocSize := roundupsize(size)
	systemstack(func() {
		for i := 0; i < n; i++ {
			heapBitsSetType(uintptr(p), allocSize, size, t)
		}
	})
}
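
// Illustrative sketch (assumed usage; the benchmark name is made up): a
// benchmark in package runtime_test can use BenchSetType to rewrite the heap
// bitmap for a value's type b.N times:
//
//	func BenchmarkSetTypePtr(b *testing.B) {
//		runtime.BenchSetType(b.N, new(*byte))
//	}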

const PtrSize = sys.PtrSize

var ForceGCPeriod = &forcegcperiod

// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
func SetTracebackEnv(level string) {
	setTraceback(level)
	traceback_env = traceback_cache
}
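
// Illustrative sketch (an assumption): a test binary can pin the traceback
// level before exercising crashing code paths, so that nothing later in the
// run can lower it below "system":
//
//	runtime.SetTracebackEnv("system")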

var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64

func CountPagesInUse() (pagesInUse, counted uintptr) {
	stopTheWorld("CountPagesInUse")

	pagesInUse = uintptr(mheap_.pagesInUse)

	for _, s := range mheap_.allspans {
		if s.state == mSpanInUse {
			counted += s.npages
		}
	}

	startTheWorld()

	return
}

func Fastrand() uint32          { return fastrand() }
func Fastrandn(n uint32) uint32 { return fastrandn(n) }

type ProfBuf profBuf

func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
}

func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	(*profBuf)(p).write(tag, now, hdr, stk)
}

const (
	ProfBufBlocking    = profBufBlocking
	ProfBufNonBlocking = profBufNonBlocking
)

func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
	return (*profBuf)(p).read(profBufReadMode(mode))
}

func (p *ProfBuf) Close() {
	(*profBuf)(p).close()
}
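
// Illustrative sketch (an assumption; the sizes, header words, and stack PCs
// are made up): a test can write one record into a profile buffer and read it
// back without blocking:
//
//	var tag unsafe.Pointer
//	b := runtime.NewProfBuf(2, 1024, 16) // 2 header words, 1024 data words, 16 tag slots
//	b.Write(&tag, 1, []uint64{2, 3}, []uintptr{4, 5, 6})
//	data, tags, eof := b.Read(runtime.ProfBufNonBlocking)
//	_, _, _ = data, tags, eof
//	b.Close()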

// ReadMemStatsSlow returns both the runtime-computed MemStats and
// MemStats accumulated by scanning the heap.
func ReadMemStatsSlow() (base, slow MemStats) {
	stopTheWorld("ReadMemStatsSlow")

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		// Make sure stats don't change.
		getg().m.mallocing++

		readmemstats_m(&base)

		// Initialize slow from base and zero the fields we're
		// recomputing.
		slow = base
		slow.Alloc = 0
		slow.TotalAlloc = 0
		slow.Mallocs = 0
		slow.Frees = 0
		var bySize [_NumSizeClasses]struct {
			Mallocs, Frees uint64
		}

		// Add up current allocations in spans.
		for _, s := range mheap_.allspans {
			if s.state != mSpanInUse {
				continue
			}
			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
				slow.Mallocs++
				slow.Alloc += uint64(s.elemsize)
			} else {
				slow.Mallocs += uint64(s.allocCount)
				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
				bySize[sizeclass].Mallocs += uint64(s.allocCount)
			}
		}

		// Add in frees. readmemstats_m flushed the cached stats, so
		// these are up-to-date.
		var smallFree uint64
		slow.Frees = mheap_.nlargefree
		for i := range mheap_.nsmallfree {
			slow.Frees += mheap_.nsmallfree[i]
			bySize[i].Frees = mheap_.nsmallfree[i]
			bySize[i].Mallocs += mheap_.nsmallfree[i]
			smallFree += mheap_.nsmallfree[i] * uint64(class_to_size[i])
		}
		slow.Frees += memstats.tinyallocs
		slow.Mallocs += slow.Frees

		slow.TotalAlloc = slow.Alloc + mheap_.largefree + smallFree

		for i := range slow.BySize {
			slow.BySize[i].Mallocs = bySize[i].Mallocs
			slow.BySize[i].Frees = bySize[i].Frees
		}

		getg().m.mallocing--
	})

	startTheWorld()
	return
}
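
// Illustrative sketch (an assumption): a consistency test in package
// runtime_test can compare the two views and fail if the runtime's accounting
// disagrees with the heap scan:
//
//	base, slow := runtime.ReadMemStatsSlow()
//	if base.Alloc != slow.Alloc || base.Mallocs != slow.Mallocs || base.Frees != slow.Frees {
//		t.Fatalf("MemStats mismatch:\nbase: %+v\nslow: %+v", base, slow)
//	}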

// BlockOnSystemStack switches to the system stack, prints "x\n" to
// stderr, and blocks in a stack containing
// "runtime.blockOnSystemStackInternal".
func BlockOnSystemStack() {
	systemstack(blockOnSystemStackInternal)
}

func blockOnSystemStackInternal() {
	print("x\n")
	lock(&deadlock)
	lock(&deadlock)
}

type RWMutex struct {
	rw rwmutex
}

func (rw *RWMutex) RLock() {
	rw.rw.rlock()
}

func (rw *RWMutex) RUnlock() {
	rw.rw.runlock()
}

func (rw *RWMutex) Lock() {
	rw.rw.lock()
}

func (rw *RWMutex) Unlock() {
	rw.rw.unlock()
}

const RuntimeHmapSize = unsafe.Sizeof(hmap{})

func MapBucketsCount(m map[int]int) int {
	h := *(**hmap)(unsafe.Pointer(&m))
	return 1 << h.B
}

func MapBucketsPointerIsNil(m map[int]int) bool {
	h := *(**hmap)(unsafe.Pointer(&m))
	return h.buckets == nil
}
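
// Illustrative sketch (an assumption; element counts are made up): a map-growth
// test can watch the bucket count grow as elements are inserted:
//
//	m := map[int]int{}
//	before := runtime.MapBucketsCount(m) // 1 << B; B starts at 0 for a small map
//	for i := 0; i < 1000; i++ {
//		m[i] = i
//	}
//	if after := runtime.MapBucketsCount(m); after <= before {
//		t.Errorf("bucket count did not grow: before %d, after %d", before, after)
//	}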

func LockOSCounts() (external, internal uint32) {
	g := getg()
	if g.m.lockedExt+g.m.lockedInt == 0 {
		if g.lockedm != 0 {
			panic("lockedm on non-locked goroutine")
		}
	} else {
		if g.lockedm == 0 {
			panic("nil lockedm on locked goroutine")
		}
	}
	return g.m.lockedExt, g.m.lockedInt
}

//go:noinline
func TracebackSystemstack(stk []uintptr, i int) int {
	if i == 0 {
		pc, sp := getcallerpc(), getcallersp()
		return gentraceback(pc, sp, 0, getg(), 0, &stk[0], len(stk), nil, nil, _TraceJumpStack)
	}
	n := 0
	systemstack(func() {
		n = TracebackSystemstack(stk, i-1)
	})
	return n
}

func KeepNArenaHints(n int) {
	hint := mheap_.arenaHints
	for i := 1; i < n; i++ {
		hint = hint.next
		if hint == nil {
			return
		}
	}
	hint.next = nil
}

// MapNextArenaHint reserves a page at the next arena growth hint,
// preventing the arena from growing there, and returns the range of
// addresses that are no longer viable.
func MapNextArenaHint() (start, end uintptr) {
	hint := mheap_.arenaHints
	addr := hint.addr
	if hint.down {
		start, end = addr-heapArenaBytes, addr
		addr -= physPageSize
	} else {
		start, end = addr, addr+heapArenaBytes
	}
	sysReserve(unsafe.Pointer(addr), physPageSize)
	return
}

func GetNextArenaHint() uintptr {
	return mheap_.arenaHints.addr
}

type G = g

func Getg() *G {
	return getg()
}

//go:noinline
func PanicForTesting(b []byte, i int) byte {
	return unexportedPanicForTesting(b, i)
}

//go:noinline
func unexportedPanicForTesting(b []byte, i int) byte {
	return b[i]
}

func G0StackOverflow() {
	systemstack(func() {
		stackOverflow(nil)
	})
}

func stackOverflow(x *byte) {
	var buf [256]byte
	stackOverflow(&buf[0])
}
