Source file src/runtime/testdata/testprog/gc.go

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"fmt"
	"math"
	"os"
	"runtime"
	"runtime/debug"
	"runtime/metrics"
	"sync"
	"sync/atomic"
	"time"
	"unsafe"
)

func init() {
	register("GCFairness", GCFairness)
	register("GCFairness2", GCFairness2)
	register("GCSys", GCSys)
	register("GCPhys", GCPhys)
	register("DeferLiveness", DeferLiveness)
	register("GCZombie", GCZombie)
	register("GCMemoryLimit", GCMemoryLimit)
	register("GCMemoryLimitNoGCPercent", GCMemoryLimitNoGCPercent)
}

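// GCSys checks that churning many short-lived small allocations does not
// significantly grow the memory obtained from the OS (MemStats.Sys): the
// collector should keep reusing the same few megabytes of heap.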
func GCSys() {
	runtime.GOMAXPROCS(1)
	memstats := new(runtime.MemStats)
	runtime.GC()
	runtime.ReadMemStats(memstats)
	sys := memstats.Sys

	runtime.MemProfileRate = 0 // disable profiler

	itercount := 100000
	for i := 0; i < itercount; i++ {
		workthegc()
	}

	// Should only be using a few MB.
	// We allocated roughly 100 MB in total (100,000 allocations of ~1 KB each).
	runtime.ReadMemStats(memstats)
	if sys > memstats.Sys {
		sys = 0
	} else {
		sys = memstats.Sys - sys
	}
	if sys > 16<<20 {
		fmt.Printf("using too much memory: %d bytes\n", sys)
		return
	}
	fmt.Printf("OK\n")
}

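// sink keeps the most recent allocation reachable from a global so the
// compiler cannot optimize the allocations in workthegc away.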
var sink []byte

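// workthegc generates steady garbage: each call allocates a small slice and
// overwrites the previous one, leaving the old allocation for the collector.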
func workthegc() []byte {
	sink = make([]byte, 1029)
	return sink
}

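// GCFairness checks that the main goroutine is still scheduled, and can print
// "OK" after a short sleep, while two other goroutines spin writing to
// /dev/null with GOMAXPROCS=1.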
func GCFairness() {
	runtime.GOMAXPROCS(1)
	f, err := os.Open("/dev/null")
	if os.IsNotExist(err) {
		// This test is only meaningful if writes to /dev/null are fast.
		// If there is no /dev/null, skip the test and report OK.
		fmt.Println("OK")
		return
	}
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	for i := 0; i < 2; i++ {
		go func() {
			for {
				f.Write([]byte("."))
			}
		}()
	}
	time.Sleep(10 * time.Millisecond)
	fmt.Println("OK")
}

func GCFairness2() {
	// Make sure user code can't exploit the GC's high priority
	// scheduling to make scheduling of user code unfair. See
	// issue #15706.
	runtime.GOMAXPROCS(1)
	debug.SetGCPercent(1)
	var count [3]int64
	var sink [3]any
	for i := range count {
		go func(i int) {
			for {
				sink[i] = make([]byte, 1024)
				atomic.AddInt64(&count[i], 1)
			}
		}(i)
	}
	// Note: If the unfairness is really bad, it may not even get
	// past the sleep.
	//
	// If the scheduling rules change, this may not be enough time
	// to let all goroutines run, but for now we cycle through
	// them rapidly.
	//
	// OpenBSD's scheduler makes every usleep() take at least
	// 20ms, so we need a long time to ensure all goroutines have
	// run. If they haven't run after 30ms, give it another 1000ms
	// and check again.
	time.Sleep(30 * time.Millisecond)
	var fail bool
	for i := range count {
		if atomic.LoadInt64(&count[i]) == 0 {
			fail = true
		}
	}
	if fail {
		time.Sleep(1 * time.Second)
		for i := range count {
			if atomic.LoadInt64(&count[i]) == 0 {
				fmt.Printf("goroutine %d did not run\n", i)
				return
			}
		}
	}
	fmt.Println("OK")
}

func GCPhys() {
	// This test ensures that heap-growth scavenging is working as intended.
	//
	// It attempts to construct a sizeable "swiss cheese" heap, with many
	// allocChunk-sized holes. Then, it triggers a heap growth by trying to
	// allocate as much memory as would fit in those holes.
	//
	// The heap growth should cause a large number of those holes to be
	// returned to the OS.

	const (
		// The total amount of memory we're willing to allocate.
		allocTotal = 32 << 20

		// The page cache could hide 64 8-KiB pages from the scavenger today.
		maxPageCache = (8 << 10) * 64
	)

	// How big the allocations are needs to depend on the page size.
	// If the page size is too big and the allocations are too small,
	// they might not be aligned to the physical page size, so the scavenger
	// will be unable to release them.
	pageSize := os.Getpagesize()
	var allocChunk int
	if pageSize <= 8<<10 {
		allocChunk = 64 << 10
	} else {
		allocChunk = 512 << 10
	}
	allocs := allocTotal / allocChunk

	// Set GC percent just so this test is a little more consistent in the
	// face of varying environments.
	debug.SetGCPercent(100)

	// Set GOMAXPROCS to 1 to minimize the amount of memory held in the page cache,
	// and to reduce the chance that the background scavenger gets scheduled.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

	// Allocate allocTotal bytes of memory in allocChunk byte chunks.
	// Alternate between whether the chunk will be held live or will be
	// condemned to GC to create holes in the heap.
	saved := make([][]byte, 0, allocs/2+1)
	condemned := make([][]byte, 0, allocs/2)
	for i := 0; i < allocs; i++ {
		b := make([]byte, allocChunk)
		if i%2 == 0 {
			saved = append(saved, b)
		} else {
			condemned = append(condemned, b)
		}
	}

	// Run a GC cycle just so we're at a consistent state.
	runtime.GC()

	// Drop the only reference to all the condemned memory.
	condemned = nil

	// Clear the condemned memory.
	runtime.GC()

	// At this point, the background scavenger is likely running
	// and could pick up the work, so the next line of code doesn't
	// end up doing anything. That's fine. What's important is that
	// this test fails somewhat regularly if the runtime doesn't
	// scavenge on heap growth, and doesn't fail at all otherwise.

	// Make a large allocation that in theory could fit, but won't
	// because we turned the heap into swiss cheese.
	saved = append(saved, make([]byte, allocTotal/2))

	// heapBacked is an estimate of the amount of physical memory used by
	// this test. HeapSys is an estimate of the size of the mapped virtual
	// address space (which may or may not be backed by physical pages)
	// whereas HeapReleased is an estimate of the amount of bytes returned
	// to the OS. Their difference then roughly corresponds to the amount
	// of virtual address space that is backed by physical pages.
	//
	// heapBacked also subtracts out maxPageCache bytes of memory because
	// this is memory that may be hidden from the scavenger per-P. Since
	// GOMAXPROCS=1 here, subtracting it out once is fine.
	var stats runtime.MemStats
	runtime.ReadMemStats(&stats)
	heapBacked := stats.HeapSys - stats.HeapReleased - maxPageCache
	// If heapBacked does not exceed the heap goal by more than retainExtraPercent
	// then the scavenger is working as expected; the newly-created holes have been
	// scavenged immediately as part of the allocations which cannot fit in the holes.
	//
	// Since the runtime should scavenge the entirety of the remaining holes,
	// theoretically there should be no more free and unscavenged memory. However due
	// to other allocations that happen during this test we may still see some physical
	// memory over-use.
	overuse := (float64(heapBacked) - float64(stats.HeapAlloc)) / float64(stats.HeapAlloc)
	// Check against our overuse threshold, which is what the scavenger always reserves
	// to encourage allocation of memory that doesn't need to be faulted in.
	//
	// Add additional slack in case the page size is large and the scavenger
	// can't reach that memory because it doesn't constitute a complete aligned
	// physical page. Assume the worst case: a full physical page out of each
	// allocation.
	threshold := 0.1 + float64(pageSize)/float64(allocChunk)
	if overuse <= threshold {
		fmt.Println("OK")
		return
	}
	// Physical memory utilization exceeds the threshold, so heap-growth scavenging
	// did not operate as expected.
	//
	// In the context of this test, this indicates a large amount of
	// fragmentation with physical pages that are otherwise unused but not
	// returned to the OS.
	fmt.Printf("exceeded physical memory overuse threshold of %3.2f%%: %3.2f%%\n"+
		"(alloc: %d, goal: %d, sys: %d, rel: %d, objs: %d)\n", threshold*100, overuse*100,
		stats.HeapAlloc, stats.NextGC, stats.HeapSys, stats.HeapReleased, len(saved))
	runtime.KeepAlive(saved)
	runtime.KeepAlive(condemned)
}

// Test that defer closure is correctly scanned when the stack is scanned.
func DeferLiveness() {
	var x [10]int
	escape(&x)
	fn := func() {
		if x[0] != 42 {
			panic("FAIL")
		}
	}
	defer fn()

	x[0] = 42
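	// Trigger several GC cycles to force stack scans while the deferred
	// call is pending; the closure's reference to x must keep it live and
	// intact until fn runs.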
	runtime.GC()
	runtime.GC()
	runtime.GC()
}

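// escape forces its argument to escape to the heap by publishing it through a
// global; the sink is cleared immediately so nothing stays live.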
//go:noinline
func escape(x any) { sink2 = x; sink2 = nil }

var sink2 any

// Test zombie object detection and reporting.
func GCZombie() {
	// Allocate several objects of unusual size (so free slots are
	// unlikely to all be re-allocated by the runtime).
	const size = 190
	const count = 8192 / size
	keep := make([]*byte, 0, (count+1)/2)
	free := make([]uintptr, 0, (count+1)/2)
	zombies := make([]*byte, 0, len(free))
	for i := 0; i < count; i++ {
		obj := make([]byte, size)
		p := &obj[0]
		if i%2 == 0 {
			keep = append(keep, p)
		} else {
			free = append(free, uintptr(unsafe.Pointer(p)))
		}
	}

	// Free the unreferenced objects.
	runtime.GC()

	// Bring the free objects back to life.
	for _, p := range free {
		zombies = append(zombies, (*byte)(unsafe.Pointer(p)))
	}

	// GC should detect the zombie objects.
	runtime.GC()
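	// The GC above is expected to throw when it detects the zombie objects,
	// so this line should never be reached; printing "failed" means
	// detection did not happen.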
	println("failed")
	runtime.KeepAlive(keep)
	runtime.KeepAlive(zombies)
}

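// GCMemoryLimit exercises the memory limit with the default GC percent;
// GCMemoryLimitNoGCPercent exercises it with percentage-based collection
// disabled, so the limit alone drives the GC.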
func GCMemoryLimit() {
	gcMemoryLimit(100)
}

func GCMemoryLimitNoGCPercent() {
	gcMemoryLimit(-1)
}

// Test SetMemoryLimit functionality.
//
// This test lives here instead of runtime/debug because the entire
// implementation is in the runtime, and testprog gives us a more
// consistent testing environment to help avoid flakiness.
func gcMemoryLimit(gcPercent int) {
	if oldProcs := runtime.GOMAXPROCS(4); oldProcs < 4 {
		// Fail if the default GOMAXPROCS isn't at least 4.
		// Whatever invokes this should check and do a proper t.Skip.
		println("insufficient CPUs")
		return
	}
	debug.SetGCPercent(gcPercent)

	const myLimit = 256 << 20
	if limit := debug.SetMemoryLimit(-1); limit != math.MaxInt64 {
		print("expected MaxInt64 limit, got ", limit, " bytes instead\n")
		return
	}
	if limit := debug.SetMemoryLimit(myLimit); limit != math.MaxInt64 {
		print("expected MaxInt64 limit, got ", limit, " bytes instead\n")
		return
	}
	if limit := debug.SetMemoryLimit(-1); limit != myLimit {
		print("expected a ", myLimit, "-byte limit, got ", limit, " bytes instead\n")
		return
	}

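	// Start a goroutine that keeps a live heap of whatever size is sent on
	// target, built out of memLimitUnit-sized chunks; sending MaxInt64
	// tells it to exit.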
	target := make(chan int64)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()

		sinkSize := int(<-target / memLimitUnit)
		for {
			if len(memLimitSink) != sinkSize {
				memLimitSink = make([]*[memLimitUnit]byte, sinkSize)
			}
			for i := 0; i < len(memLimitSink); i++ {
				memLimitSink[i] = new([memLimitUnit]byte)
				// Write to this memory to slow down the allocator, otherwise
				// we get flaky behavior. See #52433.
				for j := range memLimitSink[i] {
					memLimitSink[i][j] = 9
				}
			}
			// Again, Gosched to slow down the allocator.
			runtime.Gosched()
			select {
			case newTarget := <-target:
				if newTarget == math.MaxInt64 {
					return
				}
				sinkSize = int(newTarget / memLimitUnit)
			default:
			}
		}
	}()
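	// Sample total mapped memory and memory returned to the OS; their
	// difference approximates how much memory the process is retaining.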
	var m [2]metrics.Sample
	m[0].Name = "/memory/classes/total:bytes"
	m[1].Name = "/memory/classes/heap/released:bytes"

	// Don't set this too high, because this is a *live heap* target which
	// is not directly comparable to a total memory limit.
	maxTarget := int64((myLimit / 10) * 8)
	increment := int64((myLimit / 10) * 1)
	for i := increment; i < maxTarget; i += increment {
		target <- i

		// Check to make sure the memory limit is maintained.
		// We're just sampling here so if it transiently goes over we might miss it.
		// The internal accounting is inconsistent anyway, so going over by a few
		// pages is certainly possible. Just make sure we're within some bound.
		// Note that to avoid flakiness due to #52433 (especially since we're allocating
		// somewhat heavily here) this bound is kept loose. In practice the Go runtime
		// should do considerably better than this bound.
		bound := int64(myLimit + 16<<20)
		start := time.Now()
		for time.Since(start) < 200*time.Millisecond {
			metrics.Read(m[:])
			retained := int64(m[0].Value.Uint64() - m[1].Value.Uint64())
			if retained > bound {
				print("retained=", retained, " limit=", myLimit, " bound=", bound, "\n")
				panic("exceeded memory limit by more than bound allows")
			}
			runtime.Gosched()
		}
	}

	if limit := debug.SetMemoryLimit(math.MaxInt64); limit != myLimit {
		print("expected a ", myLimit, "-byte limit, got ", limit, " bytes instead\n")
		return
	}
	// Stop the allocating goroutine and wait for it to exit before reporting.
	target <- math.MaxInt64
	wg.Wait()
	println("OK")
}

// Pick a value close to the page size. We want to m
const memLimitUnit = 8000

var memLimitSink []*[memLimitUnit]byte
   421  
