Source file src/runtime/proc_test.go

Documentation: runtime

     1  // Copyright 2011 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime_test
     6  
     7  import (
     8  	"fmt"
     9  	"math"
    10  	"net"
    11  	"runtime"
    12  	"runtime/debug"
    13  	"strings"
    14  	"sync"
    15  	"sync/atomic"
    16  	"syscall"
    17  	"testing"
    18  	"time"
    19  )
    20  
// stop signals perpetuumMobile to cease respawning itself. Buffered so
// the single send in TestStopTheWorldDeadlock never blocks.
var stop = make(chan bool, 1)

// perpetuumMobile respawns itself until a value arrives on stop,
// keeping the scheduler supplied with fresh runnable goroutines.
func perpetuumMobile() {
	select {
	case <-stop:
	default:
		go perpetuumMobile()
	}
}
    30  
    31  func TestStopTheWorldDeadlock(t *testing.T) {
    32  	if runtime.GOARCH == "wasm" {
    33  		t.Skip("no preemption on wasm yet")
    34  	}
    35  	if testing.Short() {
    36  		t.Skip("skipping during short test")
    37  	}
    38  	maxprocs := runtime.GOMAXPROCS(3)
    39  	compl := make(chan bool, 2)
    40  	go func() {
    41  		for i := 0; i != 1000; i += 1 {
    42  			runtime.GC()
    43  		}
    44  		compl <- true
    45  	}()
    46  	go func() {
    47  		for i := 0; i != 1000; i += 1 {
    48  			runtime.GOMAXPROCS(3)
    49  		}
    50  		compl <- true
    51  	}()
    52  	go perpetuumMobile()
    53  	<-compl
    54  	<-compl
    55  	stop <- true
    56  	runtime.GOMAXPROCS(maxprocs)
    57  }
    58  
// TestYieldProgress checks that a goroutine spinning in Gosched still
// lets other goroutines make progress (unlocked variant).
func TestYieldProgress(t *testing.T) {
	testYieldProgress(false)
}
    62  
// TestYieldLockedProgress is TestYieldProgress with the spinning
// goroutine locked to its OS thread.
func TestYieldLockedProgress(t *testing.T) {
	testYieldProgress(true)
}
    66  
    67  func testYieldProgress(locked bool) {
    68  	c := make(chan bool)
    69  	cack := make(chan bool)
    70  	go func() {
    71  		if locked {
    72  			runtime.LockOSThread()
    73  		}
    74  		for {
    75  			select {
    76  			case <-c:
    77  				cack <- true
    78  				return
    79  			default:
    80  				runtime.Gosched()
    81  			}
    82  		}
    83  	}()
    84  	time.Sleep(10 * time.Millisecond)
    85  	c <- true
    86  	<-cack
    87  }
    88  
    89  func TestYieldLocked(t *testing.T) {
    90  	const N = 10
    91  	c := make(chan bool)
    92  	go func() {
    93  		runtime.LockOSThread()
    94  		for i := 0; i < N; i++ {
    95  			runtime.Gosched()
    96  			time.Sleep(time.Millisecond)
    97  		}
    98  		c <- true
    99  		// runtime.UnlockOSThread() is deliberately omitted
   100  	}()
   101  	<-c
   102  }
   103  
// TestGoroutineParallelism checks that P runnable goroutines really run
// simultaneously: they pass an atomic counter around in strict
// round-robin order, which can only complete if all P goroutines are
// executing at the same time.
func TestGoroutineParallelism(t *testing.T) {
	if runtime.NumCPU() == 1 {
		// Takes too long, too easy to deadlock, etc.
		t.Skip("skipping on uniprocessor")
	}
	P := 4
	N := 10
	if testing.Short() {
		P = 3
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	for try := 0; try < N; try++ {
		done := make(chan bool)
		x := uint32(0)
		for p := 0; p < P; p++ {
			// Test that all P goroutines are scheduled at the same time
			go func(p int) {
				for i := 0; i < 3; i++ {
					expected := uint32(P*i + p)
					// Busy-wait for this goroutine's turn; this only
					// terminates if the other goroutines keep running.
					for atomic.LoadUint32(&x) != expected {
					}
					atomic.StoreUint32(&x, expected+1)
				}
				done <- true
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}
   140  
// Test that all runnable goroutines are scheduled at the same time,
// under combinations of background load and an active netpoller.
func TestGoroutineParallelism2(t *testing.T) {
	// NOTE(review): the (load=false, netpoll=false) variant is left
	// commented out; it matches plain TestGoroutineParallelism.
	//testGoroutineParallelism2(t, false, false)
	testGoroutineParallelism2(t, true, false)
	testGoroutineParallelism2(t, false, true)
	testGoroutineParallelism2(t, true, true)
}
   148  
   149  func testGoroutineParallelism2(t *testing.T, load, netpoll bool) {
   150  	if runtime.NumCPU() == 1 {
   151  		// Takes too long, too easy to deadlock, etc.
   152  		t.Skip("skipping on uniprocessor")
   153  	}
   154  	P := 4
   155  	N := 10
   156  	if testing.Short() {
   157  		N = 3
   158  	}
   159  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
   160  	// If runtime triggers a forced GC during this test then it will deadlock,
   161  	// since the goroutines can't be stopped/preempted.
   162  	// Disable GC for this test (see issue #10958).
   163  	defer debug.SetGCPercent(debug.SetGCPercent(-1))
   164  	for try := 0; try < N; try++ {
   165  		if load {
   166  			// Create P goroutines and wait until they all run.
   167  			// When we run the actual test below, worker threads
   168  			// running the goroutines will start parking.
   169  			done := make(chan bool)
   170  			x := uint32(0)
   171  			for p := 0; p < P; p++ {
   172  				go func() {
   173  					if atomic.AddUint32(&x, 1) == uint32(P) {
   174  						done <- true
   175  						return
   176  					}
   177  					for atomic.LoadUint32(&x) != uint32(P) {
   178  					}
   179  				}()
   180  			}
   181  			<-done
   182  		}
   183  		if netpoll {
   184  			// Enable netpoller, affects schedler behavior.
   185  			laddr := "localhost:0"
   186  			if runtime.GOOS == "android" {
   187  				// On some Android devices, there are no records for localhost,
   188  				// see https://golang.org/issues/14486.
   189  				// Don't use 127.0.0.1 for every case, it won't work on IPv6-only systems.
   190  				laddr = "127.0.0.1:0"
   191  			}
   192  			ln, err := net.Listen("tcp", laddr)
   193  			if err != nil {
   194  				defer ln.Close() // yup, defer in a loop
   195  			}
   196  		}
   197  		done := make(chan bool)
   198  		x := uint32(0)
   199  		// Spawn P goroutines in a nested fashion just to differ from TestGoroutineParallelism.
   200  		for p := 0; p < P/2; p++ {
   201  			go func(p int) {
   202  				for p2 := 0; p2 < 2; p2++ {
   203  					go func(p2 int) {
   204  						for i := 0; i < 3; i++ {
   205  							expected := uint32(P*i + p*2 + p2)
   206  							for atomic.LoadUint32(&x) != expected {
   207  							}
   208  							atomic.StoreUint32(&x, expected+1)
   209  						}
   210  						done <- true
   211  					}(p2)
   212  				}
   213  			}(p)
   214  		}
   215  		for p := 0; p < P; p++ {
   216  			<-done
   217  		}
   218  	}
   219  }
   220  
   221  func TestBlockLocked(t *testing.T) {
   222  	const N = 10
   223  	c := make(chan bool)
   224  	go func() {
   225  		runtime.LockOSThread()
   226  		for i := 0; i < N; i++ {
   227  			c <- true
   228  		}
   229  		runtime.UnlockOSThread()
   230  	}()
   231  	for i := 0; i < N; i++ {
   232  		<-c
   233  	}
   234  }
   235  
   236  func TestTimerFairness(t *testing.T) {
   237  	if runtime.GOARCH == "wasm" {
   238  		t.Skip("no preemption on wasm yet")
   239  	}
   240  
   241  	done := make(chan bool)
   242  	c := make(chan bool)
   243  	for i := 0; i < 2; i++ {
   244  		go func() {
   245  			for {
   246  				select {
   247  				case c <- true:
   248  				case <-done:
   249  					return
   250  				}
   251  			}
   252  		}()
   253  	}
   254  
   255  	timer := time.After(20 * time.Millisecond)
   256  	for {
   257  		select {
   258  		case <-c:
   259  		case <-timer:
   260  			close(done)
   261  			return
   262  		}
   263  	}
   264  }
   265  
   266  func TestTimerFairness2(t *testing.T) {
   267  	if runtime.GOARCH == "wasm" {
   268  		t.Skip("no preemption on wasm yet")
   269  	}
   270  
   271  	done := make(chan bool)
   272  	c := make(chan bool)
   273  	for i := 0; i < 2; i++ {
   274  		go func() {
   275  			timer := time.After(20 * time.Millisecond)
   276  			var buf [1]byte
   277  			for {
   278  				syscall.Read(0, buf[0:0])
   279  				select {
   280  				case c <- true:
   281  				case <-c:
   282  				case <-timer:
   283  					done <- true
   284  					return
   285  				}
   286  			}
   287  		}()
   288  	}
   289  	<-done
   290  	<-done
   291  }
   292  
   293  // The function is used to test preemption at split stack checks.
   294  // Declaring a var avoids inlining at the call site.
   295  var preempt = func() int {
   296  	var a [128]int
   297  	sum := 0
   298  	for _, v := range a {
   299  		sum += v
   300  	}
   301  	return sum
   302  }
   303  
   304  func TestPreemption(t *testing.T) {
   305  	if runtime.GOARCH == "wasm" {
   306  		t.Skip("no preemption on wasm yet")
   307  	}
   308  
   309  	// Test that goroutines are preempted at function calls.
   310  	N := 5
   311  	if testing.Short() {
   312  		N = 2
   313  	}
   314  	c := make(chan bool)
   315  	var x uint32
   316  	for g := 0; g < 2; g++ {
   317  		go func(g int) {
   318  			for i := 0; i < N; i++ {
   319  				for atomic.LoadUint32(&x) != uint32(g) {
   320  					preempt()
   321  				}
   322  				atomic.StoreUint32(&x, uint32(1-g))
   323  			}
   324  			c <- true
   325  		}(g)
   326  	}
   327  	<-c
   328  	<-c
   329  }
   330  
   331  func TestPreemptionGC(t *testing.T) {
   332  	if runtime.GOARCH == "wasm" {
   333  		t.Skip("no preemption on wasm yet")
   334  	}
   335  
   336  	// Test that pending GC preempts running goroutines.
   337  	P := 5
   338  	N := 10
   339  	if testing.Short() {
   340  		P = 3
   341  		N = 2
   342  	}
   343  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P + 1))
   344  	var stop uint32
   345  	for i := 0; i < P; i++ {
   346  		go func() {
   347  			for atomic.LoadUint32(&stop) == 0 {
   348  				preempt()
   349  			}
   350  		}()
   351  	}
   352  	for i := 0; i < N; i++ {
   353  		runtime.Gosched()
   354  		runtime.GC()
   355  	}
   356  	atomic.StoreUint32(&stop, 1)
   357  }
   358  
   359  func TestGCFairness(t *testing.T) {
   360  	output := runTestProg(t, "testprog", "GCFairness")
   361  	want := "OK\n"
   362  	if output != want {
   363  		t.Fatalf("want %s, got %s\n", want, output)
   364  	}
   365  }
   366  
   367  func TestGCFairness2(t *testing.T) {
   368  	output := runTestProg(t, "testprog", "GCFairness2")
   369  	want := "OK\n"
   370  	if output != want {
   371  		t.Fatalf("want %s, got %s\n", want, output)
   372  	}
   373  }
   374  
   375  func TestNumGoroutine(t *testing.T) {
   376  	output := runTestProg(t, "testprog", "NumGoroutine")
   377  	want := "1\n"
   378  	if output != want {
   379  		t.Fatalf("want %q, got %q", want, output)
   380  	}
   381  
   382  	buf := make([]byte, 1<<20)
   383  
   384  	// Try up to 10 times for a match before giving up.
   385  	// This is a fundamentally racy check but it's important
   386  	// to notice if NumGoroutine and Stack are _always_ out of sync.
   387  	for i := 0; ; i++ {
   388  		// Give goroutines about to exit a chance to exit.
   389  		// The NumGoroutine and Stack below need to see
   390  		// the same state of the world, so anything we can do
   391  		// to keep it quiet is good.
   392  		runtime.Gosched()
   393  
   394  		n := runtime.NumGoroutine()
   395  		buf = buf[:runtime.Stack(buf, true)]
   396  
   397  		nstk := strings.Count(string(buf), "goroutine ")
   398  		if n == nstk {
   399  			break
   400  		}
   401  		if i >= 10 {
   402  			t.Fatalf("NumGoroutine=%d, but found %d goroutines in stack dump: %s", n, nstk, buf)
   403  		}
   404  	}
   405  }
   406  
   407  func TestPingPongHog(t *testing.T) {
   408  	if runtime.GOARCH == "wasm" {
   409  		t.Skip("no preemption on wasm yet")
   410  	}
   411  	if testing.Short() {
   412  		t.Skip("skipping in -short mode")
   413  	}
   414  
   415  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
   416  	done := make(chan bool)
   417  	hogChan, lightChan := make(chan bool), make(chan bool)
   418  	hogCount, lightCount := 0, 0
   419  
   420  	run := func(limit int, counter *int, wake chan bool) {
   421  		for {
   422  			select {
   423  			case <-done:
   424  				return
   425  
   426  			case <-wake:
   427  				for i := 0; i < limit; i++ {
   428  					*counter++
   429  				}
   430  				wake <- true
   431  			}
   432  		}
   433  	}
   434  
   435  	// Start two co-scheduled hog goroutines.
   436  	for i := 0; i < 2; i++ {
   437  		go run(1e6, &hogCount, hogChan)
   438  	}
   439  
   440  	// Start two co-scheduled light goroutines.
   441  	for i := 0; i < 2; i++ {
   442  		go run(1e3, &lightCount, lightChan)
   443  	}
   444  
   445  	// Start goroutine pairs and wait for a few preemption rounds.
   446  	hogChan <- true
   447  	lightChan <- true
   448  	time.Sleep(100 * time.Millisecond)
   449  	close(done)
   450  	<-hogChan
   451  	<-lightChan
   452  
   453  	// Check that hogCount and lightCount are within a factor of
   454  	// 5, which indicates that both pairs of goroutines handed off
   455  	// the P within a time-slice to their buddy. We can use a
   456  	// fairly large factor here to make this robust: if the
   457  	// scheduler isn't working right, the gap should be ~1000X.
   458  	const factor = 5
   459  	if hogCount > lightCount*factor || lightCount > hogCount*factor {
   460  		t.Fatalf("want hogCount/lightCount in [%v, %v]; got %d/%d = %g", 1.0/factor, factor, hogCount, lightCount, float64(hogCount)/float64(lightCount))
   461  	}
   462  }
   463  
   464  func BenchmarkPingPongHog(b *testing.B) {
   465  	if b.N == 0 {
   466  		return
   467  	}
   468  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
   469  
   470  	// Create a CPU hog
   471  	stop, done := make(chan bool), make(chan bool)
   472  	go func() {
   473  		for {
   474  			select {
   475  			case <-stop:
   476  				done <- true
   477  				return
   478  			default:
   479  			}
   480  		}
   481  	}()
   482  
   483  	// Ping-pong b.N times
   484  	ping, pong := make(chan bool), make(chan bool)
   485  	go func() {
   486  		for j := 0; j < b.N; j++ {
   487  			pong <- <-ping
   488  		}
   489  		close(stop)
   490  		done <- true
   491  	}()
   492  	go func() {
   493  		for i := 0; i < b.N; i++ {
   494  			ping <- <-pong
   495  		}
   496  		done <- true
   497  	}()
   498  	b.ResetTimer()
   499  	ping <- true // Start ping-pong
   500  	<-stop
   501  	b.StopTimer()
   502  	<-ping // Let last ponger exit
   503  	<-done // Make sure goroutines exit
   504  	<-done
   505  	<-done
   506  }
   507  
   508  func stackGrowthRecursive(i int) {
   509  	var pad [128]uint64
   510  	if i != 0 && pad[0] == 0 {
   511  		stackGrowthRecursive(i - 1)
   512  	}
   513  }
   514  
   515  func TestPreemptSplitBig(t *testing.T) {
   516  	if testing.Short() {
   517  		t.Skip("skipping in -short mode")
   518  	}
   519  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
   520  	stop := make(chan int)
   521  	go big(stop)
   522  	for i := 0; i < 3; i++ {
   523  		time.Sleep(10 * time.Microsecond) // let big start running
   524  		runtime.GC()
   525  	}
   526  	close(stop)
   527  }
   528  
   529  func big(stop chan int) int {
   530  	n := 0
   531  	for {
   532  		// delay so that gc is sure to have asked for a preemption
   533  		for i := 0; i < 1e9; i++ {
   534  			n++
   535  		}
   536  
   537  		// call bigframe, which used to miss the preemption in its prologue.
   538  		bigframe(stop)
   539  
   540  		// check if we've been asked to stop.
   541  		select {
   542  		case <-stop:
   543  			return n
   544  		}
   545  	}
   546  }
   547  
// bigframe allocates a large (8KB) stack frame before calling small.
// The frame size is the mechanism under test.
func bigframe(stop chan int) int {
	// not splitting the stack will overflow.
	// small will notice that it needs a stack split and will
	// catch the overflow.
	var x [8192]byte
	return small(stop, &x)
}
   555  
   556  func small(stop chan int, x *[8192]byte) int {
   557  	for i := range x {
   558  		x[i] = byte(i)
   559  	}
   560  	sum := 0
   561  	for i := range x {
   562  		sum += int(x[i])
   563  	}
   564  
   565  	// keep small from being a leaf function, which might
   566  	// make it not do any stack check at all.
   567  	nonleaf(stop)
   568  
   569  	return sum
   570  }
   571  
   572  func nonleaf(stop chan int) bool {
   573  	// do something that won't be inlined:
   574  	select {
   575  	case <-stop:
   576  		return true
   577  	default:
   578  		return false
   579  	}
   580  }
   581  
// TestSchedLocalQueue exercises the per-P run queue via a hook exported
// from the runtime for testing.
func TestSchedLocalQueue(t *testing.T) {
	runtime.RunSchedLocalQueueTest()
}
   585  
// TestSchedLocalQueueSteal exercises work stealing from a P's run queue
// via a hook exported from the runtime for testing.
func TestSchedLocalQueueSteal(t *testing.T) {
	runtime.RunSchedLocalQueueStealTest()
}
   589  
   590  func TestSchedLocalQueueEmpty(t *testing.T) {
   591  	if runtime.NumCPU() == 1 {
   592  		// Takes too long and does not trigger the race.
   593  		t.Skip("skipping on uniprocessor")
   594  	}
   595  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
   596  
   597  	// If runtime triggers a forced GC during this test then it will deadlock,
   598  	// since the goroutines can't be stopped/preempted during spin wait.
   599  	defer debug.SetGCPercent(debug.SetGCPercent(-1))
   600  
   601  	iters := int(1e5)
   602  	if testing.Short() {
   603  		iters = 1e2
   604  	}
   605  	runtime.RunSchedLocalQueueEmptyTest(iters)
   606  }
   607  
// benchmarkStackGrowth measures repeated stack growth cycles of depth
// rec across parallel goroutines.
func benchmarkStackGrowth(b *testing.B, rec int) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			stackGrowthRecursive(rec)
		}
	})
}
   615  
// BenchmarkStackGrowth measures shallow (depth 10) stack growth.
func BenchmarkStackGrowth(b *testing.B) {
	benchmarkStackGrowth(b, 10)
}
   619  
// BenchmarkStackGrowthDeep measures deep (depth 1024) stack growth.
func BenchmarkStackGrowthDeep(b *testing.B) {
	benchmarkStackGrowth(b, 1024)
}
   623  
// BenchmarkCreateGoroutines measures goroutine creation in one chain.
func BenchmarkCreateGoroutines(b *testing.B) {
	benchmarkCreateGoroutines(b, 1)
}
   627  
// BenchmarkCreateGoroutinesParallel measures goroutine creation in
// GOMAXPROCS parallel chains.
func BenchmarkCreateGoroutinesParallel(b *testing.B) {
	benchmarkCreateGoroutines(b, runtime.GOMAXPROCS(-1))
}
   631  
   632  func benchmarkCreateGoroutines(b *testing.B, procs int) {
   633  	c := make(chan bool)
   634  	var f func(n int)
   635  	f = func(n int) {
   636  		if n == 0 {
   637  			c <- true
   638  			return
   639  		}
   640  		go f(n - 1)
   641  	}
   642  	for i := 0; i < procs; i++ {
   643  		go f(b.N / procs)
   644  	}
   645  	for i := 0; i < procs; i++ {
   646  		<-c
   647  	}
   648  }
   649  
   650  func BenchmarkCreateGoroutinesCapture(b *testing.B) {
   651  	b.ReportAllocs()
   652  	for i := 0; i < b.N; i++ {
   653  		const N = 4
   654  		var wg sync.WaitGroup
   655  		wg.Add(N)
   656  		for i := 0; i < N; i++ {
   657  			i := i
   658  			go func() {
   659  				if i >= N {
   660  					b.Logf("bad") // just to capture b
   661  				}
   662  				wg.Done()
   663  			}()
   664  		}
   665  		wg.Wait()
   666  	}
   667  }
   668  
   669  func BenchmarkClosureCall(b *testing.B) {
   670  	sum := 0
   671  	off1 := 1
   672  	for i := 0; i < b.N; i++ {
   673  		off2 := 2
   674  		func() {
   675  			sum += i + off1 + off2
   676  		}()
   677  	}
   678  	_ = sum
   679  }
   680  
// benchmarkWakeupParallel measures, for a range of artificial spin
// delays, how quickly a goroutine blocked on a channel is rescheduled
// after being woken. spin must busy the calling goroutine for the given
// duration without descheduling it (callers pass either a busy-wait or
// a blocking-syscall sleep).
func benchmarkWakeupParallel(b *testing.B, spin func(time.Duration)) {
	if runtime.GOMAXPROCS(0) == 1 {
		b.Skip("skipping: GOMAXPROCS=1")
	}

	wakeDelay := 5 * time.Microsecond
	for _, delay := range []time.Duration{
		0,
		1 * time.Microsecond,
		2 * time.Microsecond,
		5 * time.Microsecond,
		10 * time.Microsecond,
		20 * time.Microsecond,
		50 * time.Microsecond,
		100 * time.Microsecond,
	} {
		b.Run(delay.String(), func(b *testing.B) {
			if b.N == 0 {
				return
			}
			// Start two goroutines, which alternate between being
			// sender and receiver in the following protocol:
			//
			// - The receiver spins for `delay` and then does a
			// blocking receive on a channel.
			//
			// - The sender spins for `delay+wakeDelay` and then
			// sends to the same channel. (The addition of
			// `wakeDelay` improves the probability that the
			// receiver will be blocking when the send occurs when
			// the goroutines execute in parallel.)
			//
			// In each iteration of the benchmark, each goroutine
			// acts once as sender and once as receiver, so each
			// goroutine spins for delay twice.
			//
			// BenchmarkWakeupParallel is used to estimate how
			// efficiently the scheduler parallelizes goroutines in
			// the presence of blocking:
			//
			// - If both goroutines are executed on the same core,
			// an increase in delay by N will increase the time per
			// iteration by 4*N, because all 4 delays are
			// serialized.
			//
			// - Otherwise, an increase in delay by N will increase
			// the time per iteration by 2*N, and the time per
			// iteration is 2 * (runtime overhead + chan
			// send/receive pair + delay + wakeDelay). This allows
			// the runtime overhead, including the time it takes
			// for the unblocked goroutine to be scheduled, to be
			// estimated.
			ping, pong := make(chan struct{}), make(chan struct{})
			start := make(chan struct{})
			done := make(chan struct{})
			go func() {
				<-start
				for i := 0; i < b.N; i++ {
					// sender
					spin(delay + wakeDelay)
					ping <- struct{}{}
					// receiver
					spin(delay)
					<-pong
				}
				done <- struct{}{}
			}()
			go func() {
				for i := 0; i < b.N; i++ {
					// receiver
					spin(delay)
					<-ping
					// sender
					spin(delay + wakeDelay)
					pong <- struct{}{}
				}
				done <- struct{}{}
			}()
			b.ResetTimer()
			start <- struct{}{}
			<-done
			<-done
		})
	}
}
   766  
   767  func BenchmarkWakeupParallelSpinning(b *testing.B) {
   768  	benchmarkWakeupParallel(b, func(d time.Duration) {
   769  		end := time.Now().Add(d)
   770  		for time.Now().Before(end) {
   771  			// do nothing
   772  		}
   773  	})
   774  }
   775  
// sysNanosleep is defined by OS-specific files (such as runtime_linux_test.go)
// to sleep for the given duration. If nil, dependent tests are skipped.
// The implementation should invoke a blocking system call and not
// call time.Sleep, which would deschedule the goroutine.
//
// Used by BenchmarkWakeupParallelSyscall.
var sysNanosleep func(d time.Duration)
   781  
   782  func BenchmarkWakeupParallelSyscall(b *testing.B) {
   783  	if sysNanosleep == nil {
   784  		b.Skipf("skipping on %v; sysNanosleep not defined", runtime.GOOS)
   785  	}
   786  	benchmarkWakeupParallel(b, func(d time.Duration) {
   787  		sysNanosleep(d)
   788  	})
   789  }
   790  
   791  type Matrix [][]float64
   792  
   793  func BenchmarkMatmult(b *testing.B) {
   794  	b.StopTimer()
   795  	// matmult is O(N**3) but testing expects O(b.N),
   796  	// so we need to take cube root of b.N
   797  	n := int(math.Cbrt(float64(b.N))) + 1
   798  	A := makeMatrix(n)
   799  	B := makeMatrix(n)
   800  	C := makeMatrix(n)
   801  	b.StartTimer()
   802  	matmult(nil, A, B, C, 0, n, 0, n, 0, n, 8)
   803  }
   804  
   805  func makeMatrix(n int) Matrix {
   806  	m := make(Matrix, n)
   807  	for i := 0; i < n; i++ {
   808  		m[i] = make([]float64, n)
   809  		for j := 0; j < n; j++ {
   810  			m[i][j] = float64(i*n + j)
   811  		}
   812  	}
   813  	return m
   814  }
   815  
// matmult accumulates A×B into C over the index ranges [i0,i1), [j0,j1),
// [k0,k1), recursively splitting the largest dimension until all are
// below threshold. Splits along i and j run their two halves in
// parallel (they write disjoint regions of C); splits along k are
// deliberately sequential because both halves accumulate into the same
// C elements. If done is non-nil, one value is sent on it when this
// call (and its subtree) finishes.
func matmult(done chan<- struct{}, A, B, C Matrix, i0, i1, j0, j1, k0, k1, threshold int) {
	di := i1 - i0
	dj := j1 - j0
	dk := k1 - k0
	if di >= dj && di >= dk && di >= threshold {
		// divide in two by y axis
		mi := i0 + di/2
		done1 := make(chan struct{}, 1)
		go matmult(done1, A, B, C, i0, mi, j0, j1, k0, k1, threshold)
		matmult(nil, A, B, C, mi, i1, j0, j1, k0, k1, threshold)
		<-done1
	} else if dj >= dk && dj >= threshold {
		// divide in two by x axis
		mj := j0 + dj/2
		done1 := make(chan struct{}, 1)
		go matmult(done1, A, B, C, i0, i1, j0, mj, k0, k1, threshold)
		matmult(nil, A, B, C, i0, i1, mj, j1, k0, k1, threshold)
		<-done1
	} else if dk >= threshold {
		// divide in two by "k" axis
		// deliberately not parallel because of data races
		mk := k0 + dk/2
		matmult(nil, A, B, C, i0, i1, j0, j1, k0, mk, threshold)
		matmult(nil, A, B, C, i0, i1, j0, j1, mk, k1, threshold)
	} else {
		// the matrices are small enough, compute directly
		for i := i0; i < i1; i++ {
			for j := j0; j < j1; j++ {
				for k := k0; k < k1; k++ {
					C[i][j] += A[i][k] * B[k][j]
				}
			}
		}
	}
	if done != nil {
		done <- struct{}{}
	}
}
   854  
// TestStealOrder exercises the work-stealing victim ordering via a hook
// exported from the runtime for testing.
func TestStealOrder(t *testing.T) {
	runtime.RunStealOrderTest()
}
   858  
   859  func TestLockOSThreadNesting(t *testing.T) {
   860  	if runtime.GOARCH == "wasm" {
   861  		t.Skip("no threads on wasm yet")
   862  	}
   863  
   864  	go func() {
   865  		e, i := runtime.LockOSCounts()
   866  		if e != 0 || i != 0 {
   867  			t.Errorf("want locked counts 0, 0; got %d, %d", e, i)
   868  			return
   869  		}
   870  		runtime.LockOSThread()
   871  		runtime.LockOSThread()
   872  		runtime.UnlockOSThread()
   873  		e, i = runtime.LockOSCounts()
   874  		if e != 1 || i != 0 {
   875  			t.Errorf("want locked counts 1, 0; got %d, %d", e, i)
   876  			return
   877  		}
   878  		runtime.UnlockOSThread()
   879  		e, i = runtime.LockOSCounts()
   880  		if e != 0 || i != 0 {
   881  			t.Errorf("want locked counts 0, 0; got %d, %d", e, i)
   882  			return
   883  		}
   884  	}()
   885  }
   886  
// TestLockOSThreadExit checks, via child programs, that exiting
// goroutines locked to their threads is handled cleanly.
func TestLockOSThreadExit(t *testing.T) {
	testLockOSThreadExit(t, "testprog")
}
   890  
   891  func testLockOSThreadExit(t *testing.T, prog string) {
   892  	output := runTestProg(t, prog, "LockOSThreadMain", "GOMAXPROCS=1")
   893  	want := "OK\n"
   894  	if output != want {
   895  		t.Errorf("want %q, got %q", want, output)
   896  	}
   897  
   898  	output = runTestProg(t, prog, "LockOSThreadAlt")
   899  	if output != want {
   900  		t.Errorf("want %q, got %q", want, output)
   901  	}
   902  }
   903  
   904  func TestLockOSThreadAvoidsStatePropagation(t *testing.T) {
   905  	want := "OK\n"
   906  	skip := "unshare not permitted\n"
   907  	output := runTestProg(t, "testprog", "LockOSThreadAvoidsStatePropagation", "GOMAXPROCS=1")
   908  	if output == skip {
   909  		t.Skip("unshare syscall not permitted on this system")
   910  	} else if output != want {
   911  		t.Errorf("want %q, got %q", want, output)
   912  	}
   913  }
   914  
// fakeSyscall emulates a system call lasting roughly duration: it
// enters the runtime's syscall state and busy-waits until the time has
// elapsed. Marked nosplit so the body itself has no preemption point.
//go:nosplit
func fakeSyscall(duration time.Duration) {
	runtime.Entersyscall()
	for start := runtime.Nanotime(); runtime.Nanotime()-start < int64(duration); {
	}
	runtime.Exitsyscall()
}
   923  
   924  // Check that a goroutine will be preempted if it is calling short system calls.
   925  func testPreemptionAfterSyscall(t *testing.T, syscallDuration time.Duration) {
   926  	if runtime.GOARCH == "wasm" {
   927  		t.Skip("no preemption on wasm yet")
   928  	}
   929  
   930  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
   931  
   932  	interations := 10
   933  	if testing.Short() {
   934  		interations = 1
   935  	}
   936  	const (
   937  		maxDuration = 3 * time.Second
   938  		nroutines   = 8
   939  	)
   940  
   941  	for i := 0; i < interations; i++ {
   942  		c := make(chan bool, nroutines)
   943  		stop := uint32(0)
   944  
   945  		start := time.Now()
   946  		for g := 0; g < nroutines; g++ {
   947  			go func(stop *uint32) {
   948  				c <- true
   949  				for atomic.LoadUint32(stop) == 0 {
   950  					fakeSyscall(syscallDuration)
   951  				}
   952  				c <- true
   953  			}(&stop)
   954  		}
   955  		// wait until all goroutines have started.
   956  		for g := 0; g < nroutines; g++ {
   957  			<-c
   958  		}
   959  		atomic.StoreUint32(&stop, 1)
   960  		// wait until all goroutines have finished.
   961  		for g := 0; g < nroutines; g++ {
   962  			<-c
   963  		}
   964  		duration := time.Since(start)
   965  
   966  		if duration > maxDuration {
   967  			t.Errorf("timeout exceeded: %v (%v)", duration, maxDuration)
   968  		}
   969  	}
   970  }
   971  
   972  func TestPreemptionAfterSyscall(t *testing.T) {
   973  	for _, i := range []time.Duration{10, 100, 1000} {
   974  		d := i * time.Microsecond
   975  		t.Run(fmt.Sprint(d), func(t *testing.T) {
   976  			testPreemptionAfterSyscall(t, d)
   977  		})
   978  	}
   979  }
   980  
// TestGetgThreadSwitch exercises getg consistency across thread
// switches via a hook exported from the runtime for testing.
func TestGetgThreadSwitch(t *testing.T) {
	runtime.RunGetgThreadSwitchTest()
}
   984  

View as plain text