...
Run Format

Source file src/runtime/proc_test.go

Documentation: runtime

     1  // Copyright 2011 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime_test
     6  
     7  import (
     8  	"math"
     9  	"net"
    10  	"runtime"
    11  	"runtime/debug"
    12  	"strings"
    13  	"sync"
    14  	"sync/atomic"
    15  	"syscall"
    16  	"testing"
    17  	"time"
    18  )
    19  
// stop signals the perpetuumMobile goroutines to cease respawning.
// Buffered (capacity 1) so the send in TestStopTheWorldDeadlock
// never blocks waiting for a receiver.
var stop = make(chan bool, 1)
    21  
    22  func perpetuumMobile() {
    23  	select {
    24  	case <-stop:
    25  	default:
    26  		go perpetuumMobile()
    27  	}
    28  }
    29  
    30  func TestStopTheWorldDeadlock(t *testing.T) {
    31  	if runtime.GOARCH == "wasm" {
    32  		t.Skip("no preemption on wasm yet")
    33  	}
    34  	if testing.Short() {
    35  		t.Skip("skipping during short test")
    36  	}
    37  	maxprocs := runtime.GOMAXPROCS(3)
    38  	compl := make(chan bool, 2)
    39  	go func() {
    40  		for i := 0; i != 1000; i += 1 {
    41  			runtime.GC()
    42  		}
    43  		compl <- true
    44  	}()
    45  	go func() {
    46  		for i := 0; i != 1000; i += 1 {
    47  			runtime.GOMAXPROCS(3)
    48  		}
    49  		compl <- true
    50  	}()
    51  	go perpetuumMobile()
    52  	<-compl
    53  	<-compl
    54  	stop <- true
    55  	runtime.GOMAXPROCS(maxprocs)
    56  }
    57  
// TestYieldProgress checks that a goroutine spinning on runtime.Gosched
// (not locked to its OS thread) does not starve other goroutines.
func TestYieldProgress(t *testing.T) {
	testYieldProgress(false)
}
    61  
// TestYieldLockedProgress is TestYieldProgress with the spinning
// goroutine locked to its OS thread via runtime.LockOSThread.
func TestYieldLockedProgress(t *testing.T) {
	testYieldProgress(true)
}
    65  
    66  func testYieldProgress(locked bool) {
    67  	c := make(chan bool)
    68  	cack := make(chan bool)
    69  	go func() {
    70  		if locked {
    71  			runtime.LockOSThread()
    72  		}
    73  		for {
    74  			select {
    75  			case <-c:
    76  				cack <- true
    77  				return
    78  			default:
    79  				runtime.Gosched()
    80  			}
    81  		}
    82  	}()
    83  	time.Sleep(10 * time.Millisecond)
    84  	c <- true
    85  	<-cack
    86  }
    87  
    88  func TestYieldLocked(t *testing.T) {
    89  	const N = 10
    90  	c := make(chan bool)
    91  	go func() {
    92  		runtime.LockOSThread()
    93  		for i := 0; i < N; i++ {
    94  			runtime.Gosched()
    95  			time.Sleep(time.Millisecond)
    96  		}
    97  		c <- true
    98  		// runtime.UnlockOSThread() is deliberately omitted
    99  	}()
   100  	<-c
   101  }
   102  
// TestGoroutineParallelism checks that P goroutines really run in
// parallel: each may only advance the shared counter when it reads its
// own expected value, so every goroutine must be scheduled
// simultaneously for any of them to finish.
func TestGoroutineParallelism(t *testing.T) {
	if runtime.NumCPU() == 1 {
		// Takes too long, too easy to deadlock, etc.
		t.Skip("skipping on uniprocessor")
	}
	P := 4
	N := 10
	if testing.Short() {
		P = 3
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	for try := 0; try < N; try++ {
		done := make(chan bool)
		x := uint32(0)
		for p := 0; p < P; p++ {
			// Test that all P goroutines are scheduled at the same time
			go func(p int) {
				// Round-robin handoff: goroutine p may bump x only
				// when x == P*i + p, so progress requires all P
				// goroutines to be running concurrently.
				for i := 0; i < 3; i++ {
					expected := uint32(P*i + p)
					for atomic.LoadUint32(&x) != expected {
						// spin; relies on the others running in parallel
					}
					atomic.StoreUint32(&x, expected+1)
				}
				done <- true
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}
   139  
   140  // Test that all runnable goroutines are scheduled at the same time.
   141  func TestGoroutineParallelism2(t *testing.T) {
   142  	//testGoroutineParallelism2(t, false, false)
   143  	testGoroutineParallelism2(t, true, false)
   144  	testGoroutineParallelism2(t, false, true)
   145  	testGoroutineParallelism2(t, true, true)
   146  }
   147  
   148  func testGoroutineParallelism2(t *testing.T, load, netpoll bool) {
   149  	if runtime.NumCPU() == 1 {
   150  		// Takes too long, too easy to deadlock, etc.
   151  		t.Skip("skipping on uniprocessor")
   152  	}
   153  	P := 4
   154  	N := 10
   155  	if testing.Short() {
   156  		N = 3
   157  	}
   158  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
   159  	// If runtime triggers a forced GC during this test then it will deadlock,
   160  	// since the goroutines can't be stopped/preempted.
   161  	// Disable GC for this test (see issue #10958).
   162  	defer debug.SetGCPercent(debug.SetGCPercent(-1))
   163  	for try := 0; try < N; try++ {
   164  		if load {
   165  			// Create P goroutines and wait until they all run.
   166  			// When we run the actual test below, worker threads
   167  			// running the goroutines will start parking.
   168  			done := make(chan bool)
   169  			x := uint32(0)
   170  			for p := 0; p < P; p++ {
   171  				go func() {
   172  					if atomic.AddUint32(&x, 1) == uint32(P) {
   173  						done <- true
   174  						return
   175  					}
   176  					for atomic.LoadUint32(&x) != uint32(P) {
   177  					}
   178  				}()
   179  			}
   180  			<-done
   181  		}
   182  		if netpoll {
   183  			// Enable netpoller, affects schedler behavior.
   184  			laddr := "localhost:0"
   185  			if runtime.GOOS == "android" {
   186  				// On some Android devices, there are no records for localhost,
   187  				// see https://golang.org/issues/14486.
   188  				// Don't use 127.0.0.1 for every case, it won't work on IPv6-only systems.
   189  				laddr = "127.0.0.1:0"
   190  			}
   191  			ln, err := net.Listen("tcp", laddr)
   192  			if err != nil {
   193  				defer ln.Close() // yup, defer in a loop
   194  			}
   195  		}
   196  		done := make(chan bool)
   197  		x := uint32(0)
   198  		// Spawn P goroutines in a nested fashion just to differ from TestGoroutineParallelism.
   199  		for p := 0; p < P/2; p++ {
   200  			go func(p int) {
   201  				for p2 := 0; p2 < 2; p2++ {
   202  					go func(p2 int) {
   203  						for i := 0; i < 3; i++ {
   204  							expected := uint32(P*i + p*2 + p2)
   205  							for atomic.LoadUint32(&x) != expected {
   206  							}
   207  							atomic.StoreUint32(&x, expected+1)
   208  						}
   209  						done <- true
   210  					}(p2)
   211  				}
   212  			}(p)
   213  		}
   214  		for p := 0; p < P; p++ {
   215  			<-done
   216  		}
   217  	}
   218  }
   219  
   220  func TestBlockLocked(t *testing.T) {
   221  	const N = 10
   222  	c := make(chan bool)
   223  	go func() {
   224  		runtime.LockOSThread()
   225  		for i := 0; i < N; i++ {
   226  			c <- true
   227  		}
   228  		runtime.UnlockOSThread()
   229  	}()
   230  	for i := 0; i < N; i++ {
   231  		<-c
   232  	}
   233  }
   234  
   235  func TestTimerFairness(t *testing.T) {
   236  	if runtime.GOARCH == "wasm" {
   237  		t.Skip("no preemption on wasm yet")
   238  	}
   239  
   240  	done := make(chan bool)
   241  	c := make(chan bool)
   242  	for i := 0; i < 2; i++ {
   243  		go func() {
   244  			for {
   245  				select {
   246  				case c <- true:
   247  				case <-done:
   248  					return
   249  				}
   250  			}
   251  		}()
   252  	}
   253  
   254  	timer := time.After(20 * time.Millisecond)
   255  	for {
   256  		select {
   257  		case <-c:
   258  		case <-timer:
   259  			close(done)
   260  			return
   261  		}
   262  	}
   263  }
   264  
   265  func TestTimerFairness2(t *testing.T) {
   266  	if runtime.GOARCH == "wasm" {
   267  		t.Skip("no preemption on wasm yet")
   268  	}
   269  
   270  	done := make(chan bool)
   271  	c := make(chan bool)
   272  	for i := 0; i < 2; i++ {
   273  		go func() {
   274  			timer := time.After(20 * time.Millisecond)
   275  			var buf [1]byte
   276  			for {
   277  				syscall.Read(0, buf[0:0])
   278  				select {
   279  				case c <- true:
   280  				case <-c:
   281  				case <-timer:
   282  					done <- true
   283  					return
   284  				}
   285  			}
   286  		}()
   287  	}
   288  	<-done
   289  	<-done
   290  }
   291  
   292  // The function is used to test preemption at split stack checks.
   293  // Declaring a var avoids inlining at the call site.
   294  var preempt = func() int {
   295  	var a [128]int
   296  	sum := 0
   297  	for _, v := range a {
   298  		sum += v
   299  	}
   300  	return sum
   301  }
   302  
// TestPreemption checks that two goroutines alternating turns via an
// atomic flag make progress even though neither ever blocks: the
// preempt() call inside the spin loop supplies a function-call
// preemption point.
func TestPreemption(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	// Test that goroutines are preempted at function calls.
	N := 5
	if testing.Short() {
		N = 2
	}
	c := make(chan bool)
	var x uint32
	for g := 0; g < 2; g++ {
		go func(g int) {
			for i := 0; i < N; i++ {
				// Spin until it is our turn (x == g); preempt()
				// keeps a preemption point inside the spin loop.
				for atomic.LoadUint32(&x) != uint32(g) {
					preempt()
				}
				// Hand the turn to the other goroutine.
				atomic.StoreUint32(&x, uint32(1-g))
			}
			c <- true
		}(g)
	}
	<-c
	<-c
}
   329  
// TestPreemptionGC checks that a pending GC can preempt P spinning
// goroutines. GOMAXPROCS is set to P+1 so the goroutine driving the GC
// always has a P to run on.
func TestPreemptionGC(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	// Test that pending GC preempts running goroutines.
	P := 5
	N := 10
	if testing.Short() {
		P = 3
		N = 2
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P + 1))
	// Note: this local shadows the package-level stop channel.
	var stop uint32
	for i := 0; i < P; i++ {
		go func() {
			// Spin until told to stop; each preempt() call is a
			// function-call preemption point for the GC.
			for atomic.LoadUint32(&stop) == 0 {
				preempt()
			}
		}()
	}
	for i := 0; i < N; i++ {
		runtime.Gosched()
		runtime.GC()
	}
	atomic.StoreUint32(&stop, 1)
}
   357  
   358  func TestGCFairness(t *testing.T) {
   359  	output := runTestProg(t, "testprog", "GCFairness")
   360  	want := "OK\n"
   361  	if output != want {
   362  		t.Fatalf("want %s, got %s\n", want, output)
   363  	}
   364  }
   365  
   366  func TestGCFairness2(t *testing.T) {
   367  	output := runTestProg(t, "testprog", "GCFairness2")
   368  	want := "OK\n"
   369  	if output != want {
   370  		t.Fatalf("want %s, got %s\n", want, output)
   371  	}
   372  }
   373  
// TestNumGoroutine checks that runtime.NumGoroutine agrees with the
// number of goroutines in a full Stack dump — first for a fresh child
// process (which should report exactly 1), then for this process.
func TestNumGoroutine(t *testing.T) {
	output := runTestProg(t, "testprog", "NumGoroutine")
	want := "1\n"
	if output != want {
		t.Fatalf("want %q, got %q", want, output)
	}

	buf := make([]byte, 1<<20)

	// Try up to 10 times for a match before giving up.
	// This is a fundamentally racy check but it's important
	// to notice if NumGoroutine and Stack are _always_ out of sync.
	for i := 0; ; i++ {
		// Give goroutines about to exit a chance to exit.
		// The NumGoroutine and Stack below need to see
		// the same state of the world, so anything we can do
		// to keep it quiet is good.
		runtime.Gosched()

		n := runtime.NumGoroutine()
		buf = buf[:runtime.Stack(buf, true)]

		// Every goroutine's entry in the dump starts with "goroutine ".
		nstk := strings.Count(string(buf), "goroutine ")
		if n == nstk {
			break
		}
		if i >= 10 {
			t.Fatalf("NumGoroutine=%d, but found %d goroutines in stack dump: %s", n, nstk, buf)
		}
	}
}
   405  
// TestPingPongHog checks scheduler fairness on a single P between a pair
// of "hog" goroutines (1e6 increments per wakeup) and a pair of "light"
// goroutines (1e3 per wakeup): after a fixed wall-clock interval their
// totals must be within a constant factor of each other.
func TestPingPongHog(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}

	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
	done := make(chan bool)
	hogChan, lightChan := make(chan bool), make(chan bool)
	hogCount, lightCount := 0, 0

	// run ping-pongs with its partner on wake: each wakeup performs
	// `limit` increments and then wakes the partner on the same channel.
	run := func(limit int, counter *int, wake chan bool) {
		for {
			select {
			case <-done:
				return

			case <-wake:
				for i := 0; i < limit; i++ {
					*counter++
				}
				wake <- true
			}
		}
	}

	// Start two co-scheduled hog goroutines.
	for i := 0; i < 2; i++ {
		go run(1e6, &hogCount, hogChan)
	}

	// Start two co-scheduled light goroutines.
	for i := 0; i < 2; i++ {
		go run(1e3, &lightCount, lightChan)
	}

	// Start goroutine pairs and wait for a few preemption rounds.
	hogChan <- true
	lightChan <- true
	time.Sleep(100 * time.Millisecond)
	close(done)
	<-hogChan
	<-lightChan

	// Check that hogCount and lightCount are within a factor of
	// 5, which indicates that both pairs of goroutines handed off
	// the P within a time-slice to their buddy. We can use a
	// fairly large factor here to make this robust: if the
	// scheduler isn't working right, the gap should be ~1000X.
	const factor = 5
	if hogCount > lightCount*factor || lightCount > hogCount*factor {
		t.Fatalf("want hogCount/lightCount in [%v, %v]; got %d/%d = %g", 1.0/factor, factor, hogCount, lightCount, float64(hogCount)/float64(lightCount))
	}
}
   462  
// BenchmarkPingPongHog measures channel ping-pong latency between two
// goroutines sharing one P with a spinning CPU hog, so every handoff
// requires preempting the hog.
func BenchmarkPingPongHog(b *testing.B) {
	if b.N == 0 {
		return
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

	// Create a CPU hog
	stop, done := make(chan bool), make(chan bool)
	go func() {
		for {
			select {
			case <-stop:
				done <- true
				return
			default:
			}
		}
	}()

	// Ping-pong b.N times
	ping, pong := make(chan bool), make(chan bool)
	go func() {
		for j := 0; j < b.N; j++ {
			pong <- <-ping
		}
		// Last round finished: shut down the hog, then report done.
		close(stop)
		done <- true
	}()
	go func() {
		for i := 0; i < b.N; i++ {
			ping <- <-pong
		}
		done <- true
	}()
	b.ResetTimer()
	ping <- true // Start ping-pong
	<-stop
	b.StopTimer()
	<-ping // Let last ponger exit
	<-done // Make sure goroutines exit
	<-done
	<-done
}
   506  
   507  func stackGrowthRecursive(i int) {
   508  	var pad [128]uint64
   509  	if i != 0 && pad[0] == 0 {
   510  		stackGrowthRecursive(i - 1)
   511  	}
   512  }
   513  
   514  func TestPreemptSplitBig(t *testing.T) {
   515  	if testing.Short() {
   516  		t.Skip("skipping in -short mode")
   517  	}
   518  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
   519  	stop := make(chan int)
   520  	go big(stop)
   521  	for i := 0; i < 3; i++ {
   522  		time.Sleep(10 * time.Microsecond) // let big start running
   523  		runtime.GC()
   524  	}
   525  	close(stop)
   526  }
   527  
   528  func big(stop chan int) int {
   529  	n := 0
   530  	for {
   531  		// delay so that gc is sure to have asked for a preemption
   532  		for i := 0; i < 1e9; i++ {
   533  			n++
   534  		}
   535  
   536  		// call bigframe, which used to miss the preemption in its prologue.
   537  		bigframe(stop)
   538  
   539  		// check if we've been asked to stop.
   540  		select {
   541  		case <-stop:
   542  			return n
   543  		}
   544  	}
   545  }
   546  
   547  func bigframe(stop chan int) int {
   548  	// not splitting the stack will overflow.
   549  	// small will notice that it needs a stack split and will
   550  	// catch the overflow.
   551  	var x [8192]byte
   552  	return small(stop, &x)
   553  }
   554  
   555  func small(stop chan int, x *[8192]byte) int {
   556  	for i := range x {
   557  		x[i] = byte(i)
   558  	}
   559  	sum := 0
   560  	for i := range x {
   561  		sum += int(x[i])
   562  	}
   563  
   564  	// keep small from being a leaf function, which might
   565  	// make it not do any stack check at all.
   566  	nonleaf(stop)
   567  
   568  	return sum
   569  }
   570  
   571  func nonleaf(stop chan int) bool {
   572  	// do something that won't be inlined:
   573  	select {
   574  	case <-stop:
   575  		return true
   576  	default:
   577  		return false
   578  	}
   579  }
   580  
// TestSchedLocalQueue exercises the per-P run queue via a test-only
// hook exported by package runtime.
func TestSchedLocalQueue(t *testing.T) {
	runtime.RunSchedLocalQueueTest()
}
   584  
// TestSchedLocalQueueSteal exercises work stealing from a per-P run
// queue via a test-only hook exported by package runtime.
func TestSchedLocalQueueSteal(t *testing.T) {
	runtime.RunSchedLocalQueueStealTest()
}
   588  
   589  func TestSchedLocalQueueEmpty(t *testing.T) {
   590  	if runtime.NumCPU() == 1 {
   591  		// Takes too long and does not trigger the race.
   592  		t.Skip("skipping on uniprocessor")
   593  	}
   594  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
   595  
   596  	// If runtime triggers a forced GC during this test then it will deadlock,
   597  	// since the goroutines can't be stopped/preempted during spin wait.
   598  	defer debug.SetGCPercent(debug.SetGCPercent(-1))
   599  
   600  	iters := int(1e5)
   601  	if testing.Short() {
   602  		iters = 1e2
   603  	}
   604  	runtime.RunSchedLocalQueueEmptyTest(iters)
   605  }
   606  
// benchmarkStackGrowth measures repeated stack growth by recursing rec
// levels (1KB of frame per level) from parallel workers.
func benchmarkStackGrowth(b *testing.B, rec int) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			stackGrowthRecursive(rec)
		}
	})
}
   614  
// BenchmarkStackGrowth measures shallow (10-level) stack growth.
func BenchmarkStackGrowth(b *testing.B) {
	benchmarkStackGrowth(b, 10)
}
   618  
// BenchmarkStackGrowthDeep measures deep (1024-level) stack growth.
func BenchmarkStackGrowthDeep(b *testing.B) {
	benchmarkStackGrowth(b, 1024)
}
   622  
// BenchmarkCreateGoroutines measures goroutine creation using a single
// spawner chain.
func BenchmarkCreateGoroutines(b *testing.B) {
	benchmarkCreateGoroutines(b, 1)
}
   626  
// BenchmarkCreateGoroutinesParallel measures goroutine creation with one
// spawner chain per P (GOMAXPROCS(-1) queries the current value).
func BenchmarkCreateGoroutinesParallel(b *testing.B) {
	benchmarkCreateGoroutines(b, runtime.GOMAXPROCS(-1))
}
   630  
   631  func benchmarkCreateGoroutines(b *testing.B, procs int) {
   632  	c := make(chan bool)
   633  	var f func(n int)
   634  	f = func(n int) {
   635  		if n == 0 {
   636  			c <- true
   637  			return
   638  		}
   639  		go f(n - 1)
   640  	}
   641  	for i := 0; i < procs; i++ {
   642  		go f(b.N / procs)
   643  	}
   644  	for i := 0; i < procs; i++ {
   645  		<-c
   646  	}
   647  }
   648  
   649  func BenchmarkCreateGoroutinesCapture(b *testing.B) {
   650  	b.ReportAllocs()
   651  	for i := 0; i < b.N; i++ {
   652  		const N = 4
   653  		var wg sync.WaitGroup
   654  		wg.Add(N)
   655  		for i := 0; i < N; i++ {
   656  			i := i
   657  			go func() {
   658  				if i >= N {
   659  					b.Logf("bad") // just to capture b
   660  				}
   661  				wg.Done()
   662  			}()
   663  		}
   664  		wg.Wait()
   665  	}
   666  }
   667  
   668  func BenchmarkClosureCall(b *testing.B) {
   669  	sum := 0
   670  	off1 := 1
   671  	for i := 0; i < b.N; i++ {
   672  		off2 := 2
   673  		func() {
   674  			sum += i + off1 + off2
   675  		}()
   676  	}
   677  	_ = sum
   678  }
   679  
// benchmarkWakeupParallel measures how quickly a goroutine blocked on a
// channel is woken and rescheduled. The spin argument busy-waits (or
// blocks in a syscall) for a given duration, letting each sub-benchmark
// control how long the partner goroutine runs before the handoff.
func benchmarkWakeupParallel(b *testing.B, spin func(time.Duration)) {
	if runtime.GOMAXPROCS(0) == 1 {
		b.Skip("skipping: GOMAXPROCS=1")
	}

	wakeDelay := 5 * time.Microsecond
	for _, delay := range []time.Duration{
		0,
		1 * time.Microsecond,
		2 * time.Microsecond,
		5 * time.Microsecond,
		10 * time.Microsecond,
		20 * time.Microsecond,
		50 * time.Microsecond,
		100 * time.Microsecond,
	} {
		b.Run(delay.String(), func(b *testing.B) {
			if b.N == 0 {
				return
			}
			// Start two goroutines, which alternate between being
			// sender and receiver in the following protocol:
			//
			// - The receiver spins for `delay` and then does a
			// blocking receive on a channel.
			//
			// - The sender spins for `delay+wakeDelay` and then
			// sends to the same channel. (The addition of
			// `wakeDelay` improves the probability that the
			// receiver will be blocking when the send occurs when
			// the goroutines execute in parallel.)
			//
			// In each iteration of the benchmark, each goroutine
			// acts once as sender and once as receiver, so each
			// goroutine spins for delay twice.
			//
			// BenchmarkWakeupParallel is used to estimate how
			// efficiently the scheduler parallelizes goroutines in
			// the presence of blocking:
			//
			// - If both goroutines are executed on the same core,
			// an increase in delay by N will increase the time per
			// iteration by 4*N, because all 4 delays are
			// serialized.
			//
			// - Otherwise, an increase in delay by N will increase
			// the time per iteration by 2*N, and the time per
			// iteration is 2 * (runtime overhead + chan
			// send/receive pair + delay + wakeDelay). This allows
			// the runtime overhead, including the time it takes
			// for the unblocked goroutine to be scheduled, to be
			// estimated.
			ping, pong := make(chan struct{}), make(chan struct{})
			start := make(chan struct{})
			done := make(chan struct{})
			go func() {
				<-start
				for i := 0; i < b.N; i++ {
					// sender
					spin(delay + wakeDelay)
					ping <- struct{}{}
					// receiver
					spin(delay)
					<-pong
				}
				done <- struct{}{}
			}()
			go func() {
				for i := 0; i < b.N; i++ {
					// receiver
					spin(delay)
					<-ping
					// sender
					spin(delay + wakeDelay)
					pong <- struct{}{}
				}
				done <- struct{}{}
			}()
			b.ResetTimer()
			start <- struct{}{}
			<-done
			<-done
		})
	}
}
   765  
   766  func BenchmarkWakeupParallelSpinning(b *testing.B) {
   767  	benchmarkWakeupParallel(b, func(d time.Duration) {
   768  		end := time.Now().Add(d)
   769  		for time.Now().Before(end) {
   770  			// do nothing
   771  		}
   772  	})
   773  }
   774  
// sysNanosleep is defined by OS-specific files (such as runtime_linux_test.go)
// to sleep for the given duration. If nil, dependent tests are skipped.
// The implementation should invoke a blocking system call and not
// call time.Sleep, which would deschedule the goroutine.
// (BenchmarkWakeupParallelSyscall checks for nil before using it.)
var sysNanosleep func(d time.Duration)
   780  
   781  func BenchmarkWakeupParallelSyscall(b *testing.B) {
   782  	if sysNanosleep == nil {
   783  		b.Skipf("skipping on %v; sysNanosleep not defined", runtime.GOOS)
   784  	}
   785  	benchmarkWakeupParallel(b, func(d time.Duration) {
   786  		sysNanosleep(d)
   787  	})
   788  }
   789  
// Matrix is a dense matrix stored as a slice of rows of float64.
type Matrix [][]float64
   791  
   792  func BenchmarkMatmult(b *testing.B) {
   793  	b.StopTimer()
   794  	// matmult is O(N**3) but testing expects O(b.N),
   795  	// so we need to take cube root of b.N
   796  	n := int(math.Cbrt(float64(b.N))) + 1
   797  	A := makeMatrix(n)
   798  	B := makeMatrix(n)
   799  	C := makeMatrix(n)
   800  	b.StartTimer()
   801  	matmult(nil, A, B, C, 0, n, 0, n, 0, n, 8)
   802  }
   803  
   804  func makeMatrix(n int) Matrix {
   805  	m := make(Matrix, n)
   806  	for i := 0; i < n; i++ {
   807  		m[i] = make([]float64, n)
   808  		for j := 0; j < n; j++ {
   809  			m[i][j] = float64(i*n + j)
   810  		}
   811  	}
   812  	return m
   813  }
   814  
   815  func matmult(done chan<- struct{}, A, B, C Matrix, i0, i1, j0, j1, k0, k1, threshold int) {
   816  	di := i1 - i0
   817  	dj := j1 - j0
   818  	dk := k1 - k0
   819  	if di >= dj && di >= dk && di >= threshold {
   820  		// divide in two by y axis
   821  		mi := i0 + di/2
   822  		done1 := make(chan struct{}, 1)
   823  		go matmult(done1, A, B, C, i0, mi, j0, j1, k0, k1, threshold)
   824  		matmult(nil, A, B, C, mi, i1, j0, j1, k0, k1, threshold)
   825  		<-done1
   826  	} else if dj >= dk && dj >= threshold {
   827  		// divide in two by x axis
   828  		mj := j0 + dj/2
   829  		done1 := make(chan struct{}, 1)
   830  		go matmult(done1, A, B, C, i0, i1, j0, mj, k0, k1, threshold)
   831  		matmult(nil, A, B, C, i0, i1, mj, j1, k0, k1, threshold)
   832  		<-done1
   833  	} else if dk >= threshold {
   834  		// divide in two by "k" axis
   835  		// deliberately not parallel because of data races
   836  		mk := k0 + dk/2
   837  		matmult(nil, A, B, C, i0, i1, j0, j1, k0, mk, threshold)
   838  		matmult(nil, A, B, C, i0, i1, j0, j1, mk, k1, threshold)
   839  	} else {
   840  		// the matrices are small enough, compute directly
   841  		for i := i0; i < i1; i++ {
   842  			for j := j0; j < j1; j++ {
   843  				for k := k0; k < k1; k++ {
   844  					C[i][j] += A[i][k] * B[k][j]
   845  				}
   846  			}
   847  		}
   848  	}
   849  	if done != nil {
   850  		done <- struct{}{}
   851  	}
   852  }
   853  
// TestStealOrder exercises the scheduler's steal ordering via a
// test-only hook exported by package runtime.
func TestStealOrder(t *testing.T) {
	runtime.RunStealOrderTest()
}
   857  
   858  func TestLockOSThreadNesting(t *testing.T) {
   859  	if runtime.GOARCH == "wasm" {
   860  		t.Skip("no threads on wasm yet")
   861  	}
   862  
   863  	go func() {
   864  		e, i := runtime.LockOSCounts()
   865  		if e != 0 || i != 0 {
   866  			t.Errorf("want locked counts 0, 0; got %d, %d", e, i)
   867  			return
   868  		}
   869  		runtime.LockOSThread()
   870  		runtime.LockOSThread()
   871  		runtime.UnlockOSThread()
   872  		e, i = runtime.LockOSCounts()
   873  		if e != 1 || i != 0 {
   874  			t.Errorf("want locked counts 1, 0; got %d, %d", e, i)
   875  			return
   876  		}
   877  		runtime.UnlockOSThread()
   878  		e, i = runtime.LockOSCounts()
   879  		if e != 0 || i != 0 {
   880  			t.Errorf("want locked counts 0, 0; got %d, %d", e, i)
   881  			return
   882  		}
   883  	}()
   884  }
   885  
// TestLockOSThreadExit runs the LockOSThread child programs via the
// "testprog" binary; see testLockOSThreadExit.
func TestLockOSThreadExit(t *testing.T) {
	testLockOSThreadExit(t, "testprog")
}
   889  
   890  func testLockOSThreadExit(t *testing.T, prog string) {
   891  	output := runTestProg(t, prog, "LockOSThreadMain", "GOMAXPROCS=1")
   892  	want := "OK\n"
   893  	if output != want {
   894  		t.Errorf("want %s, got %s\n", want, output)
   895  	}
   896  
   897  	output = runTestProg(t, prog, "LockOSThreadAlt")
   898  	if output != want {
   899  		t.Errorf("want %s, got %s\n", want, output)
   900  	}
   901  }
   902  

View as plain text