...
Run Format

Source file src/runtime/malloc_test.go

Documentation: runtime

     1  // Copyright 2013 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime_test
     6  
     7  import (
     8  	"flag"
     9  	"fmt"
    10  	"internal/race"
    11  	"internal/testenv"
    12  	"os"
    13  	"os/exec"
    14  	"reflect"
    15  	. "runtime"
    16  	"strings"
    17  	"testing"
    18  	"time"
    19  	"unsafe"
    20  )
    21  
// testMemStatsCount tracks how many times TestMemStats has run in this
// process; the test can run repeatedly under -test.count, and some of
// the threshold checks are only valid on the first run.
var testMemStatsCount int

// TestMemStats checks that ReadMemStats returns a self-consistent
// snapshot: every MemStats field is run through a per-field predicate
// table, and several cross-field invariants (the Sys breakdown, heap
// accounting, and the GC pause history ring buffers) are verified.
func TestMemStats(t *testing.T) {
	testMemStatsCount++

	// Make sure there's at least one forced GC.
	GC()

	// Test that MemStats has sane values.
	st := new(MemStats)
	ReadMemStats(st)

	// nz fails if x is the zero value of its type.
	nz := func(x interface{}) error {
		if x != reflect.Zero(reflect.TypeOf(x)).Interface() {
			return nil
		}
		return fmt.Errorf("zero value")
	}
	// le(thresh) returns a check that fails if the value, converted
	// to float64, is at or above thresh (catches overflow/garbage).
	le := func(thresh float64) func(interface{}) error {
		return func(x interface{}) error {
			// These sanity tests aren't necessarily valid
			// with high -test.count values, so only run
			// them once.
			if testMemStatsCount > 1 {
				return nil
			}

			if reflect.ValueOf(x).Convert(reflect.TypeOf(thresh)).Float() < thresh {
				return nil
			}
			return fmt.Errorf("insanely high value (overflow?); want <= %v", thresh)
		}
	}
	// eq(x) returns a check that fails unless the value equals x exactly.
	eq := func(x interface{}) func(interface{}) error {
		return func(y interface{}) error {
			if x == y {
				return nil
			}
			return fmt.Errorf("want %v", x)
		}
	}
	// Of the uint fields, HeapReleased, HeapIdle can be 0.
	// PauseTotalNs can be 0 if timer resolution is poor.
	fields := map[string][]func(interface{}) error{
		"Alloc": {nz, le(1e10)}, "TotalAlloc": {nz, le(1e11)}, "Sys": {nz, le(1e10)},
		"Lookups": {eq(uint64(0))}, "Mallocs": {nz, le(1e10)}, "Frees": {nz, le(1e10)},
		"HeapAlloc": {nz, le(1e10)}, "HeapSys": {nz, le(1e10)}, "HeapIdle": {le(1e10)},
		"HeapInuse": {nz, le(1e10)}, "HeapReleased": {le(1e10)}, "HeapObjects": {nz, le(1e10)},
		"StackInuse": {nz, le(1e10)}, "StackSys": {nz, le(1e10)},
		"MSpanInuse": {nz, le(1e10)}, "MSpanSys": {nz, le(1e10)},
		"MCacheInuse": {nz, le(1e10)}, "MCacheSys": {nz, le(1e10)},
		"BuckHashSys": {nz, le(1e10)}, "GCSys": {nz, le(1e10)}, "OtherSys": {nz, le(1e10)},
		"NextGC": {nz, le(1e10)}, "LastGC": {nz},
		"PauseTotalNs": {le(1e11)}, "PauseNs": nil, "PauseEnd": nil,
		"NumGC": {nz, le(1e9)}, "NumForcedGC": {nz, le(1e9)},
		"GCCPUFraction": {le(0.99)}, "EnableGC": {eq(true)}, "DebugGC": {eq(false)},
		"BySize": nil,
	}

	// Walk every field of MemStats by reflection and apply its checks.
	// A field missing from the table above means MemStats grew a field
	// this test doesn't know about, which is reported as an error.
	rst := reflect.ValueOf(st).Elem()
	for i := 0; i < rst.Type().NumField(); i++ {
		name, val := rst.Type().Field(i).Name, rst.Field(i).Interface()
		checks, ok := fields[name]
		if !ok {
			t.Errorf("unknown MemStats field %s", name)
			continue
		}
		for _, check := range checks {
			if err := check(val); err != nil {
				t.Errorf("%s = %v: %s", name, val, err)
			}
		}
	}

	// Sys must be exactly the sum of the per-subsystem Sys fields.
	if st.Sys != st.HeapSys+st.StackSys+st.MSpanSys+st.MCacheSys+
		st.BuckHashSys+st.GCSys+st.OtherSys {
		t.Fatalf("Bad sys value: %+v", *st)
	}

	if st.HeapIdle+st.HeapInuse != st.HeapSys {
		t.Fatalf("HeapIdle(%d) + HeapInuse(%d) should be equal to HeapSys(%d), but isn't.", st.HeapIdle, st.HeapInuse, st.HeapSys)
	}

	// PauseEnd is a circular buffer; the most recent entry is at
	// (NumGC+255) % len(PauseEnd), i.e. NumGC-1 mod the buffer size,
	// and it must agree with LastGC.
	if lpe := st.PauseEnd[int(st.NumGC+255)%len(st.PauseEnd)]; st.LastGC != lpe {
		t.Fatalf("LastGC(%d) != last PauseEnd(%d)", st.LastGC, lpe)
	}

	var pauseTotal uint64
	for _, pause := range st.PauseNs {
		pauseTotal += pause
	}
	if int(st.NumGC) < len(st.PauseNs) {
		// We have all pauses, so this should be exact.
		if st.PauseTotalNs != pauseTotal {
			t.Fatalf("PauseTotalNs(%d) != sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
		// Entries beyond NumGC have never been written; they must
		// still be zero in both ring buffers.
		for i := int(st.NumGC); i < len(st.PauseNs); i++ {
			if st.PauseNs[i] != 0 {
				t.Fatalf("Non-zero PauseNs[%d]: %+v", i, st)
			}
			if st.PauseEnd[i] != 0 {
				t.Fatalf("Non-zero PauseEnd[%d]: %+v", i, st)
			}
		}
	} else {
		// The ring has wrapped, so old pauses have been dropped
		// and the sum can only be a lower bound on the total.
		if st.PauseTotalNs < pauseTotal {
			t.Fatalf("PauseTotalNs(%d) < sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
	}

	if st.NumForcedGC > st.NumGC {
		t.Fatalf("NumForcedGC(%d) > NumGC(%d)", st.NumForcedGC, st.NumGC)
	}
}
   136  
   137  func TestStringConcatenationAllocs(t *testing.T) {
   138  	n := testing.AllocsPerRun(1e3, func() {
   139  		b := make([]byte, 10)
   140  		for i := 0; i < 10; i++ {
   141  			b[i] = byte(i) + '0'
   142  		}
   143  		s := "foo" + string(b)
   144  		if want := "foo0123456789"; s != want {
   145  			t.Fatalf("want %v, got %v", want, s)
   146  		}
   147  	})
   148  	// Only string concatenation allocates.
   149  	if n != 1 {
   150  		t.Fatalf("want 1 allocation, got %v", n)
   151  	}
   152  }
   153  
   154  func TestTinyAlloc(t *testing.T) {
   155  	const N = 16
   156  	var v [N]unsafe.Pointer
   157  	for i := range v {
   158  		v[i] = unsafe.Pointer(new(byte))
   159  	}
   160  
   161  	chunks := make(map[uintptr]bool, N)
   162  	for _, p := range v {
   163  		chunks[uintptr(p)&^7] = true
   164  	}
   165  
   166  	if len(chunks) == N {
   167  		t.Fatal("no bytes allocated within the same 8-byte chunk")
   168  	}
   169  }
   170  
// acLink is a 1 MiB allocation unit; TestArenaCollision allocates these
// repeatedly to force the heap to grow into fresh arenas quickly.
type acLink struct {
	x [1 << 20]byte
}

// arenaCollisionSink keeps TestArenaCollision's allocations reachable so
// the GC cannot reclaim them while the test is still forcing heap growth.
var arenaCollisionSink []*acLink
   176  
// TestArenaCollision checks that mheap.sysAlloc falls back gracefully
// when the address a new heap arena wants is already mapped by something
// else. Because it deliberately damages the process address space, the
// real work runs in a re-executed child process guarded by the
// TEST_ARENA_COLLISION environment variable.
func TestArenaCollision(t *testing.T) {
	testenv.MustHaveExec(t)

	// Test that mheap.sysAlloc handles collisions with other
	// memory mappings.
	if os.Getenv("TEST_ARENA_COLLISION") != "1" {
		// Parent: re-run only this test in a child with the
		// environment variable set, and inspect its output.
		cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=TestArenaCollision", "-test.v"))
		cmd.Env = append(cmd.Env, "TEST_ARENA_COLLISION=1")
		out, err := cmd.CombinedOutput()
		if race.Enabled {
			// This test runs the runtime out of hint
			// addresses, so it will start mapping the
			// heap wherever it can. The race detector
			// doesn't support this, so look for the
			// expected failure.
			if want := "too many address space collisions"; !strings.Contains(string(out), want) {
				t.Fatalf("want %q, got:\n%s", want, string(out))
			}
		} else if !strings.Contains(string(out), "PASS\n") || err != nil {
			t.Fatalf("%s\n(exit status %v)", string(out), err)
		}
		return
	}
	// Child: disallowed collects the [start, end) ranges this test has
	// deliberately mapped over, so allocations can be checked against them.
	// NOTE(review): KeepNArenaHints, MapNextArenaHint, and GetNextArenaHint
	// are test-only helpers exported by package runtime — presumably from
	// its export_test.go; confirm there.
	disallowed := [][2]uintptr{}
	// Drop all but the next 3 hints. 64-bit has a lot of hints,
	// so it would take a lot of memory to go through all of them.
	KeepNArenaHints(3)
	// Consume these 3 hints and force the runtime to find some
	// fallback hints.
	for i := 0; i < 5; i++ {
		// Reserve memory at the next hint so it can't be used
		// for the heap.
		start, end := MapNextArenaHint()
		disallowed = append(disallowed, [2]uintptr{start, end})
		// Allocate until the runtime tries to use the hint we
		// just mapped over.
		hint := GetNextArenaHint()
		for GetNextArenaHint() == hint {
			ac := new(acLink)
			arenaCollisionSink = append(arenaCollisionSink, ac)
			// The allocation must not have fallen into
			// one of the reserved regions.
			p := uintptr(unsafe.Pointer(ac))
			for _, d := range disallowed {
				if d[0] <= p && p < d[1] {
					t.Fatalf("allocation %#x in reserved region [%#x, %#x)", p, d[0], d[1])
				}
			}
		}
	}
}
   228  
   229  var mallocSink uintptr
   230  
   231  func BenchmarkMalloc8(b *testing.B) {
   232  	var x uintptr
   233  	for i := 0; i < b.N; i++ {
   234  		p := new(int64)
   235  		x ^= uintptr(unsafe.Pointer(p))
   236  	}
   237  	mallocSink = x
   238  }
   239  
   240  func BenchmarkMalloc16(b *testing.B) {
   241  	var x uintptr
   242  	for i := 0; i < b.N; i++ {
   243  		p := new([2]int64)
   244  		x ^= uintptr(unsafe.Pointer(p))
   245  	}
   246  	mallocSink = x
   247  }
   248  
   249  func BenchmarkMallocTypeInfo8(b *testing.B) {
   250  	var x uintptr
   251  	for i := 0; i < b.N; i++ {
   252  		p := new(struct {
   253  			p [8 / unsafe.Sizeof(uintptr(0))]*int
   254  		})
   255  		x ^= uintptr(unsafe.Pointer(p))
   256  	}
   257  	mallocSink = x
   258  }
   259  
   260  func BenchmarkMallocTypeInfo16(b *testing.B) {
   261  	var x uintptr
   262  	for i := 0; i < b.N; i++ {
   263  		p := new(struct {
   264  			p [16 / unsafe.Sizeof(uintptr(0))]*int
   265  		})
   266  		x ^= uintptr(unsafe.Pointer(p))
   267  	}
   268  	mallocSink = x
   269  }
   270  
// LargeStruct is a pointer-containing struct (16 slice headers) allocated
// in bulk by BenchmarkMallocLargeStruct.
type LargeStruct struct {
	x [16][]byte
}
   274  
   275  func BenchmarkMallocLargeStruct(b *testing.B) {
   276  	var x uintptr
   277  	for i := 0; i < b.N; i++ {
   278  		p := make([]LargeStruct, 2)
   279  		x ^= uintptr(unsafe.Pointer(&p[0]))
   280  	}
   281  	mallocSink = x
   282  }
   283  
// n controls how many goroutines the BenchmarkGoroutine* benchmarks spawn.
var n = flag.Int("n", 1000, "number of goroutines")
   285  
   286  func BenchmarkGoroutineSelect(b *testing.B) {
   287  	quit := make(chan struct{})
   288  	read := func(ch chan struct{}) {
   289  		for {
   290  			select {
   291  			case _, ok := <-ch:
   292  				if !ok {
   293  					return
   294  				}
   295  			case <-quit:
   296  				return
   297  			}
   298  		}
   299  	}
   300  	benchHelper(b, *n, read)
   301  }
   302  
   303  func BenchmarkGoroutineBlocking(b *testing.B) {
   304  	read := func(ch chan struct{}) {
   305  		for {
   306  			if _, ok := <-ch; !ok {
   307  				return
   308  			}
   309  		}
   310  	}
   311  	benchHelper(b, *n, read)
   312  }
   313  
   314  func BenchmarkGoroutineForRange(b *testing.B) {
   315  	read := func(ch chan struct{}) {
   316  		for range ch {
   317  		}
   318  	}
   319  	benchHelper(b, *n, read)
   320  }
   321  
   322  func benchHelper(b *testing.B, n int, read func(chan struct{})) {
   323  	m := make([]chan struct{}, n)
   324  	for i := range m {
   325  		m[i] = make(chan struct{}, 1)
   326  		go read(m[i])
   327  	}
   328  	b.StopTimer()
   329  	b.ResetTimer()
   330  	GC()
   331  
   332  	for i := 0; i < b.N; i++ {
   333  		for _, ch := range m {
   334  			if ch != nil {
   335  				ch <- struct{}{}
   336  			}
   337  		}
   338  		time.Sleep(10 * time.Millisecond)
   339  		b.StartTimer()
   340  		GC()
   341  		b.StopTimer()
   342  	}
   343  
   344  	for _, ch := range m {
   345  		close(ch)
   346  	}
   347  	time.Sleep(10 * time.Millisecond)
   348  }
   349  
   350  func BenchmarkGoroutineIdle(b *testing.B) {
   351  	quit := make(chan struct{})
   352  	fn := func() {
   353  		<-quit
   354  	}
   355  	for i := 0; i < *n; i++ {
   356  		go fn()
   357  	}
   358  
   359  	GC()
   360  	b.ResetTimer()
   361  
   362  	for i := 0; i < b.N; i++ {
   363  		GC()
   364  	}
   365  
   366  	b.StopTimer()
   367  	close(quit)
   368  	time.Sleep(10 * time.Millisecond)
   369  }
   370  

View as plain text