Source file src/runtime/lockrank_on.go

// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build goexperiment.staticlockranking

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

const staticLockRanking = true

// worldIsStopped is accessed atomically to track world-stops. 1 == world
// stopped.
var worldIsStopped atomic.Uint32

// lockRankStruct is embedded in mutex
type lockRankStruct struct {
	// static lock ranking of the lock
	rank lockRank
	// pad field to make sure lockRankStruct is a multiple of 8 bytes, even on
	// 32-bit systems.
	pad int
}
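
// Illustrative sketch (not part of this file): runtime2.go embeds
// lockRankStruct at the start of mutex, so every runtime lock carries
// its rank:
//
//	type mutex struct {
//		lockRankStruct
//		key uintptr
//	}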

// lockInit(l *mutex, rank lockRank) sets the rank of a lock before it is used.
// If there is no clear place to initialize a lock, then the rank of a lock can
// be specified during the lock call itself via lockWithRank(l *mutex, rank lockRank).
func lockInit(l *mutex, rank lockRank) {
	l.rank = rank
}
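
// Illustrative usage (from elsewhere in the runtime, e.g. schedinit):
//
//	lockInit(&sched.lock, lockRankSched)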

func getLockRank(l *mutex) lockRank {
	return l.rank
}

// lockWithRank is like lock(l), but allows the caller to specify a lock rank
// when acquiring a non-static lock.
//
// Note that we need to be careful about stack splits:
//
// This function is not nosplit, thus it may split at function entry. This may
// introduce a new edge in the lock order, but it is no different from any
// other (nosplit) call before this call (including the call to lock() itself).
//
// However, we switch to the systemstack to record the lock held to ensure that
// we record an accurate lock ordering. e.g., without systemstack, a stack
// split on entry to lock2() would record stack split locks as taken after l,
// even though l is not actually locked yet.
func lockWithRank(l *mutex, rank lockRank) {
	if l == &debuglock || l == &paniclk || l == &raceFiniLock {
		// debuglock is only used for println/printlock(). Don't do lock
		// rank recording for it, since print/println are used when
		// printing out a lock ordering problem below.
		//
		// paniclk is only used for fatal throw/panic. Don't do lock
		// rank recording for it, since we throw after reporting a
		// lock ordering problem. Additionally, paniclk may be taken
		// after effectively any lock (anywhere we might panic), which
		// the partial order doesn't cover.
		//
		// raceFiniLock is held while exiting when running
		// the race detector. Don't do lock rank recording for it,
		// since we are exiting.
		lock2(l)
		return
	}
	if rank == 0 {
		rank = lockRankLeafRank
	}
	gp := getg()
	// Log the new class.
	systemstack(func() {
		i := gp.m.locksHeldLen
		if i >= len(gp.m.locksHeld) {
			throw("too many locks held concurrently for rank checking")
		}
		gp.m.locksHeld[i].rank = rank
		gp.m.locksHeld[i].lockAddr = uintptr(unsafe.Pointer(l))
		gp.m.locksHeldLen++

		// i is the index of the lock being acquired
		if i > 0 {
			checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
		}
		lock2(l)
	})
}
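
// Illustrative sketch (not part of this file): assuming two ranked locks
// a and b with getLockRank(&a) < getLockRank(&b), ranked acquisition must
// follow increasing rank order:
//
//	lock(&a)
//	lock(&b) // ok: rank increases
//	unlock(&b)
//	unlock(&a)
//
//	lock(&b)
//	lock(&a) // throws "lock ordering problem"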

// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func printHeldLocks(gp *g) {
	if gp.m.locksHeldLen == 0 {
		println("<none>")
		return
	}

	for j, held := range gp.m.locksHeld[:gp.m.locksHeldLen] {
		println(j, ":", held.rank.String(), held.rank, unsafe.Pointer(held.lockAddr))
	}
}
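
// The output is one line per held lock: index, rank name, numeric rank,
// and lock address, e.g. (illustrative values):
//
//	0 : sched 31 0x...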

// acquireLockRank acquires a rank which is not associated with a mutex lock.
//
// This function may be called in nosplit context and thus must be nosplit.
//
//go:nosplit
func acquireLockRank(rank lockRank) {
	gp := getg()
	// Log the new class. See comment on lockWithRank.
	systemstack(func() {
		i := gp.m.locksHeldLen
		if i >= len(gp.m.locksHeld) {
			throw("too many locks held concurrently for rank checking")
		}
		gp.m.locksHeld[i].rank = rank
		gp.m.locksHeld[i].lockAddr = 0
		gp.m.locksHeldLen++

		// i is the index of the lock being acquired
		if i > 0 {
			checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
		}
	})
}
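
// Illustrative pairing (hypothetical rank name): a pseudo-lock with no
// mutex brackets its critical section so ordering is still checked:
//
//	acquireLockRank(lockRankSomePhase)
//	// ... code that must respect the rank order ...
//	releaseLockRank(lockRankSomePhase)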

// checkRanks checks if goroutine gp, which has most recently acquired a lock
// with rank 'prevRank', can now acquire a lock with rank 'rank'.
//
//go:systemstack
func checkRanks(gp *g, prevRank, rank lockRank) {
	rankOK := false
	if rank < prevRank {
		// If rank < prevRank, then we definitely have a rank error
		rankOK = false
	} else if rank == lockRankLeafRank {
		// If new lock is a leaf lock, then the preceding lock can
		// be anything except another leaf lock.
		rankOK = prevRank < lockRankLeafRank
	} else {
		// We've now verified the total lock ranking, but we also
		// enforce the partial ordering specified by lockPartialOrder.
		// Two locks with the same rank can only be acquired at the
		// same time if explicitly listed in the lockPartialOrder
		// table.
		list := lockPartialOrder[rank]
		for _, entry := range list {
			if entry == prevRank {
				rankOK = true
				break
			}
		}
	}
	if !rankOK {
		printlock()
		println(gp.m.procid, " ======")
		printHeldLocks(gp)
		throw("lock ordering problem")
	}
}
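
// Illustrative sketch of the table consulted above (the real
// lockPartialOrder lives in lockrank.go): each rank maps to the ranks
// that may legally be held immediately before it is acquired:
//
//	var lockPartialOrder [][]lockRank = [][]lockRank{
//		lockRankSched: {lockRankSysmon, ...},
//		...
//	}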

// See comment on lockWithRank regarding stack splitting.
func unlockWithRank(l *mutex) {
	if l == &debuglock || l == &paniclk || l == &raceFiniLock {
		// See comment at beginning of lockWithRank.
		unlock2(l)
		return
	}
	gp := getg()
	systemstack(func() {
		found := false
		for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
			if gp.m.locksHeld[i].lockAddr == uintptr(unsafe.Pointer(l)) {
				found = true
				copy(gp.m.locksHeld[i:gp.m.locksHeldLen-1], gp.m.locksHeld[i+1:gp.m.locksHeldLen])
				gp.m.locksHeldLen--
				break
			}
		}
		if !found {
			println(gp.m.procid, ":", l.rank.String(), l.rank, l)
			throw("unlock without matching lock acquire")
		}
		unlock2(l)
	})
}

// releaseLockRank releases a rank which is not associated with a mutex lock.
//
// This function may be called in nosplit context and thus must be nosplit.
//
//go:nosplit
func releaseLockRank(rank lockRank) {
	gp := getg()
	systemstack(func() {
		found := false
		for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
			if gp.m.locksHeld[i].rank == rank && gp.m.locksHeld[i].lockAddr == 0 {
				found = true
				copy(gp.m.locksHeld[i:gp.m.locksHeldLen-1], gp.m.locksHeld[i+1:gp.m.locksHeldLen])
				gp.m.locksHeldLen--
				break
			}
		}
		if !found {
			println(gp.m.procid, ":", rank.String(), rank)
			throw("lockRank release without matching lockRank acquire")
		}
	})
}

// nosplit because it may be called from nosplit contexts.
//
//go:nosplit
func lockWithRankMayAcquire(l *mutex, rank lockRank) {
	gp := getg()
	if gp.m.locksHeldLen == 0 {
		// No possibility of lock ordering problem if no other locks held
		return
	}

	systemstack(func() {
		i := gp.m.locksHeldLen
		if i >= len(gp.m.locksHeld) {
			throw("too many locks held concurrently for rank checking")
		}
		// Temporarily add this lock to the locksHeld list, so
		// checkRanks() will print out the list, including this lock,
		// if there is a lock ordering problem.
		gp.m.locksHeld[i].rank = rank
		gp.m.locksHeld[i].lockAddr = uintptr(unsafe.Pointer(l))
		gp.m.locksHeldLen++
		checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
		gp.m.locksHeldLen--
	})
}
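
// Illustrative usage (as in chan.go): declare a lock that may be taken
// later so the ordering edge is checked even on paths that never lock:
//
//	lockWithRankMayAcquire(&c.lock, getLockRank(&c.lock))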

// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func checkLockHeld(gp *g, l *mutex) bool {
	for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
		if gp.m.locksHeld[i].lockAddr == uintptr(unsafe.Pointer(l)) {
			return true
		}
	}
	return false
}

// assertLockHeld throws if l is not held by the caller.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func assertLockHeld(l *mutex) {
	gp := getg()

	held := checkLockHeld(gp, l)
	if held {
		return
	}

	// Crash from system stack to avoid splits that may cause
	// additional issues.
	systemstack(func() {
		printlock()
		print("caller requires lock ", l, " (rank ", l.rank.String(), "), holding:\n")
		printHeldLocks(gp)
		throw("not holding required lock!")
	})
}
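
// Illustrative usage: a function documented as "sched.lock must be held"
// can enforce that precondition directly:
//
//	assertLockHeld(&sched.lock)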

// assertRankHeld throws if a mutex with rank r is not held by the caller.
//
// This is less precise than assertLockHeld, but can be used in places where a
// pointer to the exact mutex is not available.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func assertRankHeld(r lockRank) {
	gp := getg()

	for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
		if gp.m.locksHeld[i].rank == r {
			return
		}
	}

	// Crash from system stack to avoid splits that may cause
	// additional issues.
	systemstack(func() {
		printlock()
		print("caller requires lock with rank ", r.String(), ", holding:\n")
		printHeldLocks(gp)
		throw("not holding required lock!")
	})
}

// worldStopped notes that the world is stopped.
//
// Caller must hold worldsema.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func worldStopped() {
	if stopped := worldIsStopped.Add(1); stopped != 1 {
		systemstack(func() {
			print("world stop count=", stopped, "\n")
			throw("recursive world stop")
		})
	}
}

// worldStarted notes that the world is starting.
//
// Caller must hold worldsema.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func worldStarted() {
	if stopped := worldIsStopped.Add(-1); stopped != 0 {
		systemstack(func() {
			print("world stop count=", stopped, "\n")
			throw("released non-stopped world stop")
		})
	}
}
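
// Illustrative sketch of the callers: stopTheWorldWithSema calls
// worldStopped once the world is stopped, and startTheWorldWithSema
// calls worldStarted before restarting it; worldsema is held across the
// whole window:
//
//	semacquire(&worldsema)
//	stopTheWorldWithSema(...)  // ends by calling worldStopped()
//	// ... world is stopped ...
//	startTheWorldWithSema(...) // begins by calling worldStarted()
//	semrelease(&worldsema)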

// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func checkWorldStopped() bool {
	stopped := worldIsStopped.Load()
	if stopped > 1 {
		systemstack(func() {
			print("inconsistent world stop count=", stopped, "\n")
			throw("inconsistent world stop count")
		})
	}

	return stopped == 1
}

// assertWorldStopped throws if the world is not stopped. It does not check
// which M stopped the world.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func assertWorldStopped() {
	if checkWorldStopped() {
		return
	}

	throw("world not stopped")
}

// assertWorldStoppedOrLockHeld throws if the world is not stopped and the
// passed lock is not held.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func assertWorldStoppedOrLockHeld(l *mutex) {
	if checkWorldStopped() {
		return
	}

	gp := getg()
	held := checkLockHeld(gp, l)
	if held {
		return
	}

	// Crash from system stack to avoid splits that may cause
	// additional issues.
	systemstack(func() {
		printlock()
		print("caller requires world stop or lock ", l, " (rank ", l.rank.String(), "), holding:\n")
		println("<no world stop>")
		printHeldLocks(gp)
		throw("no world stop or required lock!")
	})
}
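
// Illustrative usage (hypothetical caller): guards state mutated either
// under a lock or during a stop-the-world window:
//
//	assertWorldStoppedOrLockHeld(&mheap_.lock)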