Source file src/cmd/compile/internal/ssa/writebarrier.go

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssa

import (
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"cmd/internal/src"
	"fmt"
)

// A ZeroRegion records parts of an object which are known to be zero.
// A ZeroRegion only applies to a single memory state.
// Each bit in mask is set if the corresponding pointer-sized word of
// the base object is known to be zero.
// In other words, if mask & (1<<i) != 0, then [base+i*ptrSize, base+(i+1)*ptrSize)
// is known to be zero.
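//
// For example, with 8-byte pointers (ptrSize == 8), a ZeroRegion with
// mask == 0b1011 records that words 0, 1, and 3 of base are zero (a sketch;
// p is a hypothetical base value):
//
//	z := ZeroRegion{base: p, mask: 0b1011}
//	z.mask>>0&1 == 1 // [p+0, p+8) known to be zero
//	z.mask>>2&1 == 0 // [p+16, p+24) unknown
//	z.mask>>3&1 == 1 // [p+24, p+32) known to be zero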
type ZeroRegion struct {
	base *Value
	mask uint64
}

// needwb reports whether we need a write barrier for store op v.
// v must be Store/Move/Zero.
// zeroes provides known zero information (keyed by ID of memory-type values).
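//
// For example (a sketch of the OpStore case below): after p := new(T),
// storing a pointer to read-only global data into a field of the fresh,
// still-zeroed object needs no barrier:
//
//	p := new(struct{ s string })
//	p.s = "hello" // pointer to RODATA stored into known-zero memory;
//	              // needwb reports false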
func needwb(v *Value, zeroes map[ID]ZeroRegion) bool {
	t, ok := v.Aux.(*types.Type)
	if !ok {
		v.Fatalf("store aux is not a type: %s", v.LongString())
	}
	if !t.HasPointers() {
		return false
	}
	if IsStackAddr(v.Args[0]) {
		return false // write on stack doesn't need write barrier
	}
	if v.Op == OpMove && IsReadOnlyGlobalAddr(v.Args[1]) && IsNewObject(v.Args[0], v.MemoryArg()) {
		// Copying data from readonly memory into a fresh object doesn't need a write barrier.
		return false
	}
	if v.Op == OpStore && IsGlobalAddr(v.Args[1]) {
		// Storing pointers to non-heap locations into zeroed memory doesn't need a write barrier.
		ptr := v.Args[0]
		var off int64
		size := v.Aux.(*types.Type).Size()
		for ptr.Op == OpOffPtr {
			off += ptr.AuxInt
			ptr = ptr.Args[0]
		}
		ptrSize := v.Block.Func.Config.PtrSize
		if off%ptrSize != 0 || size%ptrSize != 0 {
			v.Fatalf("unaligned pointer write")
		}
		if off < 0 || off+size > 64*ptrSize {
			// write goes off end of tracked offsets
			return true
		}
		z := zeroes[v.MemoryArg().ID]
		if ptr != z.base {
			return true
		}
		for i := off; i < off+size; i += ptrSize {
			if z.mask>>uint(i/ptrSize)&1 == 0 {
				return true // not known to be zero
			}
		}
		// All written locations are known to be zero - write barrier not needed.
		return false
	}
	return true
}

// writebarrier pass inserts write barriers for store ops (Store, Move, Zero)
// when necessary (the conditions tested by needwb above). It rewrites store
// ops to branches and runtime calls, like:
//
// if writeBarrier.enabled {
//   gcWriteBarrier(ptr, val)	// Not a regular Go call
// } else {
//   *ptr = val
// }
//
// A sequence of WB stores for many pointer fields of a single type will
// be emitted together, with a single branch.
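//
// For example, a pair of barriered pointer stores such as p.f = x; p.g = y
// expands to a single flag test guarding both barriers (a sketch):
//
// if writeBarrier.enabled {
//   gcWriteBarrier(&p.f, x)
//   gcWriteBarrier(&p.g, y)
// } else {
//   p.f = x
//   p.g = y
// }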
func writebarrier(f *Func) {
	if !f.fe.UseWriteBarrier() {
		return
	}

	var sb, sp, wbaddr, const0 *Value
	var typedmemmove, typedmemclr, gcWriteBarrier *obj.LSym
	var stores, after []*Value
	var sset *sparseSet
	var storeNumber []int32

	zeroes := f.computeZeroMap()
	for _, b := range f.Blocks { // range loop is safe since the blocks we added contain no stores to expand
		// first, identify all the stores that need a write barrier.
		// mark them with WB ops temporarily. record presence of WB ops.
		nWBops := 0 // count of temporarily created WB ops remaining to be rewritten in the current block
		for _, v := range b.Values {
			switch v.Op {
			case OpStore, OpMove, OpZero:
				if needwb(v, zeroes) {
					switch v.Op {
					case OpStore:
						v.Op = OpStoreWB
					case OpMove:
						v.Op = OpMoveWB
					case OpZero:
						v.Op = OpZeroWB
					}
					nWBops++
				}
			}
		}
		if nWBops == 0 {
			continue
		}

		if wbaddr == nil {
			// lazily initialize global values for write barrier test and calls
			// find SB and SP values in entry block
			initpos := f.Entry.Pos
			sp, sb = f.spSb()
			wbsym := f.fe.Syslook("writeBarrier")
			wbaddr = f.Entry.NewValue1A(initpos, OpAddr, f.Config.Types.UInt32Ptr, wbsym, sb)
			gcWriteBarrier = f.fe.Syslook("gcWriteBarrier")
			typedmemmove = f.fe.Syslook("typedmemmove")
			typedmemclr = f.fe.Syslook("typedmemclr")
			const0 = f.ConstInt32(f.Config.Types.UInt32, 0)

			// allocate auxiliary data structures for computing store order
			sset = f.newSparseSet(f.NumValues())
			defer f.retSparseSet(sset)
			storeNumber = make([]int32, f.NumValues())
		}

		// order values in store order
		b.Values = storeOrder(b.Values, sset, storeNumber)

		firstSplit := true
	again:
		// find the start and end of the last contiguous WB store sequence.
		// a branch will be inserted there. values after it will be moved
		// to a new block.
		var last *Value
		var start, end int
		values := b.Values
	FindSeq:
		for i := len(values) - 1; i >= 0; i-- {
			w := values[i]
			switch w.Op {
			case OpStoreWB, OpMoveWB, OpZeroWB:
				start = i
				if last == nil {
					last = w
					end = i + 1
				}
			case OpVarDef, OpVarLive, OpVarKill:
				continue
			default:
				if last == nil {
					continue
				}
				break FindSeq
			}
		}
		stores = append(stores[:0], b.Values[start:end]...) // copy to avoid aliasing
		after = append(after[:0], b.Values[end:]...)
		b.Values = b.Values[:start]

		// find the memory before the WB stores
		mem := stores[0].MemoryArg()
		pos := stores[0].Pos
		bThen := f.NewBlock(BlockPlain)
		bElse := f.NewBlock(BlockPlain)
		bEnd := f.NewBlock(b.Kind)
		bThen.Pos = pos
		bElse.Pos = pos
		bEnd.Pos = b.Pos
		b.Pos = pos

		// set up control flow for end block
		bEnd.CopyControls(b)
		bEnd.Likely = b.Likely
		for _, e := range b.Succs {
			bEnd.Succs = append(bEnd.Succs, e)
			e.b.Preds[e.i].b = bEnd
		}

		// set up control flow for write barrier test
		// load word, test word, avoiding partial register write from load byte.
		cfgtypes := &f.Config.Types
		flag := b.NewValue2(pos, OpLoad, cfgtypes.UInt32, wbaddr, mem)
		flag = b.NewValue2(pos, OpNeq32, cfgtypes.Bool, flag, const0)
		b.Kind = BlockIf
		b.SetControl(flag)
		b.Likely = BranchUnlikely
		b.Succs = b.Succs[:0]
		b.AddEdgeTo(bThen)
		b.AddEdgeTo(bElse)
		// TODO: For OpStoreWB and the buffered write barrier,
		// we could move the write out of the write barrier,
		// which would lead to fewer branches. We could do
		// something similar to OpZeroWB, since the runtime
		// could provide just the barrier half and then we
		// could unconditionally do an OpZero (which could
		// also generate better zeroing code). OpMoveWB is
		// trickier and would require changing how
		// cgoCheckMemmove works.
		bThen.AddEdgeTo(bEnd)
		bElse.AddEdgeTo(bEnd)

		// for each write barrier store, append write barrier version to bThen
		// and simple store version to bElse
		memThen := mem
		memElse := mem
		// If the source of a MoveWB is volatile (will be clobbered by a
		// function call), we need to copy it to a temporary location, as
		// marshaling the args of typedmemmove might clobber the value we're
		// trying to move.
		// Look for volatile sources and copy them to temporaries before we
		// emit any call.
		// There is unlikely to be more than one of them, so just do a linear
		// search instead of using a map.
		type volatileCopy struct {
			src *Value // address of original volatile value
			tmp *Value // address of temporary we've copied the volatile value into
		}
		var volatiles []volatileCopy
	copyLoop:
		for _, w := range stores {
			if w.Op == OpMoveWB {
				val := w.Args[1]
				if isVolatile(val) {
					for _, c := range volatiles {
						if val == c.src {
							continue copyLoop // already copied
						}
					}

					t := val.Type.Elem()
					tmp := f.fe.Auto(w.Pos, t)
					memThen = bThen.NewValue1A(w.Pos, OpVarDef, types.TypeMem, tmp, memThen)
					tmpaddr := bThen.NewValue2A(w.Pos, OpLocalAddr, t.PtrTo(), tmp, sp, memThen)
					siz := t.Size()
					memThen = bThen.NewValue3I(w.Pos, OpMove, types.TypeMem, siz, tmpaddr, val, memThen)
					memThen.Aux = t
					volatiles = append(volatiles, volatileCopy{val, tmpaddr})
				}
			}
		}

		for _, w := range stores {
			ptr := w.Args[0]
			pos := w.Pos

			var fn *obj.LSym
			var typ *obj.LSym
			var val *Value
			switch w.Op {
			case OpStoreWB:
				val = w.Args[1]
				nWBops--
			case OpMoveWB:
				fn = typedmemmove
				val = w.Args[1]
				typ = w.Aux.(*types.Type).Symbol()
				nWBops--
			case OpZeroWB:
				fn = typedmemclr
				typ = w.Aux.(*types.Type).Symbol()
				nWBops--
			case OpVarDef, OpVarLive, OpVarKill:
			}

			// then block: emit write barrier call
			switch w.Op {
			case OpStoreWB, OpMoveWB, OpZeroWB:
				if w.Op == OpStoreWB {
					memThen = bThen.NewValue3A(pos, OpWB, types.TypeMem, gcWriteBarrier, ptr, val, memThen)
				} else {
					srcval := val
					if w.Op == OpMoveWB && isVolatile(srcval) {
						for _, c := range volatiles {
							if srcval == c.src {
								srcval = c.tmp
								break
							}
						}
					}
					memThen = wbcall(pos, bThen, fn, typ, ptr, srcval, memThen, sp, sb)
				}
				// Note that we set up a write barrier function call.
				f.fe.SetWBPos(pos)
			case OpVarDef, OpVarLive, OpVarKill:
				memThen = bThen.NewValue1A(pos, w.Op, types.TypeMem, w.Aux, memThen)
			}

			// else block: normal store
			switch w.Op {
			case OpStoreWB:
				memElse = bElse.NewValue3A(pos, OpStore, types.TypeMem, w.Aux, ptr, val, memElse)
			case OpMoveWB:
				memElse = bElse.NewValue3I(pos, OpMove, types.TypeMem, w.AuxInt, ptr, val, memElse)
				memElse.Aux = w.Aux
			case OpZeroWB:
				memElse = bElse.NewValue2I(pos, OpZero, types.TypeMem, w.AuxInt, ptr, memElse)
				memElse.Aux = w.Aux
			case OpVarDef, OpVarLive, OpVarKill:
				memElse = bElse.NewValue1A(pos, w.Op, types.TypeMem, w.Aux, memElse)
			}
		}

		// mark volatile temps dead
		for _, c := range volatiles {
			tmpNode := c.tmp.Aux
			memThen = bThen.NewValue1A(memThen.Pos, OpVarKill, types.TypeMem, tmpNode, memThen)
		}

		// merge memory
		// Splice memory Phi into the last memory of the original sequence,
		// which may be used in subsequent blocks. Other memories in the
		// sequence must be dead after this block since there can be only
		// one memory live.
		bEnd.Values = append(bEnd.Values, last)
		last.Block = bEnd
		last.reset(OpPhi)
		last.Pos = last.Pos.WithNotStmt()
		last.Type = types.TypeMem
		last.AddArg(memThen)
		last.AddArg(memElse)
		for _, w := range stores {
			if w != last {
				w.resetArgs()
			}
		}
		for _, w := range stores {
			if w != last {
				f.freeValue(w)
			}
		}

		// put values after the store sequence into the end block
		bEnd.Values = append(bEnd.Values, after...)
		for _, w := range after {
			w.Block = bEnd
		}

		// Preemption is unsafe between loading the write
		// barrier-enabled flag and performing the write
		// because that would allow a GC phase transition,
		// which would invalidate the flag. Remember the
		// conditional block so liveness analysis can disable
		// safe-points. This is somewhat subtle because we're
		// splitting b bottom-up.
		if firstSplit {
			// Add b itself.
			b.Func.WBLoads = append(b.Func.WBLoads, b)
			firstSplit = false
		} else {
			// We've already split b, so we just pushed a
			// write barrier test into bEnd.
			b.Func.WBLoads = append(b.Func.WBLoads, bEnd)
		}

		// if we have more stores in this block, do this block again
		if nWBops > 0 {
			goto again
		}
	}
}

// computeZeroMap returns a map from an ID of a memory value to
// a set of locations that are known to be zeroed at that memory value.
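//
// For example (a sketch, with 8-byte pointers and hypothetical value names):
//
//	mem1 = StaticCall {runtime.newobject} ...
//	v    = Load <*[4]*int> ... mem1   // new object: zeroes[mem1.ID] = {v, 0b1111}
//	mem2 = Store {*int} (OffPtr [8] v) p mem1
//	                                  // word 1 written: zeroes[mem2.ID] = {v, 0b1101}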
func (f *Func) computeZeroMap() map[ID]ZeroRegion {
	ptrSize := f.Config.PtrSize
	// Keep track of which parts of memory are known to be zero.
	// This helps with removing write barriers for various initialization patterns.
	// This analysis is conservative. We only keep track, for each memory state, of
	// which of the first 64 words of a single object are known to be zero.
	zeroes := map[ID]ZeroRegion{}
	// Find new objects.
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			if v.Op != OpLoad {
				continue
			}
			mem := v.MemoryArg()
			if IsNewObject(v, mem) {
				nptr := v.Type.Elem().Size() / ptrSize
				if nptr > 64 {
					nptr = 64
				}
				zeroes[mem.ID] = ZeroRegion{base: v, mask: 1<<uint(nptr) - 1}
			}
		}
	}
	// Find stores to those new objects.
	for {
		changed := false
		for _, b := range f.Blocks {
			// Note: iterating forwards helps convergence, as values are
			// typically (but not always!) in store order.
			for _, v := range b.Values {
				if v.Op != OpStore {
					continue
				}
				z, ok := zeroes[v.MemoryArg().ID]
				if !ok {
					continue
				}
				ptr := v.Args[0]
				var off int64
				size := v.Aux.(*types.Type).Size()
				for ptr.Op == OpOffPtr {
					off += ptr.AuxInt
					ptr = ptr.Args[0]
				}
				if ptr != z.base {
					// Different base object - we don't know anything.
					// We could even be writing to the base object we know
					// about, but through an aliased but offset pointer.
					// So we have to throw all the zero information we have away.
					continue
				}
				// Round to cover any partially written pointer slots.
				// Pointer writes should never be unaligned like this, but non-pointer
				// writes to pointer-containing types will do this.
				if d := off % ptrSize; d != 0 {
					off -= d
					size += d
				}
				if d := size % ptrSize; d != 0 {
					size += ptrSize - d
				}
				// Clip to the 64 words that we track.
				min := off
				max := off + size
				if min < 0 {
					min = 0
				}
				if max > 64*ptrSize {
					max = 64 * ptrSize
				}
				// Clear bits for parts that we are writing (and hence
				// will no longer necessarily be zero).
				for i := min; i < max; i += ptrSize {
					bit := i / ptrSize
					z.mask &^= 1 << uint(bit)
				}
				if z.mask == 0 {
					// No more known zeros - don't bother keeping.
					continue
				}
				// Save updated known zero contents for new store.
				if zeroes[v.ID] != z {
					zeroes[v.ID] = z
					changed = true
				}
			}
		}
		if !changed {
			break
		}
	}
	if f.pass.debug > 0 {
		fmt.Printf("func %s\n", f.Name)
		for mem, z := range zeroes {
			fmt.Printf("  memory=v%d ptr=%v zeromask=%b\n", mem, z.base, z.mask)
		}
	}
	return zeroes
}

// wbcall emits a write barrier runtime call in b and returns the memory.
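//
// The arguments are stored in the usual stack call layout. For a
// typedmemmove call on a 64-bit system where FixedFrameSize() == 0
// (e.g. amd64), the layout is (a sketch):
//
//	sp+0:  typ (uintptr, the *_type argument)
//	sp+8:  ptr (destination)
//	sp+16: val (source)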
func wbcall(pos src.XPos, b *Block, fn, typ *obj.LSym, ptr, val, mem, sp, sb *Value) *Value {
	config := b.Func.Config

	// put arguments on stack
	off := config.ctxt.FixedFrameSize()

	var ACArgs []Param
	if typ != nil { // for typedmemmove
		taddr := b.NewValue1A(pos, OpAddr, b.Func.Config.Types.Uintptr, typ, sb)
		off = round(off, taddr.Type.Alignment())
		arg := b.NewValue1I(pos, OpOffPtr, taddr.Type.PtrTo(), off, sp)
		mem = b.NewValue3A(pos, OpStore, types.TypeMem, ptr.Type, arg, taddr, mem)
		ACArgs = append(ACArgs, Param{Type: b.Func.Config.Types.Uintptr, Offset: int32(off)})
		off += taddr.Type.Size()
	}

	off = round(off, ptr.Type.Alignment())
	arg := b.NewValue1I(pos, OpOffPtr, ptr.Type.PtrTo(), off, sp)
	mem = b.NewValue3A(pos, OpStore, types.TypeMem, ptr.Type, arg, ptr, mem)
	ACArgs = append(ACArgs, Param{Type: ptr.Type, Offset: int32(off)})
	off += ptr.Type.Size()

	if val != nil {
		off = round(off, val.Type.Alignment())
		arg = b.NewValue1I(pos, OpOffPtr, val.Type.PtrTo(), off, sp)
		mem = b.NewValue3A(pos, OpStore, types.TypeMem, val.Type, arg, val, mem)
		ACArgs = append(ACArgs, Param{Type: val.Type, Offset: int32(off)})
		off += val.Type.Size()
	}
	off = round(off, config.PtrSize)

	// issue call
	mem = b.NewValue1A(pos, OpStaticCall, types.TypeMem, StaticAuxCall(fn, ACArgs, nil), mem)
	mem.AuxInt = off - config.ctxt.FixedFrameSize()
	return mem
}

// round rounds o up to a multiple of r; r must be a power of 2.
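// For example:
//
//	round(0, 8) == 0
//	round(1, 8) == 8
//	round(13, 4) == 16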
func round(o int64, r int64) int64 {
	return (o + r - 1) &^ (r - 1)
}

// IsStackAddr reports whether v is known to be an address of a stack slot.
func IsStackAddr(v *Value) bool {
	for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
		v = v.Args[0]
	}
	switch v.Op {
	case OpSP, OpLocalAddr, OpSelectNAddr:
		return true
	}
	return false
}

// IsGlobalAddr reports whether v is known to be an address of a global (or nil).
func IsGlobalAddr(v *Value) bool {
	if v.Op == OpAddr && v.Args[0].Op == OpSB {
		return true // address of a global
	}
	if v.Op == OpConstNil {
		return true
	}
	if v.Op == OpLoad && IsReadOnlyGlobalAddr(v.Args[0]) {
		return true // loading from a read-only global - the resulting address can't be a heap address.
	}
	return false
}

// IsReadOnlyGlobalAddr reports whether v is known to be an address of a read-only global.
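//
// For example (a sketch), the address of a string literal's backing data,
//
//	(Addr {go.string."hello"} (SB))
//
// refers to an SRODATA symbol, so it is read-only.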
func IsReadOnlyGlobalAddr(v *Value) bool {
	if v.Op == OpConstNil {
		// Nil pointers are read only. See issue 33438.
		return true
	}
	if v.Op == OpAddr && v.Aux.(*obj.LSym).Type == objabi.SRODATA {
		return true
	}
	return false
}

// IsNewObject reports whether v is a pointer to a freshly allocated & zeroed object at memory state mem.
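//
// It matches a load of the return value of a runtime.newobject call
// (a sketch):
//
//	mem = StaticCall {runtime.newobject} ...
//	v   = Load <*T> (OffPtr [off] (SP)) mem
//
// where off is the offset of the return value slot,
// c.ctxt.FixedFrameSize()+c.RegSize (8 on amd64).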
func IsNewObject(v *Value, mem *Value) bool {
	if v.Op != OpLoad {
		return false
	}
	if v.MemoryArg() != mem {
		return false
	}
	if mem.Op != OpStaticCall {
		return false
	}
	if !isSameCall(mem.Aux, "runtime.newobject") {
		return false
	}
	if v.Args[0].Op != OpOffPtr {
		return false
	}
	if v.Args[0].Args[0].Op != OpSP {
		return false
	}
	c := v.Block.Func.Config
	if v.Args[0].AuxInt != c.ctxt.FixedFrameSize()+c.RegSize { // offset of return value
		return false
	}
	return true
}

// IsSanitizerSafeAddr reports whether v is known to be an address
// that doesn't need instrumentation.
func IsSanitizerSafeAddr(v *Value) bool {
	for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
		v = v.Args[0]
	}
	switch v.Op {
	case OpSP, OpLocalAddr, OpSelectNAddr:
		// Stack addresses are always safe.
		return true
	case OpITab, OpStringPtr, OpGetClosurePtr:
		// Itabs, string data, and closure fields are
		// read-only once initialized.
		return true
	case OpAddr:
		return v.Aux.(*obj.LSym).Type == objabi.SRODATA
	}
	return false
}

// isVolatile reports whether v is a pointer to the argument region on the stack,
// which will be clobbered by a function call.
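//
// For example (a sketch), an outgoing argument slot such as
//
//	(OffPtr [16] (SP))
//
// is volatile: marshaling the arguments of a runtime call (as in wbcall)
// may overwrite it.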
func isVolatile(v *Value) bool {
	for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy || v.Op == OpSelectNAddr {
		v = v.Args[0]
	}
	return v.Op == OpSP
}