Black Lives Matter. Support the Equal Justice Initiative.

Source file src/cmd/compile/internal/gc/pgen.go

Documentation: cmd/compile/internal/gc

     1  // Copyright 2011 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package gc
     6  
     7  import (
     8  	"cmd/compile/internal/ssa"
     9  	"cmd/compile/internal/types"
    10  	"cmd/internal/dwarf"
    11  	"cmd/internal/obj"
    12  	"cmd/internal/objabi"
    13  	"cmd/internal/src"
    14  	"cmd/internal/sys"
    15  	"internal/race"
    16  	"math/rand"
    17  	"sort"
    18  	"sync"
    19  	"time"
    20  )
    21  
    22  // "Portable" code generation.
    23  
    24  var (
    25  	nBackendWorkers int     // number of concurrent backend workers, set by a compiler flag
    26  	compilequeue    []*Node // functions waiting to be compiled
    27  )
    28  
// emitptrargsmap emits the liveness bitmap for fn's argument area as a
// ".args_stackmap" RODATA symbol. It is called (via funccompile) only
// for functions with no body, whose implementation lives elsewhere
// (e.g. assembly), so the runtime can still scan their arguments.
func emitptrargsmap(fn *Node) {
	// Skip blank functions and linkname'd functions; their stackmaps
	// are not needed / are provided elsewhere.
	if fn.funcname() == "_" || fn.Func.Nname.Sym.Linkname != "" {
		return
	}
	lsym := Ctxt.Lookup(fn.Func.lsym.Name + ".args_stackmap")

	// One bit per pointer-sized word of the argument area. The vector
	// is allocated at twice that size so the same bv can also carry
	// the results bits added below.
	nptr := int(fn.Type.ArgWidth() / int64(Widthptr))
	bv := bvalloc(int32(nptr) * 2)
	// One bitmap for the call point; a second for the return point
	// when the function has results.
	nbitmap := 1
	if fn.Type.NumResults() > 0 {
		nbitmap = 2
	}
	// Header: number of bitmaps, then bits per bitmap.
	off := duint32(lsym, 0, uint32(nbitmap))
	off = duint32(lsym, off, uint32(bv.n))

	// First bitmap: receiver (if any) plus parameters.
	if fn.IsMethod() {
		onebitwalktype1(fn.Type.Recvs(), 0, bv)
	}
	if fn.Type.NumParams() > 0 {
		onebitwalktype1(fn.Type.Params(), 0, bv)
	}
	off = dbvec(lsym, off, bv)

	// Second bitmap: results. Note bv is not cleared, so the argument
	// bits remain set (arguments are still live at the return point).
	if fn.Type.NumResults() > 0 {
		onebitwalktype1(fn.Type.Results(), 0, bv)
		off = dbvec(lsym, off, bv)
	}

	ggloblsym(lsym, int32(off), obj.RODATA|obj.LOCAL)
}
    59  
    60  // cmpstackvarlt reports whether the stack variable a sorts before b.
    61  //
    62  // Sort the list of stack variables. Autos after anything else,
    63  // within autos, unused after used, within used, things with
    64  // pointers first, zeroed things first, and then decreasing size.
    65  // Because autos are laid out in decreasing addresses
    66  // on the stack, pointers first, zeroed things first and decreasing size
    67  // really means, in memory, things with pointers needing zeroing at
    68  // the top of the stack and increasing in size.
    69  // Non-autos sort on offset.
    70  func cmpstackvarlt(a, b *Node) bool {
    71  	if (a.Class() == PAUTO) != (b.Class() == PAUTO) {
    72  		return b.Class() == PAUTO
    73  	}
    74  
    75  	if a.Class() != PAUTO {
    76  		return a.Xoffset < b.Xoffset
    77  	}
    78  
    79  	if a.Name.Used() != b.Name.Used() {
    80  		return a.Name.Used()
    81  	}
    82  
    83  	ap := a.Type.HasPointers()
    84  	bp := b.Type.HasPointers()
    85  	if ap != bp {
    86  		return ap
    87  	}
    88  
    89  	ap = a.Name.Needzero()
    90  	bp = b.Name.Needzero()
    91  	if ap != bp {
    92  		return ap
    93  	}
    94  
    95  	if a.Type.Width != b.Type.Width {
    96  		return a.Type.Width > b.Type.Width
    97  	}
    98  
    99  	return a.Sym.Name < b.Sym.Name
   100  }
   101  
   102  // byStackvar implements sort.Interface for []*Node using cmpstackvarlt.
   103  type byStackVar []*Node
   104  
   105  func (s byStackVar) Len() int           { return len(s) }
   106  func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
   107  func (s byStackVar) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
   108  
// AllocFrame lays out f's stack frame: it marks which PAUTO variables
// are actually used, sorts fn.Dcl with cmpstackvarlt, assigns each
// used auto a (negative) frame offset, and records the total frame
// size in s.stksize and the size of the pointer-containing prefix in
// s.stkptrsize.
func (s *ssafn) AllocFrame(f *ssa.Func) {
	s.stksize = 0
	s.stkptrsize = 0
	fn := s.curfn.Func

	// Mark the PAUTO's unused.
	for _, ln := range fn.Dcl {
		if ln.Class() == PAUTO {
			ln.Name.SetUsed(false)
		}
	}

	// Anything the register allocator assigned a stack slot is used.
	for _, l := range f.RegAlloc {
		if ls, ok := l.(ssa.LocalSlot); ok {
			ls.N.(*Node).Name.SetUsed(true)
		}
	}

	// Any node still referenced by a surviving SSA value is used;
	// also note whether any op needs the scratch memory word.
	scratchUsed := false
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			if n, ok := v.Aux.(*Node); ok {
				switch n.Class() {
				case PPARAM, PPARAMOUT:
					// Don't modify nodfp; it is a global.
					if n != nodfp {
						n.Name.SetUsed(true)
					}
				case PAUTO:
					n.Name.SetUsed(true)
				}
			}
			if !scratchUsed {
				scratchUsed = v.Op.UsesScratch()
			}

		}
	}

	if f.Config.NeedsFpScratch && scratchUsed {
		s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[TUINT64])
	}

	// After this sort, all unused autos are at the tail of fn.Dcl,
	// so the loop below can truncate the slice at the first unused one.
	sort.Sort(byStackVar(fn.Dcl))

	// Reassign stack offsets of the locals that are used.
	lastHasPtr := false
	for i, n := range fn.Dcl {
		if n.Op != ONAME || n.Class() != PAUTO {
			continue
		}
		if !n.Name.Used() {
			fn.Dcl = fn.Dcl[:i]
			break
		}

		dowidth(n.Type)
		w := n.Type.Width
		if w >= thearch.MAXWIDTH || w < 0 {
			Fatalf("bad width")
		}
		if w == 0 && lastHasPtr {
			// Pad between a pointer-containing object and a zero-sized object.
			// This prevents a pointer to the zero-sized object from being interpreted
			// as a pointer to the pointer-containing object (and causing it
			// to be scanned when it shouldn't be). See issue 24993.
			w = 1
		}
		s.stksize += w
		s.stksize = Rnd(s.stksize, int64(n.Type.Align))
		// Pointer-containing autos sort first (see cmpstackvarlt), so
		// stkptrsize ends up covering exactly the pointer-bearing prefix.
		if n.Type.HasPointers() {
			s.stkptrsize = s.stksize
			lastHasPtr = true
		} else {
			lastHasPtr = false
		}
		if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
			s.stksize = Rnd(s.stksize, int64(Widthptr))
		}
		// Autos grow downward: offsets are negative from the frame top.
		n.Xoffset = -s.stksize
	}

	s.stksize = Rnd(s.stksize, int64(Widthreg))
	s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg))
}
   194  
// funccompile prepares fn for compilation: it assigns parameter
// offsets, handles body-less (e.g. assembly) functions by emitting
// their args stackmap, and otherwise runs compile(fn) with Curfn and
// dclcontext set for the duration. It must not be re-entered while a
// function is being compiled.
func funccompile(fn *Node) {
	if Curfn != nil {
		Fatalf("funccompile %v inside %v", fn.Func.Nname.Sym, Curfn.Func.Nname.Sym)
	}

	if fn.Type == nil {
		// A missing type is only acceptable after earlier errors.
		if nerrors == 0 {
			Fatalf("funccompile missing type")
		}
		return
	}

	// assign parameter offsets
	dowidth(fn.Type)

	if fn.Nbody.Len() == 0 {
		// Body-less function: implemented outside of Go.
		// Initialize ABI wrappers if necessary.
		fn.Func.initLSym(false)
		emitptrargsmap(fn)
		return
	}

	dclcontext = PAUTO
	Curfn = fn

	compile(fn)

	// Restore globals for the next function.
	Curfn = nil
	dclcontext = PEXTERN
}
   225  
// compile runs the middle-end passes (order, walk, optional
// instrumentation) on fn, pre-allocates the symbols the parallel
// backend phase will need, and then either compiles fn immediately or
// appends it to compilequeue for compileFunctions to drain.
func compile(fn *Node) {
	saveerrors()

	order(fn)
	if nerrors != 0 {
		return
	}

	// Set up the function's LSym early to avoid data races with the assemblers.
	// Do this before walk, as walk needs the LSym to set attributes/relocations
	// (e.g. in markTypeUsedInInterface).
	fn.Func.initLSym(true)

	walk(fn)
	if nerrors != 0 {
		return
	}
	if instrumenting {
		instrument(fn)
	}

	// From this point, there should be no uses of Curfn. Enforce that.
	Curfn = nil

	if fn.funcname() == "_" {
		// We don't need to generate code for this function, just report errors in its body.
		// At this point we've generated any errors needed.
		// (Beyond here we generate only non-spec errors, like "stack frame too large".)
		// See issue 29870.
		return
	}

	// Make sure type syms are declared for all types that might
	// be types of stack objects. We need to do this here
	// because symbols must be allocated before the parallel
	// phase of the compiler.
	for _, n := range fn.Func.Dcl {
		switch n.Class() {
		case PPARAM, PPARAMOUT, PAUTO:
			if livenessShouldTrack(n) && n.Name.Addrtaken() {
				dtypesym(n.Type)
				// Also make sure we allocate a linker symbol
				// for the stack object data, for the same reason.
				if fn.Func.lsym.Func().StackObjects == nil {
					fn.Func.lsym.Func().StackObjects = Ctxt.Lookup(fn.Func.lsym.Name + ".stkobj")
				}
			}
		}
	}

	if compilenow(fn) {
		compileSSA(fn, 0)
	} else {
		compilequeue = append(compilequeue, fn)
	}
}
   282  
   283  // compilenow reports whether to compile immediately.
   284  // If functions are not compiled immediately,
   285  // they are enqueued in compilequeue,
   286  // which is drained by compileFunctions.
   287  func compilenow(fn *Node) bool {
   288  	// Issue 38068: if this function is a method AND an inline
   289  	// candidate AND was not inlined (yet), put it onto the compile
   290  	// queue instead of compiling it immediately. This is in case we
   291  	// wind up inlining it into a method wrapper that is generated by
   292  	// compiling a function later on in the xtop list.
   293  	if fn.IsMethod() && isInlinableButNotInlined(fn) {
   294  		return false
   295  	}
   296  	return nBackendWorkers == 1 && Debug_compilelater == 0
   297  }
   298  
   299  // isInlinableButNotInlined returns true if 'fn' was marked as an
   300  // inline candidate but then never inlined (presumably because we
   301  // found no call sites).
   302  func isInlinableButNotInlined(fn *Node) bool {
   303  	if fn.Func.Nname.Func.Inl == nil {
   304  		return false
   305  	}
   306  	if fn.Sym == nil {
   307  		return true
   308  	}
   309  	return !fn.Sym.Linksym().WasInlined()
   310  }
   311  
// maxStackSize (1GB) is the largest frame or argument-area size the
// backend accepts; larger functions are recorded in largeStackFrames
// and reported as errors later.
const maxStackSize = 1 << 30

// compileSSA builds an SSA backend function,
// uses it to generate a plist,
// and flushes that plist to machine code.
// worker indicates which of the backend workers is doing the processing.
func compileSSA(fn *Node, worker int) {
	f := buildssa(fn, worker)
	// Note: check arg size to fix issue 25507.
	if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type.ArgWidth() >= maxStackSize {
		// Record under the mutex: this runs on concurrent backend workers.
		largeStackFramesMu.Lock()
		largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type.ArgWidth(), pos: fn.Pos})
		largeStackFramesMu.Unlock()
		return
	}
	pp := newProgs(fn, worker)
	defer pp.Free()
	genssa(f, pp)
	// Check frame size again.
	// The check above included only the space needed for local variables.
	// After genssa, the space needed includes local variables and the callee arg region.
	// We must do this check prior to calling pp.Flush.
	// If there are any oversized stack frames,
	// the assembler may emit inscrutable complaints about invalid instructions.
	if pp.Text.To.Offset >= maxStackSize {
		largeStackFramesMu.Lock()
		locals := f.Frontend().(*ssafn).stksize
		largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type.ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos})
		largeStackFramesMu.Unlock()
		return
	}

	pp.Flush() // assemble, fill in boilerplate, etc.
	// fieldtrack must be called after pp.Flush. See issue 20014.
	fieldtrack(pp.Text.From.Sym, fn.Func.FieldTrack)
}
   348  
   349  func init() {
   350  	if race.Enabled {
   351  		rand.Seed(time.Now().UnixNano())
   352  	}
   353  }
   354  
// compileFunctions compiles all functions in compilequeue.
// It fans out nBackendWorkers to do the work
// and waits for them to complete.
func compileFunctions() {
	if len(compilequeue) != 0 {
		sizeCalculationDisabled = true // not safe to calculate sizes concurrently
		if race.Enabled {
			// Randomize compilation order to try to shake out races.
			tmp := make([]*Node, len(compilequeue))
			perm := rand.Perm(len(compilequeue))
			for i, v := range perm {
				tmp[v] = compilequeue[i]
			}
			copy(compilequeue, tmp)
		} else {
			// Compile the longest functions first,
			// since they're most likely to be the slowest.
			// This helps avoid stragglers.
			sort.Slice(compilequeue, func(i, j int) bool {
				return compilequeue[i].Nbody.Len() > compilequeue[j].Nbody.Len()
			})
		}
		var wg sync.WaitGroup
		Ctxt.InParallel = true
		// Fan out: nBackendWorkers goroutines all draining channel c.
		c := make(chan *Node, nBackendWorkers)
		for i := 0; i < nBackendWorkers; i++ {
			wg.Add(1)
			go func(worker int) {
				for fn := range c {
					compileSSA(fn, worker)
				}
				wg.Done()
			}(i)
		}
		for _, fn := range compilequeue {
			c <- fn
		}
		close(c)
		// All functions have been handed to the workers via c,
		// so the queue itself can be dropped before waiting.
		compilequeue = nil
		wg.Wait()
		Ctxt.InParallel = false
		sizeCalculationDisabled = false
	}
}
   399  
   400  func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
   401  	fn := curfn.(*Node)
   402  	if fn.Func.Nname != nil {
   403  		if expect := fn.Func.Nname.Sym.Linksym(); fnsym != expect {
   404  			Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
   405  		}
   406  	}
   407  
   408  	var apdecls []*Node
   409  	// Populate decls for fn.
   410  	for _, n := range fn.Func.Dcl {
   411  		if n.Op != ONAME { // might be OTYPE or OLITERAL
   412  			continue
   413  		}
   414  		switch n.Class() {
   415  		case PAUTO:
   416  			if !n.Name.Used() {
   417  				// Text == nil -> generating abstract function
   418  				if fnsym.Func().Text != nil {
   419  					Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
   420  				}
   421  				continue
   422  			}
   423  		case PPARAM, PPARAMOUT:
   424  		default:
   425  			continue
   426  		}
   427  		apdecls = append(apdecls, n)
   428  		fnsym.Func().RecordAutoType(ngotype(n).Linksym())
   429  	}
   430  
   431  	decls, dwarfVars := createDwarfVars(fnsym, fn.Func, apdecls)
   432  
   433  	// For each type referenced by the functions auto vars but not
   434  	// already referenced by a dwarf var, attach a dummy relocation to
   435  	// the function symbol to insure that the type included in DWARF
   436  	// processing during linking.
   437  	typesyms := []*obj.LSym{}
   438  	for t, _ := range fnsym.Func().Autot {
   439  		typesyms = append(typesyms, t)
   440  	}
   441  	sort.Sort(obj.BySymName(typesyms))
   442  	for _, sym := range typesyms {
   443  		r := obj.Addrel(infosym)
   444  		r.Sym = sym
   445  		r.Type = objabi.R_USETYPE
   446  	}
   447  	fnsym.Func().Autot = nil
   448  
   449  	var varScopes []ScopeID
   450  	for _, decl := range decls {
   451  		pos := declPos(decl)
   452  		varScopes = append(varScopes, findScope(fn.Func.Marks, pos))
   453  	}
   454  
   455  	scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes)
   456  	var inlcalls dwarf.InlCalls
   457  	if genDwarfInline > 0 {
   458  		inlcalls = assembleInlines(fnsym, dwarfVars)
   459  	}
   460  	return scopes, inlcalls
   461  }
   462  
   463  func declPos(decl *Node) src.XPos {
   464  	if decl.Name.Defn != nil && (decl.Name.Captured() || decl.Name.Byval()) {
   465  		// It's not clear which position is correct for captured variables here:
   466  		// * decl.Pos is the wrong position for captured variables, in the inner
   467  		//   function, but it is the right position in the outer function.
   468  		// * decl.Name.Defn is nil for captured variables that were arguments
   469  		//   on the outer function, however the decl.Pos for those seems to be
   470  		//   correct.
   471  		// * decl.Name.Defn is the "wrong" thing for variables declared in the
   472  		//   header of a type switch, it's their position in the header, rather
   473  		//   than the position of the case statement. In principle this is the
   474  		//   right thing, but here we prefer the latter because it makes each
   475  		//   instance of the header variable local to the lexical block of its
   476  		//   case statement.
   477  		// This code is probably wrong for type switch variables that are also
   478  		// captured.
   479  		return decl.Name.Defn.Pos
   480  	}
   481  	return decl.Pos
   482  }
   483  
   484  // createSimpleVars creates a DWARF entry for every variable declared in the
   485  // function, claiming that they are permanently on the stack.
   486  func createSimpleVars(fnsym *obj.LSym, apDecls []*Node) ([]*Node, []*dwarf.Var, map[*Node]bool) {
   487  	var vars []*dwarf.Var
   488  	var decls []*Node
   489  	selected := make(map[*Node]bool)
   490  	for _, n := range apDecls {
   491  		if n.IsAutoTmp() {
   492  			continue
   493  		}
   494  
   495  		decls = append(decls, n)
   496  		vars = append(vars, createSimpleVar(fnsym, n))
   497  		selected[n] = true
   498  	}
   499  	return decls, vars, selected
   500  }
   501  
   502  func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var {
   503  	var abbrev int
   504  	offs := n.Xoffset
   505  
   506  	switch n.Class() {
   507  	case PAUTO:
   508  		abbrev = dwarf.DW_ABRV_AUTO
   509  		if Ctxt.FixedFrameSize() == 0 {
   510  			offs -= int64(Widthptr)
   511  		}
   512  		if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
   513  			// There is a word space for FP on ARM64 even if the frame pointer is disabled
   514  			offs -= int64(Widthptr)
   515  		}
   516  
   517  	case PPARAM, PPARAMOUT:
   518  		abbrev = dwarf.DW_ABRV_PARAM
   519  		offs += Ctxt.FixedFrameSize()
   520  	default:
   521  		Fatalf("createSimpleVar unexpected class %v for node %v", n.Class(), n)
   522  	}
   523  
   524  	typename := dwarf.InfoPrefix + typesymname(n.Type)
   525  	delete(fnsym.Func().Autot, ngotype(n).Linksym())
   526  	inlIndex := 0
   527  	if genDwarfInline > 1 {
   528  		if n.Name.InlFormal() || n.Name.InlLocal() {
   529  			inlIndex = posInlIndex(n.Pos) + 1
   530  			if n.Name.InlFormal() {
   531  				abbrev = dwarf.DW_ABRV_PARAM
   532  			}
   533  		}
   534  	}
   535  	declpos := Ctxt.InnermostPos(declPos(n))
   536  	return &dwarf.Var{
   537  		Name:          n.Sym.Name,
   538  		IsReturnValue: n.Class() == PPARAMOUT,
   539  		IsInlFormal:   n.Name.InlFormal(),
   540  		Abbrev:        abbrev,
   541  		StackOffset:   int32(offs),
   542  		Type:          Ctxt.Lookup(typename),
   543  		DeclFile:      declpos.RelFilename(),
   544  		DeclLine:      declpos.RelLine(),
   545  		DeclCol:       declpos.Col(),
   546  		InlIndex:      int32(inlIndex),
   547  		ChildIndex:    -1,
   548  	}
   549  }
   550  
   551  // createComplexVars creates recomposed DWARF vars with location lists,
   552  // suitable for describing optimized code.
   553  func createComplexVars(fnsym *obj.LSym, fn *Func) ([]*Node, []*dwarf.Var, map[*Node]bool) {
   554  	debugInfo := fn.DebugInfo
   555  
   556  	// Produce a DWARF variable entry for each user variable.
   557  	var decls []*Node
   558  	var vars []*dwarf.Var
   559  	ssaVars := make(map[*Node]bool)
   560  
   561  	for varID, dvar := range debugInfo.Vars {
   562  		n := dvar.(*Node)
   563  		ssaVars[n] = true
   564  		for _, slot := range debugInfo.VarSlots[varID] {
   565  			ssaVars[debugInfo.Slots[slot].N.(*Node)] = true
   566  		}
   567  
   568  		if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil {
   569  			decls = append(decls, n)
   570  			vars = append(vars, dvar)
   571  		}
   572  	}
   573  
   574  	return decls, vars, ssaVars
   575  }
   576  
// createDwarfVars processes fn, returning a list of DWARF variables
// and the Nodes they represent. It first builds entries for the
// variables the backend knows about (location-list or simple form),
// then adds conservative entries for pre-optimization variables that
// would otherwise be missing.
func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dwarf.Var) {
	// Collect a raw list of DWARF vars.
	var vars []*dwarf.Var
	var decls []*Node
	var selected map[*Node]bool
	if Ctxt.Flag_locationlists && Ctxt.Flag_optimize && fn.DebugInfo != nil {
		// Optimized build with SSA debug info: use location lists.
		decls, vars, selected = createComplexVars(fnsym, fn)
	} else {
		decls, vars, selected = createSimpleVars(fnsym, apDecls)
	}

	// For an inlined function, report the pre-inlining declarations.
	dcl := apDecls
	if fnsym.WasInlined() {
		dcl = preInliningDcls(fnsym)
	}

	// If optimization is enabled, the list above will typically be
	// missing some of the original pre-optimization variables in the
	// function (they may have been promoted to registers, folded into
	// constants, dead-coded away, etc).  Input arguments not eligible
	// for SSA optimization are also missing.  Here we add back in entries
	// for selected missing vars. Note that the recipe below creates a
	// conservative location. The idea here is that we want to
	// communicate to the user that "yes, there is a variable named X
	// in this function, but no, I don't have enough information to
	// reliably report its contents."
	// For non-SSA-able arguments, however, the correct information
	// is known -- they have a single home on the stack.
	for _, n := range dcl {
		if _, found := selected[n]; found {
			continue
		}
		// Skip compiler-generated names (leading '.') and untyped values.
		c := n.Sym.Name[0]
		if c == '.' || n.Type.IsUntyped() {
			continue
		}
		if n.Class() == PPARAM && !canSSAType(n.Type) {
			// SSA-able args get location lists, and may move in and
			// out of registers, so those are handled elsewhere.
			// Autos and named output params seem to get handled
			// with VARDEF, which creates location lists.
			// Args not of SSA-able type are treated here; they
			// are homed on the stack in a single place for the
			// entire call.
			vars = append(vars, createSimpleVar(fnsym, n))
			decls = append(decls, n)
			continue
		}
		typename := dwarf.InfoPrefix + typesymname(n.Type)
		decls = append(decls, n)
		abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
		isReturnValue := (n.Class() == PPARAMOUT)
		if n.Class() == PPARAM || n.Class() == PPARAMOUT {
			abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
		} else if n.Class() == PAUTOHEAP {
			// If dcl in question has been promoted to heap, do a bit
			// of extra work to recover original class (auto or param);
			// see issue 30908. This ensures that we get the proper
			// signature in the abstract function DIE, but leaves a
			// misleading location for the param (we want pointer-to-heap
			// and not stack).
			// TODO(thanm): generate a better location expression
			stackcopy := n.Name.Param.Stackcopy
			if stackcopy != nil && (stackcopy.Class() == PPARAM || stackcopy.Class() == PPARAMOUT) {
				abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
				isReturnValue = (stackcopy.Class() == PPARAMOUT)
			}
		}
		inlIndex := 0
		if genDwarfInline > 1 {
			if n.Name.InlFormal() || n.Name.InlLocal() {
				// Variable introduced by inlining: record its inline
				// instance (+1 so that 0 can mean "not inlined").
				inlIndex = posInlIndex(n.Pos) + 1
				if n.Name.InlFormal() {
					abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
				}
			}
		}
		declpos := Ctxt.InnermostPos(n.Pos)
		vars = append(vars, &dwarf.Var{
			Name:          n.Sym.Name,
			IsReturnValue: isReturnValue,
			Abbrev:        abbrev,
			StackOffset:   int32(n.Xoffset),
			Type:          Ctxt.Lookup(typename),
			DeclFile:      declpos.RelFilename(),
			DeclLine:      declpos.RelLine(),
			DeclCol:       declpos.Col(),
			InlIndex:      int32(inlIndex),
			ChildIndex:    -1,
		})
		// Record the go type to ensure that it gets emitted by the linker.
		fnsym.Func().RecordAutoType(ngotype(n).Linksym())
	}

	return decls, vars
}
   675  
   676  // Given a function that was inlined at some point during the
   677  // compilation, return a sorted list of nodes corresponding to the
   678  // autos/locals in that function prior to inlining. If this is a
   679  // function that is not local to the package being compiled, then the
   680  // names of the variables may have been "versioned" to avoid conflicts
   681  // with local vars; disregard this versioning when sorting.
   682  func preInliningDcls(fnsym *obj.LSym) []*Node {
   683  	fn := Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*Node)
   684  	var rdcl []*Node
   685  	for _, n := range fn.Func.Inl.Dcl {
   686  		c := n.Sym.Name[0]
   687  		// Avoid reporting "_" parameters, since if there are more than
   688  		// one, it can result in a collision later on, as in #23179.
   689  		if unversion(n.Sym.Name) == "_" || c == '.' || n.Type.IsUntyped() {
   690  			continue
   691  		}
   692  		rdcl = append(rdcl, n)
   693  	}
   694  	return rdcl
   695  }
   696  
   697  // stackOffset returns the stack location of a LocalSlot relative to the
   698  // stack pointer, suitable for use in a DWARF location entry. This has nothing
   699  // to do with its offset in the user variable.
   700  func stackOffset(slot ssa.LocalSlot) int32 {
   701  	n := slot.N.(*Node)
   702  	var base int64
   703  	switch n.Class() {
   704  	case PAUTO:
   705  		if Ctxt.FixedFrameSize() == 0 {
   706  			base -= int64(Widthptr)
   707  		}
   708  		if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
   709  			// There is a word space for FP on ARM64 even if the frame pointer is disabled
   710  			base -= int64(Widthptr)
   711  		}
   712  	case PPARAM, PPARAMOUT:
   713  		base += Ctxt.FixedFrameSize()
   714  	}
   715  	return int32(base + n.Xoffset + slot.Off)
   716  }
   717  
// createComplexVar builds a single DWARF variable entry and location list.
// It returns nil for variables whose class gets no DWARF entry.
func createComplexVar(fnsym *obj.LSym, fn *Func, varID ssa.VarID) *dwarf.Var {
	debug := fn.DebugInfo
	n := debug.Vars[varID].(*Node)

	// Only autos and parameters are described; anything else is skipped.
	var abbrev int
	switch n.Class() {
	case PAUTO:
		abbrev = dwarf.DW_ABRV_AUTO_LOCLIST
	case PPARAM, PPARAMOUT:
		abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
	default:
		return nil
	}

	gotype := ngotype(n).Linksym()
	// This type is referenced directly by the variable entry, so drop
	// it from Autot (debuginfo would otherwise add a dummy R_USETYPE
	// relocation for it).
	delete(fnsym.Func().Autot, gotype)
	typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
	inlIndex := 0
	if genDwarfInline > 1 {
		if n.Name.InlFormal() || n.Name.InlLocal() {
			// Variable introduced by inlining: record its inline
			// instance (+1 so that 0 can mean "not inlined").
			inlIndex = posInlIndex(n.Pos) + 1
			if n.Name.InlFormal() {
				abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
			}
		}
	}
	declpos := Ctxt.InnermostPos(n.Pos)
	dvar := &dwarf.Var{
		Name:          n.Sym.Name,
		IsReturnValue: n.Class() == PPARAMOUT,
		IsInlFormal:   n.Name.InlFormal(),
		Abbrev:        abbrev,
		Type:          Ctxt.Lookup(typename),
		// The stack offset is used as a sorting key, so for decomposed
		// variables just give it the first one. It's not used otherwise.
		// This won't work well if the first slot hasn't been assigned a stack
		// location, but it's not obvious how to do better.
		StackOffset: stackOffset(debug.Slots[debug.VarSlots[varID][0]]),
		DeclFile:    declpos.RelFilename(),
		DeclLine:    declpos.RelLine(),
		DeclCol:     declpos.Col(),
		InlIndex:    int32(inlIndex),
		ChildIndex:  -1,
	}
	// Attach the location list lazily: the closure captures list and
	// debug, and is invoked later during DWARF generation.
	list := debug.LocationLists[varID]
	if len(list) != 0 {
		dvar.PutLocationList = func(listSym, startPC dwarf.Sym) {
			debug.PutLocationList(list, Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym))
		}
	}
	return dvar
}
   771  
   772  // fieldtrack adds R_USEFIELD relocations to fnsym to record any
   773  // struct fields that it used.
   774  func fieldtrack(fnsym *obj.LSym, tracked map[*types.Sym]struct{}) {
   775  	if fnsym == nil {
   776  		return
   777  	}
   778  	if objabi.Fieldtrack_enabled == 0 || len(tracked) == 0 {
   779  		return
   780  	}
   781  
   782  	trackSyms := make([]*types.Sym, 0, len(tracked))
   783  	for sym := range tracked {
   784  		trackSyms = append(trackSyms, sym)
   785  	}
   786  	sort.Sort(symByName(trackSyms))
   787  	for _, sym := range trackSyms {
   788  		r := obj.Addrel(fnsym)
   789  		r.Sym = sym.Linksym()
   790  		r.Type = objabi.R_USEFIELD
   791  	}
   792  }
   793  
   794  type symByName []*types.Sym
   795  
   796  func (a symByName) Len() int           { return len(a) }
   797  func (a symByName) Less(i, j int) bool { return a[i].Name < a[j].Name }
   798  func (a symByName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
   799  

View as plain text