Source file src/cmd/compile/internal/wasm/ssa.go

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package wasm

import (
	"cmd/compile/internal/base"
	"cmd/compile/internal/ir"
	"cmd/compile/internal/logopt"
	"cmd/compile/internal/objw"
	"cmd/compile/internal/ssa"
	"cmd/compile/internal/ssagen"
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"cmd/internal/obj/wasm"
	"internal/buildcfg"
)

/*

   Wasm implementation
   -------------------

   Wasm is a strange Go port because the machine isn't
   a register-based machine, threads are different, code paths
   are different, etc. We outline those differences here.

   See the design doc for some additional info on this topic.
   https://docs.google.com/document/d/131vjr4DH6JFnb-blm_uRdaC0_Nv3OUwjEY5qVCxCup4/edit#heading=h.mjo1bish3xni

   PCs:

   Wasm doesn't have PCs in the normal sense that you can jump
   to or call to. Instead, we simulate these PCs using our own construct.

   A PC in the Wasm implementation is the combination of a function
   ID and a block ID within that function. The function ID is an index
   into a function table which transfers control to the start of the
   function in question, and the block ID is a sequential integer
   indicating where in the function we are.

   Every function starts with a branch table which transfers control
   to the place in the function indicated by the block ID. The block
   ID is provided to the function as the sole Wasm argument.

   Block IDs do not encode every possible PC. They only encode places
   in the function where it might be suspended. Typically these places
   are call sites.

   Sometimes we encode the function ID and block ID separately. When
   recorded together as a single integer, we use the value F<<16+B.
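
   For example, a combined PC might be packed and unpacked like this
   (an illustrative sketch using hypothetical funcID and blockID values,
   not the actual code used by the runtime or linker):

       pc := uint64(funcID)<<16 + uint64(blockID) // F<<16+B
       f, b := pc>>16, pc&(1<<16-1)               // recover F and B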

   Threads:

   Wasm doesn't (yet) have threads. We have to simulate threads by
   keeping goroutine stacks in linear memory and unwinding
   the Wasm stack each time we want to switch goroutines.

   To support unwinding a stack, each function call returns on the Wasm
   stack a boolean that tells the function whether it should return
   immediately or not. When returning immediately, a return address
   is left on the top of the Go stack indicating where the goroutine
   should be resumed.

   Stack pointer:

   There is a single global stack pointer which records the stack pointer
   used by the currently active goroutine. This is just an address in
   linear memory where the Go runtime is maintaining the stack for that
   goroutine.

   Functions cache the global stack pointer in a local variable for
   faster access, but any changes must be spilled to the global variable
   before any call and restored from the global variable after any call.

   Calling convention:

   All Go arguments and return values are passed on the Go stack, not
   the wasm stack. In addition, return addresses are pushed on the
   Go stack at every call point. Return addresses are not used during
   normal execution, they are used only when resuming goroutines.
   (So they are not really a "return address", they are a "resume address".)

   All Go functions have the Wasm type (i32)->i32. The argument
   is the block ID and the return value is the exit immediately flag.

   Callsite:
    - write arguments to the Go stack (starting at SP+0)
    - push return address to Go stack (8 bytes)
    - write local SP to global SP
    - push 0 (type i32) to Wasm stack
    - issue Call
    - restore local SP from global SP
    - pop int32 from top of Wasm stack. If nonzero, exit function immediately.
    - use results from Go stack (starting at SP+sizeof(args))
       - note that the callee will have popped the return address

   Prologue:
    - initialize local SP from global SP
    - jump to the location indicated by the block ID argument
      (which appears in local variable 0)
    - at block 0
      - check for Go stack overflow, call morestack if needed
      - subtract frame size from SP
      - note that arguments now start at SP+framesize+8

   Normal epilogue:
    - pop frame from Go stack
    - pop return address from Go stack
    - push 0 (type i32) on the Wasm stack
    - return
   Exit immediately epilogue:
    - push 1 (type i32) on the Wasm stack
    - return
    - note that the return address and stack frame are left on the Go stack

   The main loop that executes goroutines is wasm_pc_f_loop, in
   runtime/rt0_js_wasm.s. It grabs the saved return address from
   the top of the Go stack (actually SP-8?), splits it up into F
   and B parts, then calls F with its Wasm argument set to B.

   Note that when resuming a goroutine, only the most recent function
   invocation of that goroutine appears on the Wasm stack. When that
   Wasm function returns normally, the next most recent frame will
   then be started up by wasm_pc_f_loop.

   Global 0 is SP (stack pointer)
   Global 1 is CTXT (closure pointer)
   Global 2 is GP (goroutine pointer)
*/

func Init(arch *ssagen.ArchInfo) {
	arch.LinkArch = &wasm.Linkwasm
	arch.REGSP = wasm.REG_SP
	arch.MAXWIDTH = 1 << 50

	arch.ZeroRange = zeroRange
	arch.Ginsnop = ginsnop

	arch.SSAMarkMoves = ssaMarkMoves
	arch.SSAGenValue = ssaGenValue
	arch.SSAGenBlock = ssaGenBlock
}

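// zeroRange zeroes a cnt-byte region of the stack starting off bytes
// above SP, 8 bytes at a time. cnt must be a multiple of 8.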
func zeroRange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
	if cnt == 0 {
		return p
	}
	if cnt%8 != 0 {
		base.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
	}

	for i := int64(0); i < cnt; i += 8 {
		p = pp.Append(p, wasm.AGet, obj.TYPE_REG, wasm.REG_SP, 0, 0, 0, 0)
		p = pp.Append(p, wasm.AI64Const, obj.TYPE_CONST, 0, 0, 0, 0, 0)
		p = pp.Append(p, wasm.AI64Store, 0, 0, 0, obj.TYPE_CONST, 0, off+i)
	}

	return p
}

func ginsnop(pp *objw.Progs) *obj.Prog {
	return pp.Prog(wasm.ANop)
}

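// ssaMarkMoves is a no-op on Wasm; no moves need to be marked here.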
func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
}

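// ssaGenBlock generates the control-flow code for the end of block b.
// next is the block that will be emitted immediately after b, so a
// branch to it can be omitted as a fallthrough.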
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
	switch b.Kind {
	case ssa.BlockPlain:
		if next != b.Succs[0].Block() {
			s.Br(obj.AJMP, b.Succs[0].Block())
		}

	case ssa.BlockIf:
		switch next {
		case b.Succs[0].Block():
			// if false, jump to b.Succs[1]
			getValue32(s, b.Controls[0])
			s.Prog(wasm.AI32Eqz)
			s.Prog(wasm.AIf)
			s.Br(obj.AJMP, b.Succs[1].Block())
			s.Prog(wasm.AEnd)
		case b.Succs[1].Block():
			// if true, jump to b.Succs[0]
			getValue32(s, b.Controls[0])
			s.Prog(wasm.AIf)
			s.Br(obj.AJMP, b.Succs[0].Block())
			s.Prog(wasm.AEnd)
		default:
			// if true, jump to b.Succs[0], else jump to b.Succs[1]
			getValue32(s, b.Controls[0])
			s.Prog(wasm.AIf)
			s.Br(obj.AJMP, b.Succs[0].Block())
			s.Prog(wasm.AEnd)
			s.Br(obj.AJMP, b.Succs[1].Block())
		}

	case ssa.BlockRet:
		s.Prog(obj.ARET)

	case ssa.BlockExit, ssa.BlockRetJmp:

	case ssa.BlockDefer:
		p := s.Prog(wasm.AGet)
		p.From = obj.Addr{Type: obj.TYPE_REG, Reg: wasm.REG_RET0}
		s.Prog(wasm.AI64Eqz)
		s.Prog(wasm.AI32Eqz)
		s.Prog(wasm.AIf)
		s.Br(obj.AJMP, b.Succs[1].Block())
		s.Prog(wasm.AEnd)
		if next != b.Succs[0].Block() {
			s.Br(obj.AJMP, b.Succs[0].Block())
		}

	default:
		panic("unexpected block")
	}

	// Entry point for the next block. Used by the JMP in goToBlock.
	s.Prog(wasm.ARESUMEPOINT)

	if s.OnWasmStackSkipped != 0 {
		panic("wasm: bad stack")
	}
}

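// ssaGenValue generates Wasm instructions for the SSA value v. Results
// are stored in register locals, except for values marked OnWasmStack,
// whose generation is delayed until they are used and which are then
// produced directly on the Wasm operand stack (see ssaGenValueOnStack).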
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
	switch v.Op {
	case ssa.OpWasmLoweredStaticCall, ssa.OpWasmLoweredClosureCall, ssa.OpWasmLoweredInterCall, ssa.OpWasmLoweredTailCall:
		s.PrepareCall(v)
		if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn == ir.Syms.Deferreturn {
			// The runtime needs to inject jumps to
			// deferreturn calls using the address in
			// _func.deferreturn. Hence, the call to
			// deferreturn must itself be a resumption
			// point so it gets a target PC.
			s.Prog(wasm.ARESUMEPOINT)
		}
		if v.Op == ssa.OpWasmLoweredClosureCall {
			getValue64(s, v.Args[1])
			setReg(s, wasm.REG_CTXT)
		}
		if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn != nil {
			sym := call.Fn
			p := s.Prog(obj.ACALL)
			p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: sym}
			p.Pos = v.Pos
			if v.Op == ssa.OpWasmLoweredTailCall {
				p.As = obj.ARET
			}
		} else {
			getValue64(s, v.Args[0])
			p := s.Prog(obj.ACALL)
			p.To = obj.Addr{Type: obj.TYPE_NONE}
			p.Pos = v.Pos
		}

	case ssa.OpWasmLoweredMove:
		getValue32(s, v.Args[0])
		getValue32(s, v.Args[1])
		i32Const(s, int32(v.AuxInt))
		s.Prog(wasm.AMemoryCopy)

	case ssa.OpWasmLoweredZero:
		getValue32(s, v.Args[0])
		i32Const(s, 0)
		i32Const(s, int32(v.AuxInt))
		s.Prog(wasm.AMemoryFill)

	case ssa.OpWasmLoweredNilCheck:
		getValue64(s, v.Args[0])
		s.Prog(wasm.AI64Eqz)
		s.Prog(wasm.AIf)
		p := s.Prog(wasm.ACALLNORESUME)
		p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.SigPanic}
		s.Prog(wasm.AEnd)
		if logopt.Enabled() {
			logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
		}
		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
			base.WarnfAt(v.Pos, "generated nil check")
		}

	case ssa.OpWasmLoweredWB:
		p := s.Prog(wasm.ACall)
		// AuxInt encodes how many buffer entries we need.
		p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.GCWriteBarrier[v.AuxInt-1]}
		setReg(s, v.Reg0()) // move result from wasm stack to register local

	case ssa.OpWasmI64Store8, ssa.OpWasmI64Store16, ssa.OpWasmI64Store32, ssa.OpWasmI64Store, ssa.OpWasmF32Store, ssa.OpWasmF64Store:
		getValue32(s, v.Args[0])
		getValue64(s, v.Args[1])
		p := s.Prog(v.Op.Asm())
		p.To = obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt}

	case ssa.OpStoreReg:
		getReg(s, wasm.REG_SP)
		getValue64(s, v.Args[0])
		p := s.Prog(storeOp(v.Type))
		ssagen.AddrAuto(&p.To, v)

	case ssa.OpClobber, ssa.OpClobberReg:
		// TODO: implement for clobberdead experiment. Nop is ok for now.

	default:
		if v.Type.IsMemory() {
			return
		}
		if v.OnWasmStack {
			s.OnWasmStackSkipped++
			// If a Value is marked OnWasmStack, we don't generate the value and store it to a register now.
			// Instead, we delay the generation to when the value is used and then directly generate it on the WebAssembly stack.
			return
		}
		ssaGenValueOnStack(s, v, true)
		if s.OnWasmStackSkipped != 0 {
			panic("wasm: bad stack")
		}
		setReg(s, v.Reg())
	}
}

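// ssaGenValueOnStack generates code for v, leaving its result on the
// Wasm operand stack. If extend is true, i32 comparison results are
// zero-extended to i64.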
func ssaGenValueOnStack(s *ssagen.State, v *ssa.Value, extend bool) {
	switch v.Op {
	case ssa.OpWasmLoweredGetClosurePtr:
		getReg(s, wasm.REG_CTXT)

	case ssa.OpWasmLoweredGetCallerPC:
		p := s.Prog(wasm.AI64Load)
		// Caller PC is stored 8 bytes below first parameter.
		p.From = obj.Addr{
			Type:   obj.TYPE_MEM,
			Name:   obj.NAME_PARAM,
			Offset: -8,
		}

	case ssa.OpWasmLoweredGetCallerSP:
		p := s.Prog(wasm.AGet)
		// Caller SP is the address of the first parameter.
		p.From = obj.Addr{
			Type:   obj.TYPE_ADDR,
			Name:   obj.NAME_PARAM,
			Reg:    wasm.REG_SP,
			Offset: 0,
		}

	case ssa.OpWasmLoweredAddr:
		if v.Aux == nil { // address of off(SP), no symbol
			getValue64(s, v.Args[0])
			i64Const(s, v.AuxInt)
			s.Prog(wasm.AI64Add)
			break
		}
		p := s.Prog(wasm.AGet)
		p.From.Type = obj.TYPE_ADDR
		switch v.Aux.(type) {
		case *obj.LSym:
			ssagen.AddAux(&p.From, v)
		case *ir.Name:
			p.From.Reg = v.Args[0].Reg()
			ssagen.AddAux(&p.From, v)
		default:
			panic("wasm: bad LoweredAddr")
		}

	case ssa.OpWasmLoweredConvert:
		getValue64(s, v.Args[0])

	case ssa.OpWasmSelect:
		getValue64(s, v.Args[0])
		getValue64(s, v.Args[1])
		getValue32(s, v.Args[2])
		s.Prog(v.Op.Asm())

	case ssa.OpWasmI64AddConst:
		getValue64(s, v.Args[0])
		i64Const(s, v.AuxInt)
		s.Prog(v.Op.Asm())

	case ssa.OpWasmI64Const:
		i64Const(s, v.AuxInt)

	case ssa.OpWasmF32Const:
		f32Const(s, v.AuxFloat())

	case ssa.OpWasmF64Const:
		f64Const(s, v.AuxFloat())

	case ssa.OpWasmI64Load8U, ssa.OpWasmI64Load8S, ssa.OpWasmI64Load16U, ssa.OpWasmI64Load16S, ssa.OpWasmI64Load32U, ssa.OpWasmI64Load32S, ssa.OpWasmI64Load, ssa.OpWasmF32Load, ssa.OpWasmF64Load:
		getValue32(s, v.Args[0])
		p := s.Prog(v.Op.Asm())
		p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt}

	case ssa.OpWasmI64Eqz:
		getValue64(s, v.Args[0])
		s.Prog(v.Op.Asm())
		if extend {
			s.Prog(wasm.AI64ExtendI32U)
		}

	case ssa.OpWasmI64Eq, ssa.OpWasmI64Ne, ssa.OpWasmI64LtS, ssa.OpWasmI64LtU, ssa.OpWasmI64GtS, ssa.OpWasmI64GtU, ssa.OpWasmI64LeS, ssa.OpWasmI64LeU, ssa.OpWasmI64GeS, ssa.OpWasmI64GeU,
		ssa.OpWasmF32Eq, ssa.OpWasmF32Ne, ssa.OpWasmF32Lt, ssa.OpWasmF32Gt, ssa.OpWasmF32Le, ssa.OpWasmF32Ge,
		ssa.OpWasmF64Eq, ssa.OpWasmF64Ne, ssa.OpWasmF64Lt, ssa.OpWasmF64Gt, ssa.OpWasmF64Le, ssa.OpWasmF64Ge:
		getValue64(s, v.Args[0])
		getValue64(s, v.Args[1])
		s.Prog(v.Op.Asm())
		if extend {
			s.Prog(wasm.AI64ExtendI32U)
		}

	case ssa.OpWasmI64Add, ssa.OpWasmI64Sub, ssa.OpWasmI64Mul, ssa.OpWasmI64DivU, ssa.OpWasmI64RemS, ssa.OpWasmI64RemU, ssa.OpWasmI64And, ssa.OpWasmI64Or, ssa.OpWasmI64Xor, ssa.OpWasmI64Shl, ssa.OpWasmI64ShrS, ssa.OpWasmI64ShrU, ssa.OpWasmI64Rotl,
		ssa.OpWasmF32Add, ssa.OpWasmF32Sub, ssa.OpWasmF32Mul, ssa.OpWasmF32Div, ssa.OpWasmF32Copysign,
		ssa.OpWasmF64Add, ssa.OpWasmF64Sub, ssa.OpWasmF64Mul, ssa.OpWasmF64Div, ssa.OpWasmF64Copysign:
		getValue64(s, v.Args[0])
		getValue64(s, v.Args[1])
		s.Prog(v.Op.Asm())

	case ssa.OpWasmI32Rotl:
		getValue32(s, v.Args[0])
		getValue32(s, v.Args[1])
		s.Prog(wasm.AI32Rotl)
		s.Prog(wasm.AI64ExtendI32U)

	case ssa.OpWasmI64DivS:
		getValue64(s, v.Args[0])
		getValue64(s, v.Args[1])
		if v.Type.Size() == 8 {
			// Division of int64 needs helper function wasmDiv to handle the MinInt64 / -1 case.
			p := s.Prog(wasm.ACall)
			p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmDiv}
			break
		}
		s.Prog(wasm.AI64DivS)

	case ssa.OpWasmI64TruncSatF32S, ssa.OpWasmI64TruncSatF64S:
		getValue64(s, v.Args[0])
		if buildcfg.GOWASM.SatConv {
			s.Prog(v.Op.Asm())
		} else {
			if v.Op == ssa.OpWasmI64TruncSatF32S {
				s.Prog(wasm.AF64PromoteF32)
			}
			p := s.Prog(wasm.ACall)
			p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmTruncS}
		}

	case ssa.OpWasmI64TruncSatF32U, ssa.OpWasmI64TruncSatF64U:
		getValue64(s, v.Args[0])
		if buildcfg.GOWASM.SatConv {
			s.Prog(v.Op.Asm())
		} else {
			if v.Op == ssa.OpWasmI64TruncSatF32U {
				s.Prog(wasm.AF64PromoteF32)
			}
			p := s.Prog(wasm.ACall)
			p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmTruncU}
		}

	case ssa.OpWasmF32DemoteF64:
		getValue64(s, v.Args[0])
		s.Prog(v.Op.Asm())

	case ssa.OpWasmF64PromoteF32:
		getValue64(s, v.Args[0])
		s.Prog(v.Op.Asm())

	case ssa.OpWasmF32ConvertI64S, ssa.OpWasmF32ConvertI64U,
		ssa.OpWasmF64ConvertI64S, ssa.OpWasmF64ConvertI64U,
		ssa.OpWasmI64Extend8S, ssa.OpWasmI64Extend16S, ssa.OpWasmI64Extend32S,
		ssa.OpWasmF32Neg, ssa.OpWasmF32Sqrt, ssa.OpWasmF32Trunc, ssa.OpWasmF32Ceil, ssa.OpWasmF32Floor, ssa.OpWasmF32Nearest, ssa.OpWasmF32Abs,
		ssa.OpWasmF64Neg, ssa.OpWasmF64Sqrt, ssa.OpWasmF64Trunc, ssa.OpWasmF64Ceil, ssa.OpWasmF64Floor, ssa.OpWasmF64Nearest, ssa.OpWasmF64Abs,
		ssa.OpWasmI64Ctz, ssa.OpWasmI64Clz, ssa.OpWasmI64Popcnt:
		getValue64(s, v.Args[0])
		s.Prog(v.Op.Asm())

	case ssa.OpLoadReg:
		p := s.Prog(loadOp(v.Type))
		ssagen.AddrAuto(&p.From, v.Args[0])

	case ssa.OpCopy:
		getValue64(s, v.Args[0])

	default:
		v.Fatalf("unexpected op: %s", v.Op)

	}
}

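// isCmp reports whether v is a comparison, i.e. an operation whose
// Wasm result is an i32 rather than an i64.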
func isCmp(v *ssa.Value) bool {
	switch v.Op {
	case ssa.OpWasmI64Eqz, ssa.OpWasmI64Eq, ssa.OpWasmI64Ne, ssa.OpWasmI64LtS, ssa.OpWasmI64LtU, ssa.OpWasmI64GtS, ssa.OpWasmI64GtU, ssa.OpWasmI64LeS, ssa.OpWasmI64LeU, ssa.OpWasmI64GeS, ssa.OpWasmI64GeU,
		ssa.OpWasmF32Eq, ssa.OpWasmF32Ne, ssa.OpWasmF32Lt, ssa.OpWasmF32Gt, ssa.OpWasmF32Le, ssa.OpWasmF32Ge,
		ssa.OpWasmF64Eq, ssa.OpWasmF64Ne, ssa.OpWasmF64Lt, ssa.OpWasmF64Gt, ssa.OpWasmF64Le, ssa.OpWasmF64Ge:
		return true
	default:
		return false
	}
}

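// getValue32 pushes v onto the Wasm stack as an i32, wrapping i64
// values where necessary.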
func getValue32(s *ssagen.State, v *ssa.Value) {
	if v.OnWasmStack {
		s.OnWasmStackSkipped--
		ssaGenValueOnStack(s, v, false)
		if !isCmp(v) {
			s.Prog(wasm.AI32WrapI64)
		}
		return
	}

	reg := v.Reg()
	getReg(s, reg)
	if reg != wasm.REG_SP {
		s.Prog(wasm.AI32WrapI64)
	}
}

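// getValue64 pushes v onto the Wasm stack as an i64, zero-extending
// i32 values where necessary.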
func getValue64(s *ssagen.State, v *ssa.Value) {
	if v.OnWasmStack {
		s.OnWasmStackSkipped--
		ssaGenValueOnStack(s, v, true)
		return
	}

	reg := v.Reg()
	getReg(s, reg)
	if reg == wasm.REG_SP {
		s.Prog(wasm.AI64ExtendI32U)
	}
}

func i32Const(s *ssagen.State, val int32) {
	p := s.Prog(wasm.AI32Const)
	p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(val)}
}

func i64Const(s *ssagen.State, val int64) {
	p := s.Prog(wasm.AI64Const)
	p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: val}
}

func f32Const(s *ssagen.State, val float64) {
	p := s.Prog(wasm.AF32Const)
	p.From = obj.Addr{Type: obj.TYPE_FCONST, Val: val}
}

func f64Const(s *ssagen.State, val float64) {
	p := s.Prog(wasm.AF64Const)
	p.From = obj.Addr{Type: obj.TYPE_FCONST, Val: val}
}

func getReg(s *ssagen.State, reg int16) {
	p := s.Prog(wasm.AGet)
	p.From = obj.Addr{Type: obj.TYPE_REG, Reg: reg}
}

func setReg(s *ssagen.State, reg int16) {
	p := s.Prog(wasm.ASet)
	p.To = obj.Addr{Type: obj.TYPE_REG, Reg: reg}
}

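// loadOp returns the Wasm load instruction for a value of type t.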
func loadOp(t *types.Type) obj.As {
	if t.IsFloat() {
		switch t.Size() {
		case 4:
			return wasm.AF32Load
		case 8:
			return wasm.AF64Load
		default:
			panic("bad load type")
		}
	}

	switch t.Size() {
	case 1:
		if t.IsSigned() {
			return wasm.AI64Load8S
		}
		return wasm.AI64Load8U
	case 2:
		if t.IsSigned() {
			return wasm.AI64Load16S
		}
		return wasm.AI64Load16U
	case 4:
		if t.IsSigned() {
			return wasm.AI64Load32S
		}
		return wasm.AI64Load32U
	case 8:
		return wasm.AI64Load
	default:
		panic("bad load type")
	}
}

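// storeOp returns the Wasm store instruction for a value of type t.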
func storeOp(t *types.Type) obj.As {
	if t.IsFloat() {
		switch t.Size() {
		case 4:
			return wasm.AF32Store
		case 8:
			return wasm.AF64Store
		default:
			panic("bad store type")
		}
	}

	switch t.Size() {
	case 1:
		return wasm.AI64Store8
	case 2:
		return wasm.AI64Store16
	case 4:
		return wasm.AI64Store32
	case 8:
		return wasm.AI64Store
	default:
		panic("bad store type")
	}
}

