Text file src/runtime/asm_amd64p32.s

     1	// Copyright 2009 The Go Authors. All rights reserved.
     2	// Use of this source code is governed by a BSD-style
     3	// license that can be found in the LICENSE file.
     4	
     5	#include "go_asm.h"
     6	#include "go_tls.h"
     7	#include "funcdata.h"
     8	#include "textflag.h"
     9	
    10	TEXT runtime·rt0_go(SB),NOSPLIT,$0
    11		// copy arguments forward on an even stack
    12		MOVL	argc+0(FP), AX
    13		MOVL	argv+4(FP), BX
    14		MOVL	SP, CX
    15		SUBL	$128, CX		// plenty of scratch
    16		ANDL	$~15, CX
    17		MOVL	CX, SP
    18	
    19		MOVL	AX, 16(SP)
    20		MOVL	BX, 24(SP)
    21		
    22		// create istack out of the given (operating system) stack.
    23		MOVL	$runtime·g0(SB), DI
    24		LEAL	(-64*1024+104)(SP), BX
    25		MOVL	BX, g_stackguard0(DI)
    26		MOVL	BX, g_stackguard1(DI)
    27		MOVL	BX, (g_stack+stack_lo)(DI)
    28		MOVL	SP, (g_stack+stack_hi)(DI)
    29	
    30		// find out information about the processor we're on
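      	// CPUID with AX=0 returns the highest supported function number in AX and
      	// the vendor identification string in BX, DX, CX ("Genu", "ineI", "ntel"
      	// for Intel parts), which is what the comparisons below check for.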
    31		MOVL	$0, AX
    32		CPUID
    33		CMPL	AX, $0
    34		JE	nocpuinfo
    35	
    36		CMPL	BX, $0x756E6547  // "Genu"
    37		JNE	notintel
    38		CMPL	DX, $0x49656E69  // "ineI"
    39		JNE	notintel
    40		CMPL	CX, $0x6C65746E  // "ntel"
    41		JNE	notintel
    42		MOVB	$1, runtime·isIntel(SB)
    43	notintel:
    44	
    45		// Load EAX=1 cpuid flags
    46		MOVL	$1, AX
    47		CPUID
    48		MOVL	AX, runtime·processorVersionInfo(SB)
    49	
    50	nocpuinfo:
    51		LEAL	runtime·m0+m_tls(SB), DI
    52		CALL	runtime·settls(SB)
    53	
    54		// store through it, to make sure it works
    55		get_tls(BX)
    56		MOVQ	$0x123, g(BX)
    57		MOVQ	runtime·m0+m_tls(SB), AX
    58		CMPQ	AX, $0x123
    59		JEQ 2(PC)
    60		CALL	runtime·abort(SB)
    61	ok:
    62		// set the per-goroutine and per-mach "registers"
    63		get_tls(BX)
    64		LEAL	runtime·g0(SB), CX
    65		MOVL	CX, g(BX)
    66		LEAL	runtime·m0(SB), AX
    67	
    68		// save m->g0 = g0
    69		MOVL	CX, m_g0(AX)
    70		// save m0 to g0->m
    71		MOVL	AX, g_m(CX)
    72	
    73		CLD				// convention is D is always left cleared
    74		CALL	runtime·check(SB)
    75	
    76		MOVL	16(SP), AX		// copy argc
    77		MOVL	AX, 0(SP)
    78		MOVL	24(SP), AX		// copy argv
    79		MOVL	AX, 4(SP)
    80		CALL	runtime·args(SB)
    81		CALL	runtime·osinit(SB)
    82		CALL	runtime·schedinit(SB)
    83	
    84		// create a new goroutine to start program
    85		MOVL	$runtime·mainPC(SB), AX	// entry
    86		MOVL	$0, 0(SP)
    87		MOVL	AX, 4(SP)
    88		CALL	runtime·newproc(SB)
    89	
    90		// start this M
    91		CALL	runtime·mstart(SB)
    92	
    93		MOVL	$0xf1, 0xf1  // crash
    94		RET
    95	
    96	DATA	runtime·mainPC+0(SB)/4,$runtime·main(SB)
    97	GLOBL	runtime·mainPC(SB),RODATA,$4
    98	
    99	TEXT runtime·breakpoint(SB),NOSPLIT,$0-0
   100		INT $3
   101		RET
   102	
   103	TEXT runtime·asminit(SB),NOSPLIT,$0-0
   104		// No per-thread init.
   105		RET
   106	
   107	/*
   108	 *  go-routine
   109	 */
   110	
   111	// void gosave(Gobuf*)
   112	// save state in Gobuf; setjmp
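      	// gosave records the caller's SP and PC and the current g in the Gobuf;
      	// gogo below is its counterpart (longjmp) and resumes at the saved PC.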
   113	TEXT runtime·gosave(SB), NOSPLIT, $0-4
   114		MOVL	buf+0(FP), AX	// gobuf
   115		LEAL	buf+0(FP), BX	// caller's SP
   116		MOVL	BX, gobuf_sp(AX)
   117		MOVL	0(SP), BX		// caller's PC
   118		MOVL	BX, gobuf_pc(AX)
   119		MOVQ	$0, gobuf_ret(AX)
   120		// Assert ctxt is zero. See func save.
   121		MOVL	gobuf_ctxt(AX), BX
   122		TESTL	BX, BX
   123		JZ	2(PC)
   124		CALL	runtime·badctxt(SB)
   125		get_tls(CX)
   126		MOVL	g(CX), BX
   127		MOVL	BX, gobuf_g(AX)
   128		RET
   129	
   130	// void gogo(Gobuf*)
   131	// restore state from Gobuf; longjmp
   132	TEXT runtime·gogo(SB), NOSPLIT, $8-4
   133		MOVL	buf+0(FP), BX		// gobuf
   134		MOVL	gobuf_g(BX), DX
   135		MOVL	0(DX), CX		// make sure g != nil
   136		get_tls(CX)
   137		MOVL	DX, g(CX)
   138		MOVL	gobuf_sp(BX), SP	// restore SP
   139		MOVL	gobuf_ctxt(BX), DX
   140		MOVQ	gobuf_ret(BX), AX
   141		MOVL	$0, gobuf_sp(BX)	// clear to help garbage collector
   142		MOVQ	$0, gobuf_ret(BX)
   143		MOVL	$0, gobuf_ctxt(BX)
   144		MOVL	gobuf_pc(BX), BX
   145		JMP	BX
   146	
   147	// func mcall(fn func(*g))
   148	// Switch to m->g0's stack, call fn(g).
   149	// Fn must never return. It should gogo(&g->sched)
   150	// to keep running g.
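      	// For example, goexit1 ends a goroutine with mcall(goexit0): the stack
      	// switch to g0 happens here, and goexit0 never returns.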
   151	TEXT runtime·mcall(SB), NOSPLIT, $0-4
   152		MOVL	fn+0(FP), DI
   153		
   154		get_tls(CX)
   155		MOVL	g(CX), AX	// save state in g->sched
   156		MOVL	0(SP), BX	// caller's PC
   157		MOVL	BX, (g_sched+gobuf_pc)(AX)
   158		LEAL	fn+0(FP), BX	// caller's SP
   159		MOVL	BX, (g_sched+gobuf_sp)(AX)
   160		MOVL	AX, (g_sched+gobuf_g)(AX)
   161	
   162		// switch to m->g0 & its stack, call fn
   163		MOVL	g(CX), BX
   164		MOVL	g_m(BX), BX
   165		MOVL	m_g0(BX), SI
   166		CMPL	SI, AX	// if g == m->g0 call badmcall
   167		JNE	3(PC)
   168		MOVL	$runtime·badmcall(SB), AX
   169		JMP	AX
   170		MOVL	SI, g(CX)	// g = m->g0
   171		MOVL	(g_sched+gobuf_sp)(SI), SP	// sp = m->g0->sched.sp
   172		PUSHQ	AX
   173		MOVL	DI, DX
   174		MOVL	0(DI), DI
   175		CALL	DI
   176		POPQ	AX
   177		MOVL	$runtime·badmcall2(SB), AX
   178		JMP	AX
   179		RET
   180	
   181	// systemstack_switch is a dummy routine that systemstack leaves at the bottom
   182	// of the G stack. We need to distinguish the routine that
   183	// lives at the bottom of the G stack from the one that lives
   184	// at the top of the system stack because the one at the top of
   185	// the system stack terminates the stack walk (see topofstack()).
   186	TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
   187		RET
   188	
   189	// func systemstack(fn func())
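      	// systemstack runs fn on a system stack and then returns. From Go code in
      	// the runtime it is typically invoked as, for example:
      	//	systemstack(func() {
      	//		// work that must not run on the user goroutine stack
      	//	})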
   190	TEXT runtime·systemstack(SB), NOSPLIT, $0-4
   191		MOVL	fn+0(FP), DI	// DI = fn
   192		get_tls(CX)
   193		MOVL	g(CX), AX	// AX = g
   194		MOVL	g_m(AX), BX	// BX = m
   195	
   196		CMPL	AX, m_gsignal(BX)
   197		JEQ	noswitch
   198	
   199		MOVL	m_g0(BX), DX	// DX = g0
   200		CMPL	AX, DX
   201		JEQ	noswitch
   202	
   203		CMPL	AX, m_curg(BX)
   204		JNE	bad
   205	
   206		// switch stacks
   207		// save our state in g->sched. Pretend to
   208		// be systemstack_switch if the G stack is scanned.
   209		MOVL	$runtime·systemstack_switch(SB), SI
   210		MOVL	SI, (g_sched+gobuf_pc)(AX)
   211		MOVL	SP, (g_sched+gobuf_sp)(AX)
   212		MOVL	AX, (g_sched+gobuf_g)(AX)
   213	
   214		// switch to g0
   215		MOVL	DX, g(CX)
   216		MOVL	(g_sched+gobuf_sp)(DX), SP
   217	
   218		// call target function
   219		MOVL	DI, DX
   220		MOVL	0(DI), DI
   221		CALL	DI
   222	
   223		// switch back to g
   224		get_tls(CX)
   225		MOVL	g(CX), AX
   226		MOVL	g_m(AX), BX
   227		MOVL	m_curg(BX), AX
   228		MOVL	AX, g(CX)
   229		MOVL	(g_sched+gobuf_sp)(AX), SP
   230		MOVL	$0, (g_sched+gobuf_sp)(AX)
   231		RET
   232	
   233	noswitch:
   234		// already on m stack, just call directly
   235		// Using a tail call here cleans up tracebacks since we won't stop
   236		// at an intermediate systemstack.
   237		MOVL	DI, DX
   238		MOVL	0(DI), DI
   239		JMP	DI
   240	
   241	bad:
   242		// Not g0, not curg. Must be gsignal, but that's not allowed.
   243		// Hide call from linker nosplit analysis.
   244		MOVL	$runtime·badsystemstack(SB), AX
   245		CALL	AX
   246		INT	$3
   247	
   248	/*
   249	 * support for morestack
   250	 */
   251	
   252	// Called during function prolog when more stack is needed.
   253	//
   254	// The traceback routines see morestack on a g0 as being
   255	// the top of a stack (for example, morestack calling newstack
   256	// calling the scheduler calling newm calling gc), so we must
   257	// record an argument size. For that purpose, it has no arguments.
   258	TEXT runtime·morestack(SB),NOSPLIT,$0-0
   259		get_tls(CX)
   260		MOVL	g(CX), BX
   261		MOVL	g_m(BX), BX
   262	
   263		// Cannot grow scheduler stack (m->g0).
   264		MOVL	m_g0(BX), SI
   265		CMPL	g(CX), SI
   266		JNE	3(PC)
   267		CALL	runtime·badmorestackg0(SB)
   268		MOVL	0, AX
   269	
   270		// Cannot grow signal stack (m->gsignal).
   271		MOVL	m_gsignal(BX), SI
   272		CMPL	g(CX), SI
   273		JNE	3(PC)
   274		CALL	runtime·badmorestackgsignal(SB)
   275		MOVL	0, AX
   276	
   277		// Called from f.
   278		// Set m->morebuf to f's caller.
   279		MOVL	8(SP), AX	// f's caller's PC
   280		MOVL	AX, (m_morebuf+gobuf_pc)(BX)
   281		LEAL	16(SP), AX	// f's caller's SP
   282		MOVL	AX, (m_morebuf+gobuf_sp)(BX)
   283		get_tls(CX)
   284		MOVL	g(CX), SI
   285		MOVL	SI, (m_morebuf+gobuf_g)(BX)
   286	
   287		// Set g->sched to context in f.
   288		MOVL	0(SP), AX // f's PC
   289		MOVL	AX, (g_sched+gobuf_pc)(SI)
   290		MOVL	SI, (g_sched+gobuf_g)(SI)
   291		LEAL	8(SP), AX // f's SP
   292		MOVL	AX, (g_sched+gobuf_sp)(SI)
   293		MOVL	DX, (g_sched+gobuf_ctxt)(SI)
   294	
   295		// Call newstack on m->g0's stack.
   296		MOVL	m_g0(BX), BX
   297		MOVL	BX, g(CX)
   298		MOVL	(g_sched+gobuf_sp)(BX), SP
   299		CALL	runtime·newstack(SB)
   300		MOVL	$0, 0x1003	// crash if newstack returns
   301		RET
   302	
   303	// morestack trampolines
   304	TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0
   305		MOVL	$0, DX
   306		JMP	runtime·morestack(SB)
   307	
   308	// reflectcall: call a function with the given argument list
   309	// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
   310	// we don't have variable-sized frames, so we use a small number
   311	// of constant-sized-frame functions to encode a few bits of size in the pc.
   312	// Caution: ugly multiline assembly macros in your future!
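      	// For example, a 40-byte argument frame fails the $16 and $32 comparisons
      	// in DISPATCH below and jumps to runtime·call64, which copies the arguments
      	// into its fixed 64-byte frame.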
   313	
   314	#define DISPATCH(NAME,MAXSIZE)		\
   315		CMPL	CX, $MAXSIZE;		\
   316		JA	3(PC);			\
   317		MOVL	$NAME(SB), AX;		\
   318		JMP	AX
   319	// Note: can't just "JMP NAME(SB)" - bad inlining results.
   320	
   321	TEXT reflect·call(SB), NOSPLIT, $0-0
   322		JMP	·reflectcall(SB)
   323	
   324	TEXT ·reflectcall(SB), NOSPLIT, $0-20
   325		MOVLQZX argsize+12(FP), CX
   326		DISPATCH(runtime·call16, 16)
   327		DISPATCH(runtime·call32, 32)
   328		DISPATCH(runtime·call64, 64)
   329		DISPATCH(runtime·call128, 128)
   330		DISPATCH(runtime·call256, 256)
   331		DISPATCH(runtime·call512, 512)
   332		DISPATCH(runtime·call1024, 1024)
   333		DISPATCH(runtime·call2048, 2048)
   334		DISPATCH(runtime·call4096, 4096)
   335		DISPATCH(runtime·call8192, 8192)
   336		DISPATCH(runtime·call16384, 16384)
   337		DISPATCH(runtime·call32768, 32768)
   338		DISPATCH(runtime·call65536, 65536)
   339		DISPATCH(runtime·call131072, 131072)
   340		DISPATCH(runtime·call262144, 262144)
   341		DISPATCH(runtime·call524288, 524288)
   342		DISPATCH(runtime·call1048576, 1048576)
   343		DISPATCH(runtime·call2097152, 2097152)
   344		DISPATCH(runtime·call4194304, 4194304)
   345		DISPATCH(runtime·call8388608, 8388608)
   346		DISPATCH(runtime·call16777216, 16777216)
   347		DISPATCH(runtime·call33554432, 33554432)
   348		DISPATCH(runtime·call67108864, 67108864)
   349		DISPATCH(runtime·call134217728, 134217728)
   350		DISPATCH(runtime·call268435456, 268435456)
   351		DISPATCH(runtime·call536870912, 536870912)
   352		DISPATCH(runtime·call1073741824, 1073741824)
   353		MOVL	$runtime·badreflectcall(SB), AX
   354		JMP	AX
   355	
   356	#define CALLFN(NAME,MAXSIZE)			\
   357	TEXT NAME(SB), WRAPPER, $MAXSIZE-20;		\
   358		NO_LOCAL_POINTERS;			\
   359		/* copy arguments to stack */		\
   360		MOVL	argptr+8(FP), SI;		\
   361		MOVL	argsize+12(FP), CX;		\
   362		MOVL	SP, DI;				\
   363		REP;MOVSB;				\
   364		/* call function */			\
   365		MOVL	f+4(FP), DX;			\
   366		MOVL	(DX), AX;			\
   367		CALL	AX;				\
   368		/* copy return values back */		\
   369		MOVL	argtype+0(FP), DX;		\
   370		MOVL	argptr+8(FP), DI;		\
   371		MOVL	argsize+12(FP), CX;		\
   372		MOVL	retoffset+16(FP), BX;		\
   373		MOVL	SP, SI;				\
   374		ADDL	BX, DI;				\
   375		ADDL	BX, SI;				\
   376		SUBL	BX, CX;				\
   377		CALL	callRet<>(SB);			\
   378		RET
   379	
   380	// callRet copies return values back at the end of call*. This is a
   381	// separate function so it can allocate stack space for the arguments
   382	// to reflectcallmove. It does not follow the Go ABI; it expects its
   383	// arguments in registers.
   384	TEXT callRet<>(SB), NOSPLIT, $16-0
   385		MOVL	DX, 0(SP)
   386		MOVL	DI, 4(SP)
   387		MOVL	SI, 8(SP)
   388		MOVL	CX, 12(SP)
   389		CALL	runtime·reflectcallmove(SB)
   390		RET
   391	
   392	CALLFN(·call16, 16)
   393	CALLFN(·call32, 32)
   394	CALLFN(·call64, 64)
   395	CALLFN(·call128, 128)
   396	CALLFN(·call256, 256)
   397	CALLFN(·call512, 512)
   398	CALLFN(·call1024, 1024)
   399	CALLFN(·call2048, 2048)
   400	CALLFN(·call4096, 4096)
   401	CALLFN(·call8192, 8192)
   402	CALLFN(·call16384, 16384)
   403	CALLFN(·call32768, 32768)
   404	CALLFN(·call65536, 65536)
   405	CALLFN(·call131072, 131072)
   406	CALLFN(·call262144, 262144)
   407	CALLFN(·call524288, 524288)
   408	CALLFN(·call1048576, 1048576)
   409	CALLFN(·call2097152, 2097152)
   410	CALLFN(·call4194304, 4194304)
   411	CALLFN(·call8388608, 8388608)
   412	CALLFN(·call16777216, 16777216)
   413	CALLFN(·call33554432, 33554432)
   414	CALLFN(·call67108864, 67108864)
   415	CALLFN(·call134217728, 134217728)
   416	CALLFN(·call268435456, 268435456)
   417	CALLFN(·call536870912, 536870912)
   418	CALLFN(·call1073741824, 1073741824)
   419	
   420	TEXT runtime·procyield(SB),NOSPLIT,$0-0
   421		MOVL	cycles+0(FP), AX
   422	again:
   423		PAUSE
   424		SUBL	$1, AX
   425		JNZ	again
   426		RET
   427	
   428	TEXT ·publicationBarrier(SB),NOSPLIT,$0-0
   429		// Stores are already ordered on x86, so this is just a
   430		// compile barrier.
   431		RET
   432	
   433	// void jmpdefer(fn, sp);
   434	// called from deferreturn.
   435	// 1. pop the caller
   436	// 2. sub 5 bytes from the caller's return PC
   437	// 3. jmp to the argument
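      	// The 5 bytes are the length of the CALL instruction, so when the deferred
      	// fn returns, the deferring function re-executes its CALL to deferreturn
      	// and the next deferred call, if any, is run.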
   438	TEXT runtime·jmpdefer(SB), NOSPLIT, $0-8
   439		MOVL	fv+0(FP), DX
   440		MOVL	argp+4(FP), BX
   441		LEAL	-8(BX), SP	// caller sp after CALL
   442		SUBL	$5, (SP)	// return to CALL again
   443		MOVL	0(DX), BX
   444		JMP	BX	// but first run the deferred function
   445	
   446	// func asmcgocall(fn, arg unsafe.Pointer) int32
   447	// Not implemented.
   448	TEXT runtime·asmcgocall(SB),NOSPLIT,$0-12
   449		MOVL	0, AX
   450		RET
   451	
   452	// cgocallback(void (*fn)(void*), void *frame, uintptr framesize)
   453	// Not implemented.
   454	TEXT runtime·cgocallback(SB),NOSPLIT,$0-16
   455		MOVL	0, AX
   456		RET
   457	
   458	// cgocallback_gofunc(FuncVal*, void *frame, uintptr framesize)
   459	// Not implemented.
   460	TEXT ·cgocallback_gofunc(SB),NOSPLIT,$0-16
   461		MOVL	0, AX
   462		RET
   463	
   464	// void setg(G*); set g. for use by needm.
   465	// Not implemented.
   466	TEXT runtime·setg(SB), NOSPLIT, $0-4
   467		MOVL	0, AX
   468		RET
   469	
   470	TEXT runtime·abort(SB),NOSPLIT,$0-0
   471		INT	$3
   472	loop:
   473		JMP	loop
   474	
   475	// check that SP is in range [g->stack.lo, g->stack.hi)
   476	TEXT runtime·stackcheck(SB), NOSPLIT, $0-0
   477		get_tls(CX)
   478		MOVL	g(CX), AX
   479		CMPL	(g_stack+stack_hi)(AX), SP
   480		JHI	2(PC)
   481		MOVL	0, AX
   482		CMPL	SP, (g_stack+stack_lo)(AX)
   483		JHI	2(PC)
   484		MOVL	0, AX
   485		RET
   486	
   487	// int64 runtime·cputicks(void)
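      	// RDTSC returns the low 32 bits of the time-stamp counter in AX and the
      	// high 32 bits in DX; the code below combines them as (DX<<32)+AX.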
   488	TEXT runtime·cputicks(SB),NOSPLIT,$0-0
   489		RDTSC
   490		SHLQ	$32, DX
   491		ADDQ	DX, AX
   492		MOVQ	AX, ret+0(FP)
   493		RET
   494	
   495	// hash function using AES hardware instructions
   496	// For now, our one amd64p32 system (NaCl) does not
   497	// support using AES instructions, so we have not bothered to
   498	// write the implementations. Can copy and adjust the ones
   499	// in asm_amd64.s when the time comes.
   500	
   501	TEXT runtime·aeshash(SB),NOSPLIT,$0-20
   502		MOVL	AX, ret+16(FP)
   503		RET
   504	
   505	TEXT runtime·aeshashstr(SB),NOSPLIT,$0-12
   506		MOVL	AX, ret+8(FP)
   507		RET
   508	
   509	TEXT runtime·aeshash32(SB),NOSPLIT,$0-12
   510		MOVL	AX, ret+8(FP)
   511		RET
   512	
   513	TEXT runtime·aeshash64(SB),NOSPLIT,$0-12
   514		MOVL	AX, ret+8(FP)
   515		RET
   516	
   517	TEXT runtime·return0(SB), NOSPLIT, $0
   518		MOVL	$0, AX
   519		RET
   520	
   521	// The top-most function running on a goroutine
   522	// returns to goexit+PCQuantum.
   523	TEXT runtime·goexit(SB),NOSPLIT,$0-0
   524		BYTE	$0x90	// NOP
   525		CALL	runtime·goexit1(SB)	// does not return
   526		// traceback from goexit1 must hit code range of goexit
   527		BYTE	$0x90	// NOP
   528	
   529	TEXT ·checkASM(SB),NOSPLIT,$0-1
   530		MOVB	$1, ret+0(FP)
   531		RET
   532	
   533	// gcWriteBarrier performs a heap pointer write and informs the GC.
   534	//
   535	// gcWriteBarrier does NOT follow the Go ABI. It takes two arguments:
   536	// - DI is the destination of the write
   537	// - AX is the value being written at DI
   538	// It clobbers FLAGS and SI. It does not clobber any other general-purpose registers,
   539	// but may clobber others (e.g., SSE registers).
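      	// The fast path below records the new value (AX) and the value being
      	// overwritten (*DI) in the per-P write barrier buffer (p.wbBuf); only when
      	// that buffer is full does it take the flush path and call wbBufFlush.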
   540	TEXT runtime·gcWriteBarrier(SB),NOSPLIT,$88
   541		// Save the registers clobbered by the fast path. This is slightly
   542		// faster than having the caller spill these.
   543		MOVQ	R14, 72(SP)
   544		MOVQ	R13, 80(SP)
   545		// TODO: Consider passing g.m.p in as an argument so they can be shared
   546		// across a sequence of write barriers.
   547		get_tls(R13)
   548		MOVL	g(R13), R13
   549		MOVL	g_m(R13), R13
   550		MOVL	m_p(R13), R13
   551		MOVL	(p_wbBuf+wbBuf_next)(R13), R14
   552		// Increment wbBuf.next position.
   553		LEAL	8(R14), R14
   554		MOVL	R14, (p_wbBuf+wbBuf_next)(R13)
   555		CMPL	R14, (p_wbBuf+wbBuf_end)(R13)
   556		// Record the write.
   557		MOVL	AX, -8(R14)	// Record value
   558		MOVL	(DI), R13	// TODO: This turns bad writes into bad reads.
   559		MOVL	R13, -4(R14)	// Record *slot
   560		// Is the buffer full? (flags set in CMPL above)
   561		JEQ	flush
   562	ret:
   563		MOVQ	72(SP), R14
   564		MOVQ	80(SP), R13
   565		// Do the write.
   566		MOVL	AX, (DI)
   567		RET			// Clobbers SI on NaCl
   568	
   569	flush:
   570		// Save all general purpose registers since these could be
   571		// clobbered by wbBufFlush and were not saved by the caller.
   572		// It is possible for wbBufFlush to clobber other registers
   573		// (e.g., SSE registers), but the compiler takes care of saving
   574		// those in the caller if necessary. This strikes a balance
   575		// with registers that are likely to be used.
   576		//
   577		// We don't have type information for these, but all code under
   578		// here is NOSPLIT, so nothing will observe these.
   579		//
   580		// TODO: We could strike a different balance; e.g., saving X0
   581		// and not saving GP registers that are less likely to be used.
   582		MOVL	DI, 0(SP)	// Also first argument to wbBufFlush
   583		MOVL	AX, 4(SP)	// Also second argument to wbBufFlush
   584		MOVQ	BX, 8(SP)
   585		MOVQ	CX, 16(SP)
   586		MOVQ	DX, 24(SP)
   587		// DI already saved
   588		// SI is always clobbered on nacl
   589		// BP is reserved on nacl
   590		MOVQ	R8, 32(SP)
   591		MOVQ	R9, 40(SP)
   592		MOVQ	R10, 48(SP)
   593		MOVQ	R11, 56(SP)
   594		MOVQ	R12, 64(SP)
   595		// R13 already saved
   596		// R14 already saved
   597		// R15 is reserved on nacl
   598	
   599		// This takes arguments DI and AX
   600		CALL	runtime·wbBufFlush(SB)
   601	
   602		MOVL	0(SP), DI
   603		MOVL	4(SP), AX
   604		MOVQ	8(SP), BX
   605		MOVQ	16(SP), CX
   606		MOVQ	24(SP), DX
   607		MOVQ	32(SP), R8
   608		MOVQ	40(SP), R9
   609		MOVQ	48(SP), R10
   610		MOVQ	56(SP), R11
   611		MOVQ	64(SP), R12
   612		JMP	ret
