Text file src/runtime/internal/atomic/asm_386.s

Documentation: runtime/internal/atomic

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "textflag.h"

// bool Cas(int32 *val, int32 old, int32 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	}else
//		return 0;
TEXT runtime∕internal∕atomic·Cas(SB), NOSPLIT, $0-13
	MOVL	ptr+0(FP), BX
	MOVL	old+4(FP), AX
	MOVL	new+8(FP), CX
	LOCK
	CMPXCHGL	CX, 0(BX)
	SETEQ	ret+12(FP)
	RET
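
// For reference, a hedged Go sketch of what the routine above implements
// in a single instruction: LOCK CMPXCHGL compares AX (old) with *ptr and,
// if equal, stores CX (new) and sets ZF, which SETEQ converts into the
// bool result. A sketch only, not the implementation (Cas itself is the
// primitive the runtime actually uses):
//
//	func cas(val *int32, old, new int32) bool {
//		if *val == old { // compare and store happen atomically
//			*val = new
//			return true
//		}
//		return false
//	}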

TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-13
	JMP	runtime∕internal∕atomic·Cas(SB)

TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT, $0-8
	JMP	runtime∕internal∕atomic·Load(SB)

TEXT runtime∕internal∕atomic·Loaduint(SB), NOSPLIT, $0-8
	JMP	runtime∕internal∕atomic·Load(SB)

TEXT runtime∕internal∕atomic·Storeuintptr(SB), NOSPLIT, $0-8
	JMP	runtime∕internal∕atomic·Store(SB)

TEXT runtime∕internal∕atomic·Xadduintptr(SB), NOSPLIT, $0-12
	JMP	runtime∕internal∕atomic·Xadd(SB)

TEXT runtime∕internal∕atomic·Loadint64(SB), NOSPLIT, $0-12
	JMP	runtime∕internal∕atomic·Load64(SB)

TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-20
	JMP	runtime∕internal∕atomic·Xadd64(SB)
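
// The JMP forwarders above work because on 386 uintptr, uint, and
// pointer values are all 32 bits wide and int64 shares the in-memory
// representation of uint64, so each typed entry point can reuse the
// corresponding untyped implementation unchanged.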

// bool runtime∕internal∕atomic·Cas64(uint64 *val, uint64 old, uint64 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else {
//		return 0;
//	}
TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-21
	MOVL	ptr+0(FP), BP
	TESTL	$7, BP
	JZ	2(PC)
	MOVL	0, BP // crash with nil ptr deref when ptr is not 8-byte aligned
	MOVL	old_lo+4(FP), AX
	MOVL	old_hi+8(FP), DX
	MOVL	new_lo+12(FP), BX
	MOVL	new_hi+16(FP), CX
	LOCK
	CMPXCHG8B	0(BP)
	SETEQ	ret+20(FP)
	RET
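
// For reference, a hedged Go sketch of Cas64. LOCK CMPXCHG8B compares
// DX:AX (old) against the 8-byte operand and, on equality, stores CX:BX
// (new). The runtime requires 64-bit atomics to be 8-byte aligned, which
// is why the routine deliberately faults on unaligned pointers instead
// of proceeding:
//
//	func cas64(val *uint64, old, new uint64) bool {
//		if *val == old { // compare and store happen atomically
//			*val = new
//			return true
//		}
//		return false
//	}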

// bool Casp1(void **p, void *old, void *new)
// Atomically:
//	if(*p == old){
//		*p = new;
//		return 1;
//	}else
//		return 0;
TEXT runtime∕internal∕atomic·Casp1(SB), NOSPLIT, $0-13
	MOVL	ptr+0(FP), BX
	MOVL	old+4(FP), AX
	MOVL	new+8(FP), CX
	LOCK
	CMPXCHGL	CX, 0(BX)
	SETEQ	ret+12(FP)
	RET
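
// Note: Casp1 is byte-for-byte the same operation as Cas; the separate
// entry point exists because it swaps a pointer without performing a
// write barrier, and (as an assumption here, following the runtime's
// atomic pointer wrappers on other platforms) callers are expected to
// arrange any required barrier themselves.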

// uint32 Xadd(uint32 volatile *val, int32 delta)
// Atomically:
//	*val += delta;
//	return *val;
TEXT runtime∕internal∕atomic·Xadd(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), BX
	MOVL	delta+4(FP), AX
	MOVL	AX, CX
	LOCK
	XADDL	AX, 0(BX)
	ADDL	CX, AX
	MOVL	AX, ret+8(FP)
	RET
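
// For reference, a hedged Go sketch of Xadd. LOCK XADDL atomically adds
// AX into *val and leaves the old value in AX; the trailing ADDL CX, AX
// recomputes old+delta so the routine can return the new value:
//
//	func xadd(val *uint32, delta int32) uint32 {
//		*val += uint32(delta) // done atomically by LOCK XADDL
//		return *val           // new value, reconstructed as old+delta
//	}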

TEXT runtime∕internal∕atomic·Xadd64(SB), NOSPLIT, $0-20
	// no XADDQ so use CMPXCHG8B loop
	MOVL	ptr+0(FP), BP
	TESTL	$7, BP
	JZ	2(PC)
	MOVL	0, AX // crash with nil ptr deref when ptr is not 8-byte aligned
	// DI:SI = delta
	MOVL	delta_lo+4(FP), SI
	MOVL	delta_hi+8(FP), DI
	// DX:AX = *addr
	MOVL	0(BP), AX
	MOVL	4(BP), DX
addloop:
	// CX:BX = DX:AX (*addr) + DI:SI (delta)
	MOVL	AX, BX
	MOVL	DX, CX
	ADDL	SI, BX
	ADCL	DI, CX

	// if *addr == DX:AX {
	//	*addr = CX:BX
	// } else {
	//	DX:AX = *addr
	// }
	// all in one instruction
	LOCK
	CMPXCHG8B	0(BP)

	JNZ	addloop

	// success
	// return CX:BX
	MOVL	BX, ret_lo+12(FP)
	MOVL	CX, ret_hi+16(FP)
	RET
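
// For reference, a hedged Go sketch of the retry loop above. There is no
// 64-bit XADD on 386, so the add is done with an optimistic compare-and-
// swap that retries until no other writer slips in between the load and
// the swap (on failure CMPXCHG8B itself reloads DX:AX, so the asm does
// not re-execute the initial loads):
//
//	func xadd64(val *uint64, delta int64) uint64 {
//		for {
//			old := *val
//			new := old + uint64(delta)
//			if cas64(val, old, new) { // LOCK CMPXCHG8B
//				return new
//			}
//		}
//	}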

TEXT runtime∕internal∕atomic·Xchg(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), BX
	MOVL	new+4(FP), AX
	XCHGL	AX, 0(BX)
	MOVL	AX, ret+8(FP)
	RET

TEXT runtime∕internal∕atomic·Xchguintptr(SB), NOSPLIT, $0-12
	JMP	runtime∕internal∕atomic·Xchg(SB)
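
// For reference: XCHGL with a memory operand is implicitly locked, so no
// LOCK prefix is needed. A hedged Go sketch of the exchange:
//
//	func xchg(ptr *uint32, new uint32) uint32 {
//		old := *ptr // load and store happen as one atomic exchange
//		*ptr = new
//		return old
//	}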

TEXT runtime∕internal∕atomic·Xchg64(SB), NOSPLIT, $0-20
	// no XCHGQ so use CMPXCHG8B loop
	MOVL	ptr+0(FP), BP
	TESTL	$7, BP
	JZ	2(PC)
	MOVL	0, AX // crash with nil ptr deref when ptr is not 8-byte aligned
	// CX:BX = new
	MOVL	new_lo+4(FP), BX
	MOVL	new_hi+8(FP), CX
	// DX:AX = *addr
	MOVL	0(BP), AX
	MOVL	4(BP), DX
swaploop:
	// if *addr == DX:AX
	//	*addr = CX:BX
	// else
	//	DX:AX = *addr
	// all in one instruction
	LOCK
	CMPXCHG8B	0(BP)
	JNZ	swaploop

	// success
	// return DX:AX
	MOVL	AX, ret_lo+12(FP)
	MOVL	DX, ret_hi+16(FP)
	RET
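
// For reference, a hedged Go sketch: the same optimistic CMPXCHG8B loop
// as Xadd64, except the result is the old value (DX:AX) rather than the
// new one:
//
//	func xchg64(val *uint64, new uint64) uint64 {
//		for {
//			old := *val
//			if cas64(val, old, new) { // LOCK CMPXCHG8B
//				return old
//			}
//		}
//	}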

TEXT runtime∕internal∕atomic·StorepNoWB(SB), NOSPLIT, $0-8
	MOVL	ptr+0(FP), BX
	MOVL	val+4(FP), AX
	XCHGL	AX, 0(BX)
	RET

TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-8
	MOVL	ptr+0(FP), BX
	MOVL	val+4(FP), AX
	XCHGL	AX, 0(BX)
	RET
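
// For reference: both stores above use XCHGL rather than a plain MOVL so
// that the store doubles as a full memory barrier (x86 may otherwise let
// a later load pass an earlier store), making the stores sequentially
// consistent. StorepNoWB stores a pointer with NO write barrier; callers
// must supply any barrier themselves. A hedged Go sketch of the contract:
//
//	func store(ptr *uint32, val uint32) {
//		*ptr = val // XCHGL: store plus full barrier in one instruction
//	}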

// uint64 atomicload64(uint64 volatile* addr);
TEXT runtime∕internal∕atomic·Load64(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), AX
	TESTL	$7, AX
	JZ	2(PC)
	MOVL	0, AX // crash with nil ptr deref when ptr is not 8-byte aligned
	MOVQ	(AX), M0
	MOVQ	M0, ret+4(FP)
	EMMS
	RET
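
// For reference: an 8-byte MMX MOVQ to or from an aligned address is a
// single atomic memory access, which yields a 64-bit atomic load without
// a CMPXCHG8B loop; EMMS then clears the MMX state so the x87 FPU stays
// usable. A hedged Go sketch of the contract:
//
//	func load64(ptr *uint64) uint64 {
//		return *ptr // one atomic 8-byte read, via M0
//	}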

// void runtime∕internal∕atomic·Store64(uint64 volatile* addr, uint64 v);
TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), AX
	TESTL	$7, AX
	JZ	2(PC)
	MOVL	0, AX // crash with nil ptr deref when ptr is not 8-byte aligned
	// MOVQ and EMMS were introduced on the Pentium MMX.
	MOVQ	val+4(FP), M0
	MOVQ	M0, (AX)
	EMMS
	// This is essentially a no-op, but it provides required memory fencing.
	// It can be replaced with MFENCE, but MFENCE was introduced only on the Pentium4 (SSE2).
	XORL	AX, AX
	LOCK
	XADDL	AX, (SP)
	RET
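
// For reference: the XORL/LOCK XADDL pair adds zero to the word at the
// top of the stack, which changes nothing, but any LOCK'd instruction
// acts as a full memory barrier, so the 64-bit store stays sequentially
// consistent even on pre-SSE2 CPUs that lack MFENCE. A hedged Go sketch:
//
//	func store64(ptr *uint64, val uint64) {
//		*ptr = val // one atomic 8-byte write (MOVQ via M0),
//		// followed by a full barrier (LOCK XADDL of 0 onto the stack)
//	}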

// void	runtime∕internal∕atomic·Or8(byte volatile*, byte);
TEXT runtime∕internal∕atomic·Or8(SB), NOSPLIT, $0-5
	MOVL	ptr+0(FP), AX
	MOVB	val+4(FP), BX
	LOCK
	ORB	BX, (AX)
	RET

// void	runtime∕internal∕atomic·And8(byte volatile*, byte);
TEXT runtime∕internal∕atomic·And8(SB), NOSPLIT, $0-5
	MOVL	ptr+0(FP), AX
	MOVB	val+4(FP), BX
	LOCK
	ANDB	BX, (AX)
	RET
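
// For reference, a hedged Go sketch of the two byte-wide read-modify-
// write operations above, each a single LOCK'd instruction:
//
//	func or8(ptr *uint8, val uint8)  { *ptr |= val } // LOCK ORB
//	func and8(ptr *uint8, val uint8) { *ptr &= val } // LOCK ANDB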
