...
Run Format

Text file src/runtime/internal/atomic/asm_386.s

Documentation: runtime/internal/atomic

     1	// Copyright 2015 The Go Authors. All rights reserved.
     2	// Use of this source code is governed by a BSD-style
     3	// license that can be found in the LICENSE file.
     4	
     5	#include "textflag.h"
     6	
// bool Cas(int32 *val, int32 old, int32 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	}else
//		return 0;
TEXT runtime∕internal∕atomic·Cas(SB), NOSPLIT, $0-13
	MOVL	ptr+0(FP), BX
	MOVL	old+4(FP), AX	// CMPXCHGL compares memory against AX implicitly
	MOVL	new+8(FP), CX
	LOCK
	CMPXCHGL	CX, 0(BX)	// if *BX == AX { *BX = CX; ZF=1 } else { AX = *BX; ZF=0 }
	SETEQ	ret+12(FP)	// return ZF as the bool result
	RET
    22	
// uintptr is 32 bits on 386, so Casuintptr has the same frame
// layout as Cas and simply tail-jumps to it.
TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-13
	JMP	runtime∕internal∕atomic·Cas(SB)
    25	
// uintptr is 32 bits on 386; tail-jump to the 32-bit Load.
TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT, $0-8
	JMP	runtime∕internal∕atomic·Load(SB)
    28	
// uint is 32 bits on 386; tail-jump to the 32-bit Load.
TEXT runtime∕internal∕atomic·Loaduint(SB), NOSPLIT, $0-8
	JMP	runtime∕internal∕atomic·Load(SB)
    31	
// uintptr is 32 bits on 386; tail-jump to the 32-bit Store.
TEXT runtime∕internal∕atomic·Storeuintptr(SB), NOSPLIT, $0-8
	JMP	runtime∕internal∕atomic·Store(SB)
    34	
// uintptr is 32 bits on 386; tail-jump to the 32-bit Xadd.
TEXT runtime∕internal∕atomic·Xadduintptr(SB), NOSPLIT, $0-12
	JMP runtime∕internal∕atomic·Xadd(SB)
    37	
// int64 and uint64 share a frame layout; tail-jump to Load64.
TEXT runtime∕internal∕atomic·Loadint64(SB), NOSPLIT, $0-12
	JMP runtime∕internal∕atomic·Load64(SB)
    40	
// int64 and uint64 share a frame layout; tail-jump to Xadd64.
TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-20
	JMP runtime∕internal∕atomic·Xadd64(SB)
    43	
    44	
// bool runtime∕internal∕atomic·Cas64(uint64 *val, uint64 old, uint64 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else {
//		return 0;
//	}
// Implemented with LOCK CMPXCHG8B, which requires the target to be
// 8-byte aligned; a misaligned pointer is deliberately crashed.
TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-21
	MOVL	ptr+0(FP), BP
	TESTL	$7, BP	// check 8-byte alignment
	JZ	2(PC)
	MOVL	0, BP // crash with nil ptr deref
	MOVL	old_lo+4(FP), AX	// CMPXCHG8B compares memory against DX:AX
	MOVL	old_hi+8(FP), DX
	MOVL	new_lo+12(FP), BX	// ... and stores CX:BX on success
	MOVL	new_hi+16(FP), CX
	LOCK
	CMPXCHG8B	0(BP)	// ZF set iff the exchange happened
	SETEQ	ret+20(FP)	// return ZF as the bool result
	RET
    66	
// bool Casp1(void **p, void *old, void *new)
// Atomically:
//	if(*p == old){
//		*p = new;
//		return 1;
//	}else
//		return 0;
// Pointer-typed variant of Cas; pointers are 32 bits on 386, so the
// instruction sequence is identical.
TEXT runtime∕internal∕atomic·Casp1(SB), NOSPLIT, $0-13
	MOVL	ptr+0(FP), BX
	MOVL	old+4(FP), AX	// CMPXCHGL compares memory against AX implicitly
	MOVL	new+8(FP), CX
	LOCK
	CMPXCHGL	CX, 0(BX)	// if *BX == AX { *BX = CX; ZF=1 } else { ZF=0 }
	SETEQ	ret+12(FP)	// return ZF as the bool result
	RET
    82	
// uint32 Xadd(uint32 volatile *val, int32 delta)
// Atomically:
//	*val += delta;
//	return *val;
TEXT runtime∕internal∕atomic·Xadd(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), BX
	MOVL	delta+4(FP), AX
	MOVL	AX, CX	// save delta; XADDL overwrites AX
	LOCK
	XADDL	AX, 0(BX)	// *BX += AX; AX = old *BX
	ADDL	CX, AX	// AX = old value + delta = new value
	MOVL	AX, ret+8(FP)
	RET
    96	
// uint32 Xchg(uint32 volatile *ptr, uint32 new)
// Atomically stores new into *ptr and returns the previous value.
// XCHGL with a memory operand is implicitly locked, so no LOCK prefix
// is needed.
TEXT runtime∕internal∕atomic·Xchg(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), BX
	MOVL	new+4(FP), AX
	XCHGL	AX, 0(BX)	// swap AX with *BX; AX now holds the old value
	MOVL	AX, ret+8(FP)
	RET
   103	
// uintptr is 32 bits on 386; tail-jump to the 32-bit Xchg.
TEXT runtime∕internal∕atomic·Xchguintptr(SB), NOSPLIT, $0-12
	JMP	runtime∕internal∕atomic·Xchg(SB)
   106	
   107	
// void StorepNoWB(void **ptr, void *val)
// Atomic pointer store with no write barrier ("NoWB").
// XCHGL is used instead of MOVL because it is implicitly locked and
// acts as a full memory barrier; the swapped-out old value is discarded.
TEXT runtime∕internal∕atomic·StorepNoWB(SB), NOSPLIT, $0-8
	MOVL	ptr+0(FP), BX
	MOVL	val+4(FP), AX
	XCHGL	AX, 0(BX)	// *BX = AX, with barrier semantics
	RET
   113	
// void Store(uint32 *ptr, uint32 val)
// Atomic 32-bit store. XCHGL is implicitly locked and provides a full
// memory barrier; the old value swapped into AX is discarded.
TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-8
	MOVL	ptr+0(FP), BX
	MOVL	val+4(FP), AX
	XCHGL	AX, 0(BX)	// *BX = AX, with barrier semantics
	RET
   119	
// uint64 atomicload64(uint64 volatile* addr);
// Uses a hand-encoded MMX MOVQ: an 8-byte MMX load from an 8-byte
// aligned address is a single memory access, giving an atomic 64-bit
// load on 386. Misaligned (including nil) pointers are crashed first.
TEXT runtime∕internal∕atomic·Load64(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), AX
	TESTL	$7, AX	// check 8-byte alignment
	JZ	2(PC)
	MOVL	0, AX // crash with nil ptr deref
	LEAL	ret_lo+4(FP), BX	// BX = address of the 8-byte return slot
	// MOVQ (%EAX), %MM0
	BYTE $0x0f; BYTE $0x6f; BYTE $0x00
	// MOVQ %MM0, 0(%EBX)
	BYTE $0x0f; BYTE $0x7f; BYTE $0x03
	// EMMS
	BYTE $0x0F; BYTE $0x77
	RET
   134	
// void runtime∕internal∕atomic·Store64(uint64 volatile* addr, uint64 v);
// Uses a hand-encoded MMX MOVQ store (atomic for 8-byte aligned
// addresses), then a LOCK'd no-op on the stack as a memory fence.
// Misaligned (including nil) pointers are crashed first.
TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), AX
	TESTL	$7, AX	// check 8-byte alignment
	JZ	2(PC)
	MOVL	0, AX // crash with nil ptr deref
	// MOVQ and EMMS were introduced on the Pentium MMX.
	// MOVQ 0x8(%ESP), %MM0
	BYTE $0x0f; BYTE $0x6f; BYTE $0x44; BYTE $0x24; BYTE $0x08
	// MOVQ %MM0, (%EAX)
	BYTE $0x0f; BYTE $0x7f; BYTE $0x00
	// EMMS
	BYTE $0x0F; BYTE $0x77
	// This is essentially a no-op, but it provides required memory fencing.
	// It can be replaced with MFENCE, but MFENCE was introduced only on the Pentium4 (SSE2).
	MOVL	$0, AX
	LOCK
	XADDL	AX, (SP)	// LOCK'd add of 0: full barrier, no visible effect
	RET
   154	
// void	runtime∕internal∕atomic·Or8(byte volatile*, byte);
// Atomically ORs val into the byte at *ptr.
TEXT runtime∕internal∕atomic·Or8(SB), NOSPLIT, $0-5
	MOVL	ptr+0(FP), AX
	MOVB	val+4(FP), BX
	LOCK
	ORB	BX, (AX)	// *AX |= BX, atomically
	RET
   162	
// void	runtime∕internal∕atomic·And8(byte volatile*, byte);
// Atomically ANDs val into the byte at *ptr.
TEXT runtime∕internal∕atomic·And8(SB), NOSPLIT, $0-5
	MOVL	ptr+0(FP), AX
	MOVB	val+4(FP), BX
	LOCK
	ANDB	BX, (AX)	// *AX &= BX, atomically
	RET

View as plain text