...
Run Format

Text file src/runtime/internal/atomic/asm_mips64x.s

Documentation: runtime/internal/atomic

     1	// Copyright 2015 The Go Authors. All rights reserved.
     2	// Use of this source code is governed by a BSD-style
     3	// license that can be found in the LICENSE file.
     4	
     5	// +build mips64 mips64le
     6	
     7	#include "textflag.h"
     8	
     9	// bool cas(uint32 *ptr, uint32 old, uint32 new)
    10	// Atomically:
    11	//	if(*val == old){
    12	//		*val = new;
    13	//		return 1;
    14	//	} else
    15	//		return 0;
    16	TEXT ·Cas(SB), NOSPLIT, $0-17
    17		MOVV	ptr+0(FP), R1
    18		MOVW	old+8(FP), R2
    19		MOVW	new+12(FP), R5
    20		SYNC	// full memory barrier before the LL/SC loop
    21	cas_again:
    22		MOVV	R5, R3	// SC consumes its source register, so work on a copy of new
    23		LL	(R1), R4	// load-linked: R4 = *ptr, start reservation
    24		BNE	R2, R4, cas_fail	// current value != old: fail without storing
    25		SC	R3, (R1)	// store-conditional: R3 = 1 on success, 0 if reservation lost
    26		BEQ	R3, cas_again	// reservation lost: retry the whole LL/SC sequence
    27		MOVV	$1, R1	// success: return true
    28		MOVB	R1, ret+16(FP)
    29		SYNC	// full memory barrier after the update
    30		RET
    31	cas_fail:
    32		MOVV	$0, R1	// failure: return false
    33		JMP	-4(PC)	// jump back to the MOVB above to store the 0 result and return
    34	
    35	// bool	cas64(uint64 *ptr, uint64 old, uint64 new)
    36	// Atomically:
    37	//	if(*val == *old){
    38	//		*val = new;
    39	//		return 1;
    40	//	} else {
    41	//		return 0;
    42	//	}
    43	TEXT ·Cas64(SB), NOSPLIT, $0-25
    44		MOVV	ptr+0(FP), R1
    45		MOVV	old+8(FP), R2
    46		MOVV	new+16(FP), R5
    47		SYNC	// full memory barrier before the LL/SC loop
    48	cas64_again:
    49		MOVV	R5, R3	// SC consumes its source register, so work on a copy of new
    50		LLV	(R1), R4	// 64-bit load-linked: R4 = *ptr, start reservation
    51		BNE	R2, R4, cas64_fail	// current value != old: fail without storing
    52		SCV	R3, (R1)	// 64-bit store-conditional: R3 = 1 on success, 0 if lost
    53		BEQ	R3, cas64_again	// reservation lost: retry the whole LL/SC sequence
    54		MOVV	$1, R1	// success: return true
    55		MOVB	R1, ret+24(FP)
    56		SYNC	// full memory barrier after the update
    57		RET
    58	cas64_fail:
    59		MOVV	$0, R1	// failure: return false
    60		JMP	-4(PC)	// jump back to the MOVB above to store the 0 result and return
    61	
    62	TEXT ·Casuintptr(SB), NOSPLIT, $0-25	// uintptr is 64 bits on mips64x
    63		JMP	·Cas64(SB)	// identical frame layout; tail-jump to the 64-bit CAS
    64	
    65	TEXT ·Loaduintptr(SB),  NOSPLIT|NOFRAME, $0-16	// uintptr is 64 bits on mips64x
    66		JMP	·Load64(SB)	// tail-jump to the 64-bit atomic load
    67	
    68	TEXT ·Loaduint(SB), NOSPLIT|NOFRAME, $0-16	// uint is 64 bits on mips64x
    69		JMP	·Load64(SB)	// tail-jump to the 64-bit atomic load
    70	
    71	TEXT ·Storeuintptr(SB), NOSPLIT, $0-16	// uintptr is 64 bits on mips64x
    72		JMP	·Store64(SB)	// tail-jump to the 64-bit atomic store
    73	
    74	TEXT ·Xadduintptr(SB), NOSPLIT, $0-24	// uintptr is 64 bits on mips64x
    75		JMP	·Xadd64(SB)	// tail-jump to the 64-bit atomic add
    76	
    77	TEXT ·Loadint64(SB), NOSPLIT, $0-16	// int64 load is the same operation as uint64 load
    78		JMP	·Load64(SB)	// tail-jump to the 64-bit atomic load
    79	
    80	TEXT ·Xaddint64(SB), NOSPLIT, $0-24	// two's-complement add: same instruction as unsigned
    81		JMP	·Xadd64(SB)	// tail-jump to the 64-bit atomic add
    82	
    83	// bool casp(void **val, void *old, void *new)
    84	// Atomically:
    85	//	if(*val == old){
    86	//		*val = new;
    87	//		return 1;
    88	//	} else
    89	//		return 0;
    90	TEXT ·Casp1(SB), NOSPLIT, $0-25	// pointers are 64 bits on mips64x; no write barrier here
    91		JMP runtime∕internal∕atomic·Cas64(SB)	// tail-jump to the 64-bit CAS
    92	
    93	// uint32 xadd(uint32 volatile *ptr, int32 delta)
    94	// Atomically:
    95	//	*val += delta;
    96	//	return *val;
    97	TEXT ·Xadd(SB), NOSPLIT, $0-20
    98		MOVV	ptr+0(FP), R2
    99		MOVW	delta+8(FP), R3
   100		SYNC	// full memory barrier before the LL/SC loop
   101		LL	(R2), R1	// load-linked: R1 = *ptr, start reservation
   102		ADDU	R1, R3, R4	// R4 = *ptr + delta (32-bit add)
   103		MOVV	R4, R1	// save the new value; SC consumes its source register
   104		SC	R4, (R2)	// store-conditional: R4 = 1 on success, 0 if reservation lost
   105		BEQ	R4, -4(PC)	// reservation lost: retry from the LL above
   106		MOVW	R1, ret+16(FP)	// return the updated value
   107		SYNC	// full memory barrier after the update
   108		RET
   109	
   110	TEXT ·Xadd64(SB), NOSPLIT, $0-24	// uint64 Xadd64(uint64 volatile *ptr, int64 delta): *ptr += delta; return *ptr
   111		MOVV	ptr+0(FP), R2
   112		MOVV	delta+8(FP), R3
   113		SYNC	// full memory barrier before the LL/SC loop
   114		LLV	(R2), R1	// 64-bit load-linked: R1 = *ptr, start reservation
   115		ADDVU	R1, R3, R4	// R4 = *ptr + delta (64-bit add)
   116		MOVV	R4, R1	// save the new value; SCV consumes its source register
   117		SCV	R4, (R2)	// 64-bit store-conditional: R4 = 1 on success, 0 if lost
   118		BEQ	R4, -4(PC)	// reservation lost: retry from the LLV above
   119		MOVV	R1, ret+16(FP)	// return the updated value
   120		SYNC	// full memory barrier after the update
   121		RET
   122	
   123	TEXT ·Xchg(SB), NOSPLIT, $0-20	// uint32 Xchg(uint32 volatile *ptr, uint32 new): old = *ptr; *ptr = new; return old
   124		MOVV	ptr+0(FP), R2
   125		MOVW	new+8(FP), R5
   126	
   127		SYNC	// full memory barrier before the LL/SC loop
   128		MOVV	R5, R3	// SC consumes its source register, so work on a copy of new
   129		LL	(R2), R1	// load-linked: R1 = old value, start reservation
   130		SC	R3, (R2)	// store-conditional: R3 = 1 on success, 0 if reservation lost
   131		BEQ	R3, -3(PC)	// reservation lost: retry from the MOVV copy above
   132		MOVW	R1, ret+16(FP)	// return the old value
   133		SYNC	// full memory barrier after the update
   134		RET
   135	
   136	TEXT ·Xchg64(SB), NOSPLIT, $0-24	// uint64 Xchg64(uint64 volatile *ptr, uint64 new): old = *ptr; *ptr = new; return old
   137		MOVV	ptr+0(FP), R2
   138		MOVV	new+8(FP), R5
   139	
   140		SYNC	// full memory barrier before the LL/SC loop
   141		MOVV	R5, R3	// SCV consumes its source register, so work on a copy of new
   142		LLV	(R2), R1	// 64-bit load-linked: R1 = old value, start reservation
   143		SCV	R3, (R2)	// 64-bit store-conditional: R3 = 1 on success, 0 if lost
   144		BEQ	R3, -3(PC)	// reservation lost: retry from the MOVV copy above
   145		MOVV	R1, ret+16(FP)	// return the old value
   146		SYNC	// full memory barrier after the update
   147		RET
   148	
   149	TEXT ·Xchguintptr(SB), NOSPLIT, $0-24	// uintptr is 64 bits on mips64x
   150		JMP	·Xchg64(SB)	// tail-jump to the 64-bit exchange
   151	
   152	TEXT ·StorepNoWB(SB), NOSPLIT, $0-16	// pointer store without a write barrier; caller guarantees GC safety
   153		JMP	·Store64(SB)	// tail-jump to the 64-bit atomic store
   154	
   155	TEXT ·Store(SB), NOSPLIT, $0-12	// void Store(uint32 volatile *ptr, uint32 val)
   156		MOVV	ptr+0(FP), R1
   157		MOVW	val+8(FP), R2
   158		SYNC	// barrier before the store: prior accesses complete first
   159		MOVW	R2, 0(R1)	// plain 32-bit store; barriers make it sequentially consistent
   160		SYNC	// barrier after the store: publish before later accesses
   161		RET
   162	
   163	TEXT ·Store64(SB), NOSPLIT, $0-16	// void Store64(uint64 volatile *ptr, uint64 val)
   164		MOVV	ptr+0(FP), R1
   165		MOVV	val+8(FP), R2
   166		SYNC	// barrier before the store: prior accesses complete first
   167		MOVV	R2, 0(R1)	// plain 64-bit store; barriers make it sequentially consistent
   168		SYNC	// barrier after the store: publish before later accesses
   169		RET
   170	
   171	// void	Or8(byte volatile*, byte);
   172	// Atomically ORs val into the addressed byte. There is no byte-wide LL/SC,
   173	// so the byte is updated through an aligned 32-bit LL/SC on the containing word.
   174	TEXT ·Or8(SB), NOSPLIT, $0-9
   175		MOVV	ptr+0(FP), R1
   176		MOVBU	val+8(FP), R2
   177		// Align ptr down to 4 bytes so we can use 32-bit load/store.
   178		MOVV	$~3, R3
   179		AND	R1, R3	// R3 = ptr &^ 3 (word containing the target byte)
   180		// Compute val shift.
   181	#ifdef GOARCH_mips64
   182		// Big endian.  ptr = ptr ^ 3
   183		XOR	$3, R1	// mirror the byte index within the word for big-endian lanes
   184	#endif
   185		// R4 = ((ptr & 3) * 8)
   186		AND	$3, R1, R4
   187		SLLV	$3, R4	// bit offset of the target byte within the word
   188		// Shift val for aligned ptr. R2 = val << R4
   189		SLLV	R4, R2
   190	
   191		SYNC	// full memory barrier before the LL/SC loop
   192		LL	(R3), R4	// load-linked the containing word
   193		OR	R2, R4	// OR the shifted byte into the word
   194		SC	R4, (R3)	// store-conditional: R4 = 1 on success, 0 if lost
   195		BEQ	R4, -4(PC)	// reservation lost: retry from the LL above
   196		SYNC	// full memory barrier after the update
   197		RET
   196	
   197	// void	And8(byte volatile*, byte);
   198	// Atomically ANDs val into the addressed byte. There is no byte-wide LL/SC,
   199	// so the byte is updated through an aligned 32-bit LL/SC on the containing word;
   200	// the other three bytes are masked with 1s so they are left unchanged.
   201	TEXT ·And8(SB), NOSPLIT, $0-9
   202		MOVV	ptr+0(FP), R1
   203		MOVBU	val+8(FP), R2
   204		// Align ptr down to 4 bytes so we can use 32-bit load/store.
   205		MOVV	$~3, R3
   206		AND	R1, R3	// R3 = ptr &^ 3 (word containing the target byte)
   207		// Compute val shift.
   208	#ifdef GOARCH_mips64
   209		// Big endian.  ptr = ptr ^ 3
   210		XOR	$3, R1	// mirror the byte index within the word for big-endian lanes
   211	#endif
   212		// R4 = ((ptr & 3) * 8)
   213		AND	$3, R1, R4
   214		SLLV	$3, R4	// bit offset of the target byte within the word
   215		// Shift val for aligned ptr. R2 = val << R4 | ^(0xFF << R4)
   216		MOVV	$0xFF, R5
   217		SLLV	R4, R2	// shifted val in the target byte lane
   218		SLLV	R4, R5	// 0xFF mask in the target byte lane
   219		NOR	R0, R5	// R5 = ^(0xFF << R4); NOR with zero register is bitwise NOT
   220		OR	R5, R2	// all-ones in the other lanes so AND leaves them intact
   221	
   222		SYNC	// full memory barrier before the LL/SC loop
   223		LL	(R3), R4	// load-linked the containing word
   224		AND	R2, R4	// AND the masked value into the word
   225		SC	R4, (R3)	// store-conditional: R4 = 1 on success, 0 if lost
   226		BEQ	R4, -4(PC)	// reservation lost: retry from the LL above
   227		SYNC	// full memory barrier after the update
   228		RET

View as plain text