// src/runtime/internal/atomic/atomic_loong64.s

// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "textflag.h"

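// The read-modify-write operations below use the LL/SC
// (load-linked/store-conditional) pattern: LL (LLV for 64 bits) reads
// the old value, SC (SCV) attempts the matching conditional store and
// leaves 1 in its source register on success or 0 on failure, in
// which case the sequence is retried. DBAR is a full memory barrier;
// each operation is bracketed by a pair of them so that it behaves as
// a sequentially consistent atomic.
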
// bool cas(uint32 *ptr, uint32 old, uint32 new)
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else
//		return 0;
TEXT ·Cas(SB), NOSPLIT, $0-17
	MOVV	ptr+0(FP), R4
	MOVW	old+8(FP), R5
	MOVW	new+12(FP), R6
	DBAR
cas_again:
	MOVV	R6, R7
	LL	(R4), R8
	BNE	R5, R8, cas_fail
	SC	R7, (R4)
	BEQ	R7, cas_again	// SC failed (R7 == 0), retry.
	MOVV	$1, R4
	MOVB	R4, ret+16(FP)
	DBAR
	RET
cas_fail:
	MOVV	$0, R4
	JMP	-4(PC)	// store the 0 result via the MOVB above.

// bool	cas64(uint64 *ptr, uint64 old, uint64 new)
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else {
//		return 0;
//	}
TEXT ·Cas64(SB), NOSPLIT, $0-25
	MOVV	ptr+0(FP), R4
	MOVV	old+8(FP), R5
	MOVV	new+16(FP), R6
	DBAR
cas64_again:
	MOVV	R6, R7
	LLV	(R4), R8
	BNE	R5, R8, cas64_fail
	SCV	R7, (R4)
	BEQ	R7, cas64_again	// SCV failed (R7 == 0), retry.
	MOVV	$1, R4
	MOVB	R4, ret+24(FP)
	DBAR
	RET
cas64_fail:
	MOVV	$0, R4
	JMP	-4(PC)	// store the 0 result via the MOVB above.

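// The following are thin forwarding stubs: uintptr, uint, and int64
// are all 64 bits wide on loong64, so their variants jump straight to
// the 64-bit implementations, and CasRel needs only release ordering,
// which the sequentially consistent Cas already provides.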
TEXT ·Casuintptr(SB), NOSPLIT, $0-25
	JMP	·Cas64(SB)

TEXT ·CasRel(SB), NOSPLIT, $0-17
	JMP	·Cas(SB)

TEXT ·Loaduintptr(SB), NOSPLIT|NOFRAME, $0-16
	JMP	·Load64(SB)

TEXT ·Loaduint(SB), NOSPLIT|NOFRAME, $0-16
	JMP	·Load64(SB)

TEXT ·Storeuintptr(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
	JMP	·Xadd64(SB)

TEXT ·Loadint64(SB), NOSPLIT, $0-16
	JMP	·Load64(SB)

TEXT ·Xaddint64(SB), NOSPLIT, $0-24
	JMP	·Xadd64(SB)

// bool casp(void **val, void *old, void *new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else
//		return 0;
TEXT ·Casp1(SB), NOSPLIT, $0-25
	JMP	·Cas64(SB)

// uint32 xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;
TEXT ·Xadd(SB), NOSPLIT, $0-20
	MOVV	ptr+0(FP), R4
	MOVW	delta+8(FP), R5
	DBAR
	LL	(R4), R6
	ADDU	R6, R5, R7
	MOVV	R7, R6		// save the new value for the return.
	SC	R7, (R4)
	BEQ	R7, -4(PC)	// SC failed, redo from the LL.
	MOVW	R6, ret+16(FP)
	DBAR
	RET

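// uint64 xadd64(uint64 volatile *ptr, int64 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;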
TEXT ·Xadd64(SB), NOSPLIT, $0-24
	MOVV	ptr+0(FP), R4
	MOVV	delta+8(FP), R5
	DBAR
	LLV	(R4), R6
	ADDVU	R6, R5, R7
	MOVV	R7, R6		// save the new value for the return.
	SCV	R7, (R4)
	BEQ	R7, -4(PC)	// SCV failed, redo from the LLV.
	MOVV	R6, ret+16(FP)
	DBAR
	RET

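// uint32 xchg(uint32 volatile *ptr, uint32 new)
// Atomically:
//	old = *ptr;
//	*ptr = new;
//	return old;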
TEXT ·Xchg(SB), NOSPLIT, $0-20
	MOVV	ptr+0(FP), R4
	MOVW	new+8(FP), R5

	DBAR
	MOVV	R5, R6
	LL	(R4), R7	// R7 = old value.
	SC	R6, (R4)
	BEQ	R6, -3(PC)	// SC failed, redo from the MOVV.
	MOVW	R7, ret+16(FP)
	DBAR
	RET

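// uint64 xchg64(uint64 volatile *ptr, uint64 new)
// Atomically:
//	old = *ptr;
//	*ptr = new;
//	return old;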
TEXT ·Xchg64(SB), NOSPLIT, $0-24
	MOVV	ptr+0(FP), R4
	MOVV	new+8(FP), R5

	DBAR
	MOVV	R5, R6
	LLV	(R4), R7	// R7 = old value.
	SCV	R6, (R4)
	BEQ	R6, -3(PC)	// SCV failed, redo from the MOVV.
	MOVV	R7, ret+16(FP)
	DBAR
	RET

TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
	JMP	·Xchg64(SB)

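// StorepNoWB performs *ptr = val atomically, without a write
// barrier; the StoreRel* variants need only release ordering, so all
// of them can forward to the fully barriered stores below.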
TEXT ·StorepNoWB(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

TEXT ·StoreRel(SB), NOSPLIT, $0-12
	JMP	·Store(SB)

TEXT ·StoreRel64(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

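// func Store(ptr *uint32, val uint32)
// Atomically:
//	*ptr = val;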
TEXT ·Store(SB), NOSPLIT, $0-12
	MOVV	ptr+0(FP), R4
	MOVW	val+8(FP), R5
	DBAR
	MOVW	R5, 0(R4)
	DBAR
	RET

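// func Store8(ptr *uint8, val uint8)
// Atomically:
//	*ptr = val;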
TEXT ·Store8(SB), NOSPLIT, $0-9
	MOVV	ptr+0(FP), R4
	MOVB	val+8(FP), R5
	DBAR
	MOVB	R5, 0(R4)
	DBAR
	RET

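// func Store64(ptr *uint64, val uint64)
// Atomically:
//	*ptr = val;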
TEXT ·Store64(SB), NOSPLIT, $0-16
	MOVV	ptr+0(FP), R4
	MOVV	val+8(FP), R5
	DBAR
	MOVV	R5, 0(R4)
	DBAR
	RET

// void	Or8(byte volatile*, byte);
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOVV	ptr+0(FP), R4
	MOVBU	val+8(FP), R5
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	MOVV	$~3, R6
	AND	R4, R6
	// R7 = ((ptr & 3) * 8)
	AND	$3, R4, R7
	SLLV	$3, R7
	// Shift val for aligned ptr. R5 = val << R7
	SLLV	R7, R5

	DBAR
	LL	(R6), R7	// R7 is free again; reuse it for the loaded word.
	OR	R5, R7
	SC	R7, (R6)
	BEQ	R7, -4(PC)	// SC failed, retry.
	DBAR
	RET

// void	And8(byte volatile*, byte);
TEXT ·And8(SB), NOSPLIT, $0-9
	MOVV	ptr+0(FP), R4
	MOVBU	val+8(FP), R5
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	MOVV	$~3, R6
	AND	R4, R6
	// R7 = ((ptr & 3) * 8)
	AND	$3, R4, R7
	SLLV	$3, R7
	// Shift val for aligned ptr. R5 = val << R7 | ^(0xFF << R7)
	MOVV	$0xFF, R8
	SLLV	R7, R5
	SLLV	R7, R8
	NOR	R0, R8		// R8 = ^(0xFF << R7) (NOR with $0 is bitwise NOT).
	OR	R8, R5

	DBAR
	LL	(R6), R7	// R7 is free again; reuse it for the loaded word.
	AND	R5, R7
	SC	R7, (R6)
	BEQ	R7, -4(PC)	// SC failed, retry.
	DBAR
	RET

// func Or(addr *uint32, v uint32)
TEXT ·Or(SB), NOSPLIT, $0-12
	MOVV	ptr+0(FP), R4
	MOVW	val+8(FP), R5
	DBAR
	LL	(R4), R6
	OR	R5, R6
	SC	R6, (R4)
	BEQ	R6, -4(PC)
	DBAR
	RET

// func And(addr *uint32, v uint32)
TEXT ·And(SB), NOSPLIT, $0-12
	MOVV	ptr+0(FP), R4
	MOVW	val+8(FP), R5
	DBAR
	LL	(R4), R6
	AND	R5, R6
	SC	R6, (R4)
	BEQ	R6, -4(PC)
	DBAR
	RET

// uint32 runtime∕internal∕atomic·Load(uint32 volatile* ptr)
TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-12
	MOVV	ptr+0(FP), R19
	DBAR
	MOVWU	0(R19), R19
	DBAR
	MOVW	R19, ret+8(FP)
	RET

// uint8 runtime∕internal∕atomic·Load8(uint8 volatile* ptr)
TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-9
	MOVV	ptr+0(FP), R19
	DBAR
	MOVBU	0(R19), R19
	DBAR
	MOVB	R19, ret+8(FP)
	RET

// uint64 runtime∕internal∕atomic·Load64(uint64 volatile* ptr)
TEXT ·Load64(SB),NOSPLIT|NOFRAME,$0-16
	MOVV	ptr+0(FP), R19
	DBAR
	MOVV	0(R19), R19
	DBAR
	MOVV	R19, ret+8(FP)
	RET

// void *runtime∕internal∕atomic·Loadp(void *volatile *ptr)
TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$0-16
	MOVV	ptr+0(FP), R19
	DBAR
	MOVV	0(R19), R19
	DBAR
	MOVV	R19, ret+8(FP)
	RET

// uint32 runtime∕internal∕atomic·LoadAcq(uint32 volatile* ptr)
TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-12
	JMP	·Load(SB)

// uint64 ·LoadAcq64(uint64 volatile* ptr)
TEXT ·LoadAcq64(SB),NOSPLIT|NOFRAME,$0-16
	JMP	·Load64(SB)

// uintptr ·LoadAcquintptr(uintptr volatile* ptr)
TEXT ·LoadAcquintptr(SB),NOSPLIT|NOFRAME,$0-16
	JMP	·Load64(SB)
