...

Source file src/cmd/compile/internal/ssa/rewriteAMD64.go

Documentation: cmd/compile/internal/ssa
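
Note on the listing below: the file is machine-generated. Each rewriteValueAMD64_* function was emitted from a pattern in gen/AMD64.rules; the match/cond/result comments inside each function (for example, the ADCQ rule that rewrites (ADCQ x (MOVQconst [c]) carry) into (ADCQconst x [c] carry) when is32Bit(c) holds) restate the originating rule. The top-level rewriteValueAMD64 dispatches on v.Op and reports whether any rule fired. As a rough, simplified sketch of how such a dispatcher is typically driven to a fixed point (this helper and its name applyValueRewrites are hypothetical, not part of this file; the real driver in rewrite.go does more bookkeeping):

	// Simplified sketch, assuming it lives inside package ssa so that
	// *Func, *Value, f.Blocks, and b.Values are the package's own types.
	func applyValueRewrites(f *Func, rewrite func(*Value) bool) {
		for changed := true; changed; {
			changed = false
			for _, b := range f.Blocks {
				for _, v := range b.Values {
					// rewrite is a dispatcher such as rewriteValueAMD64;
					// it returns true when it replaced v with a cheaper form.
					if rewrite(v) {
						changed = true
					}
				}
			}
		}
	}

Iterating until no rule fires matters because one rewrite (say, folding a MOVLconst into an ADDLconst) can expose a follow-on match for another rule.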

     1  // Code generated from gen/AMD64.rules; DO NOT EDIT.
     2  // generated with: cd gen; go run *.go
     3  
     4  package ssa
     5  
     6  import "fmt"
     7  import "math"
     8  import "cmd/internal/obj"
     9  import "cmd/internal/objabi"
    10  import "cmd/compile/internal/types"
    11  
    12  var _ = fmt.Println   // in case not otherwise used
    13  var _ = math.MinInt8  // in case not otherwise used
    14  var _ = obj.ANOP      // in case not otherwise used
    15  var _ = objabi.GOROOT // in case not otherwise used
    16  var _ = types.TypeMem // in case not otherwise used
    17  
    18  func rewriteValueAMD64(v *Value) bool {
    19  	switch v.Op {
    20  	case OpAMD64ADCQ:
    21  		return rewriteValueAMD64_OpAMD64ADCQ_0(v)
    22  	case OpAMD64ADCQconst:
    23  		return rewriteValueAMD64_OpAMD64ADCQconst_0(v)
    24  	case OpAMD64ADDL:
    25  		return rewriteValueAMD64_OpAMD64ADDL_0(v) || rewriteValueAMD64_OpAMD64ADDL_10(v) || rewriteValueAMD64_OpAMD64ADDL_20(v)
    26  	case OpAMD64ADDLconst:
    27  		return rewriteValueAMD64_OpAMD64ADDLconst_0(v) || rewriteValueAMD64_OpAMD64ADDLconst_10(v)
    28  	case OpAMD64ADDLconstmodify:
    29  		return rewriteValueAMD64_OpAMD64ADDLconstmodify_0(v)
    30  	case OpAMD64ADDLload:
    31  		return rewriteValueAMD64_OpAMD64ADDLload_0(v)
    32  	case OpAMD64ADDLmodify:
    33  		return rewriteValueAMD64_OpAMD64ADDLmodify_0(v)
    34  	case OpAMD64ADDQ:
    35  		return rewriteValueAMD64_OpAMD64ADDQ_0(v) || rewriteValueAMD64_OpAMD64ADDQ_10(v) || rewriteValueAMD64_OpAMD64ADDQ_20(v)
    36  	case OpAMD64ADDQcarry:
    37  		return rewriteValueAMD64_OpAMD64ADDQcarry_0(v)
    38  	case OpAMD64ADDQconst:
    39  		return rewriteValueAMD64_OpAMD64ADDQconst_0(v) || rewriteValueAMD64_OpAMD64ADDQconst_10(v)
    40  	case OpAMD64ADDQconstmodify:
    41  		return rewriteValueAMD64_OpAMD64ADDQconstmodify_0(v)
    42  	case OpAMD64ADDQload:
    43  		return rewriteValueAMD64_OpAMD64ADDQload_0(v)
    44  	case OpAMD64ADDQmodify:
    45  		return rewriteValueAMD64_OpAMD64ADDQmodify_0(v)
    46  	case OpAMD64ADDSD:
    47  		return rewriteValueAMD64_OpAMD64ADDSD_0(v)
    48  	case OpAMD64ADDSDload:
    49  		return rewriteValueAMD64_OpAMD64ADDSDload_0(v)
    50  	case OpAMD64ADDSS:
    51  		return rewriteValueAMD64_OpAMD64ADDSS_0(v)
    52  	case OpAMD64ADDSSload:
    53  		return rewriteValueAMD64_OpAMD64ADDSSload_0(v)
    54  	case OpAMD64ANDL:
    55  		return rewriteValueAMD64_OpAMD64ANDL_0(v)
    56  	case OpAMD64ANDLconst:
    57  		return rewriteValueAMD64_OpAMD64ANDLconst_0(v)
    58  	case OpAMD64ANDLconstmodify:
    59  		return rewriteValueAMD64_OpAMD64ANDLconstmodify_0(v)
    60  	case OpAMD64ANDLload:
    61  		return rewriteValueAMD64_OpAMD64ANDLload_0(v)
    62  	case OpAMD64ANDLmodify:
    63  		return rewriteValueAMD64_OpAMD64ANDLmodify_0(v)
    64  	case OpAMD64ANDQ:
    65  		return rewriteValueAMD64_OpAMD64ANDQ_0(v)
    66  	case OpAMD64ANDQconst:
    67  		return rewriteValueAMD64_OpAMD64ANDQconst_0(v)
    68  	case OpAMD64ANDQconstmodify:
    69  		return rewriteValueAMD64_OpAMD64ANDQconstmodify_0(v)
    70  	case OpAMD64ANDQload:
    71  		return rewriteValueAMD64_OpAMD64ANDQload_0(v)
    72  	case OpAMD64ANDQmodify:
    73  		return rewriteValueAMD64_OpAMD64ANDQmodify_0(v)
    74  	case OpAMD64BSFQ:
    75  		return rewriteValueAMD64_OpAMD64BSFQ_0(v)
    76  	case OpAMD64BTCLconst:
    77  		return rewriteValueAMD64_OpAMD64BTCLconst_0(v)
    78  	case OpAMD64BTCLconstmodify:
    79  		return rewriteValueAMD64_OpAMD64BTCLconstmodify_0(v)
    80  	case OpAMD64BTCLmodify:
    81  		return rewriteValueAMD64_OpAMD64BTCLmodify_0(v)
    82  	case OpAMD64BTCQconst:
    83  		return rewriteValueAMD64_OpAMD64BTCQconst_0(v)
    84  	case OpAMD64BTCQconstmodify:
    85  		return rewriteValueAMD64_OpAMD64BTCQconstmodify_0(v)
    86  	case OpAMD64BTCQmodify:
    87  		return rewriteValueAMD64_OpAMD64BTCQmodify_0(v)
    88  	case OpAMD64BTLconst:
    89  		return rewriteValueAMD64_OpAMD64BTLconst_0(v)
    90  	case OpAMD64BTQconst:
    91  		return rewriteValueAMD64_OpAMD64BTQconst_0(v)
    92  	case OpAMD64BTRLconst:
    93  		return rewriteValueAMD64_OpAMD64BTRLconst_0(v)
    94  	case OpAMD64BTRLconstmodify:
    95  		return rewriteValueAMD64_OpAMD64BTRLconstmodify_0(v)
    96  	case OpAMD64BTRLmodify:
    97  		return rewriteValueAMD64_OpAMD64BTRLmodify_0(v)
    98  	case OpAMD64BTRQconst:
    99  		return rewriteValueAMD64_OpAMD64BTRQconst_0(v)
   100  	case OpAMD64BTRQconstmodify:
   101  		return rewriteValueAMD64_OpAMD64BTRQconstmodify_0(v)
   102  	case OpAMD64BTRQmodify:
   103  		return rewriteValueAMD64_OpAMD64BTRQmodify_0(v)
   104  	case OpAMD64BTSLconst:
   105  		return rewriteValueAMD64_OpAMD64BTSLconst_0(v)
   106  	case OpAMD64BTSLconstmodify:
   107  		return rewriteValueAMD64_OpAMD64BTSLconstmodify_0(v)
   108  	case OpAMD64BTSLmodify:
   109  		return rewriteValueAMD64_OpAMD64BTSLmodify_0(v)
   110  	case OpAMD64BTSQconst:
   111  		return rewriteValueAMD64_OpAMD64BTSQconst_0(v)
   112  	case OpAMD64BTSQconstmodify:
   113  		return rewriteValueAMD64_OpAMD64BTSQconstmodify_0(v)
   114  	case OpAMD64BTSQmodify:
   115  		return rewriteValueAMD64_OpAMD64BTSQmodify_0(v)
   116  	case OpAMD64CMOVLCC:
   117  		return rewriteValueAMD64_OpAMD64CMOVLCC_0(v)
   118  	case OpAMD64CMOVLCS:
   119  		return rewriteValueAMD64_OpAMD64CMOVLCS_0(v)
   120  	case OpAMD64CMOVLEQ:
   121  		return rewriteValueAMD64_OpAMD64CMOVLEQ_0(v)
   122  	case OpAMD64CMOVLGE:
   123  		return rewriteValueAMD64_OpAMD64CMOVLGE_0(v)
   124  	case OpAMD64CMOVLGT:
   125  		return rewriteValueAMD64_OpAMD64CMOVLGT_0(v)
   126  	case OpAMD64CMOVLHI:
   127  		return rewriteValueAMD64_OpAMD64CMOVLHI_0(v)
   128  	case OpAMD64CMOVLLE:
   129  		return rewriteValueAMD64_OpAMD64CMOVLLE_0(v)
   130  	case OpAMD64CMOVLLS:
   131  		return rewriteValueAMD64_OpAMD64CMOVLLS_0(v)
   132  	case OpAMD64CMOVLLT:
   133  		return rewriteValueAMD64_OpAMD64CMOVLLT_0(v)
   134  	case OpAMD64CMOVLNE:
   135  		return rewriteValueAMD64_OpAMD64CMOVLNE_0(v)
   136  	case OpAMD64CMOVQCC:
   137  		return rewriteValueAMD64_OpAMD64CMOVQCC_0(v)
   138  	case OpAMD64CMOVQCS:
   139  		return rewriteValueAMD64_OpAMD64CMOVQCS_0(v)
   140  	case OpAMD64CMOVQEQ:
   141  		return rewriteValueAMD64_OpAMD64CMOVQEQ_0(v)
   142  	case OpAMD64CMOVQGE:
   143  		return rewriteValueAMD64_OpAMD64CMOVQGE_0(v)
   144  	case OpAMD64CMOVQGT:
   145  		return rewriteValueAMD64_OpAMD64CMOVQGT_0(v)
   146  	case OpAMD64CMOVQHI:
   147  		return rewriteValueAMD64_OpAMD64CMOVQHI_0(v)
   148  	case OpAMD64CMOVQLE:
   149  		return rewriteValueAMD64_OpAMD64CMOVQLE_0(v)
   150  	case OpAMD64CMOVQLS:
   151  		return rewriteValueAMD64_OpAMD64CMOVQLS_0(v)
   152  	case OpAMD64CMOVQLT:
   153  		return rewriteValueAMD64_OpAMD64CMOVQLT_0(v)
   154  	case OpAMD64CMOVQNE:
   155  		return rewriteValueAMD64_OpAMD64CMOVQNE_0(v)
   156  	case OpAMD64CMOVWCC:
   157  		return rewriteValueAMD64_OpAMD64CMOVWCC_0(v)
   158  	case OpAMD64CMOVWCS:
   159  		return rewriteValueAMD64_OpAMD64CMOVWCS_0(v)
   160  	case OpAMD64CMOVWEQ:
   161  		return rewriteValueAMD64_OpAMD64CMOVWEQ_0(v)
   162  	case OpAMD64CMOVWGE:
   163  		return rewriteValueAMD64_OpAMD64CMOVWGE_0(v)
   164  	case OpAMD64CMOVWGT:
   165  		return rewriteValueAMD64_OpAMD64CMOVWGT_0(v)
   166  	case OpAMD64CMOVWHI:
   167  		return rewriteValueAMD64_OpAMD64CMOVWHI_0(v)
   168  	case OpAMD64CMOVWLE:
   169  		return rewriteValueAMD64_OpAMD64CMOVWLE_0(v)
   170  	case OpAMD64CMOVWLS:
   171  		return rewriteValueAMD64_OpAMD64CMOVWLS_0(v)
   172  	case OpAMD64CMOVWLT:
   173  		return rewriteValueAMD64_OpAMD64CMOVWLT_0(v)
   174  	case OpAMD64CMOVWNE:
   175  		return rewriteValueAMD64_OpAMD64CMOVWNE_0(v)
   176  	case OpAMD64CMPB:
   177  		return rewriteValueAMD64_OpAMD64CMPB_0(v)
   178  	case OpAMD64CMPBconst:
   179  		return rewriteValueAMD64_OpAMD64CMPBconst_0(v)
   180  	case OpAMD64CMPBconstload:
   181  		return rewriteValueAMD64_OpAMD64CMPBconstload_0(v)
   182  	case OpAMD64CMPBload:
   183  		return rewriteValueAMD64_OpAMD64CMPBload_0(v)
   184  	case OpAMD64CMPL:
   185  		return rewriteValueAMD64_OpAMD64CMPL_0(v)
   186  	case OpAMD64CMPLconst:
   187  		return rewriteValueAMD64_OpAMD64CMPLconst_0(v) || rewriteValueAMD64_OpAMD64CMPLconst_10(v)
   188  	case OpAMD64CMPLconstload:
   189  		return rewriteValueAMD64_OpAMD64CMPLconstload_0(v)
   190  	case OpAMD64CMPLload:
   191  		return rewriteValueAMD64_OpAMD64CMPLload_0(v)
   192  	case OpAMD64CMPQ:
   193  		return rewriteValueAMD64_OpAMD64CMPQ_0(v)
   194  	case OpAMD64CMPQconst:
   195  		return rewriteValueAMD64_OpAMD64CMPQconst_0(v) || rewriteValueAMD64_OpAMD64CMPQconst_10(v)
   196  	case OpAMD64CMPQconstload:
   197  		return rewriteValueAMD64_OpAMD64CMPQconstload_0(v)
   198  	case OpAMD64CMPQload:
   199  		return rewriteValueAMD64_OpAMD64CMPQload_0(v)
   200  	case OpAMD64CMPW:
   201  		return rewriteValueAMD64_OpAMD64CMPW_0(v)
   202  	case OpAMD64CMPWconst:
   203  		return rewriteValueAMD64_OpAMD64CMPWconst_0(v)
   204  	case OpAMD64CMPWconstload:
   205  		return rewriteValueAMD64_OpAMD64CMPWconstload_0(v)
   206  	case OpAMD64CMPWload:
   207  		return rewriteValueAMD64_OpAMD64CMPWload_0(v)
   208  	case OpAMD64CMPXCHGLlock:
   209  		return rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v)
   210  	case OpAMD64CMPXCHGQlock:
   211  		return rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v)
   212  	case OpAMD64DIVSD:
   213  		return rewriteValueAMD64_OpAMD64DIVSD_0(v)
   214  	case OpAMD64DIVSDload:
   215  		return rewriteValueAMD64_OpAMD64DIVSDload_0(v)
   216  	case OpAMD64DIVSS:
   217  		return rewriteValueAMD64_OpAMD64DIVSS_0(v)
   218  	case OpAMD64DIVSSload:
   219  		return rewriteValueAMD64_OpAMD64DIVSSload_0(v)
   220  	case OpAMD64HMULL:
   221  		return rewriteValueAMD64_OpAMD64HMULL_0(v)
   222  	case OpAMD64HMULLU:
   223  		return rewriteValueAMD64_OpAMD64HMULLU_0(v)
   224  	case OpAMD64HMULQ:
   225  		return rewriteValueAMD64_OpAMD64HMULQ_0(v)
   226  	case OpAMD64HMULQU:
   227  		return rewriteValueAMD64_OpAMD64HMULQU_0(v)
   228  	case OpAMD64LEAL:
   229  		return rewriteValueAMD64_OpAMD64LEAL_0(v)
   230  	case OpAMD64LEAL1:
   231  		return rewriteValueAMD64_OpAMD64LEAL1_0(v)
   232  	case OpAMD64LEAL2:
   233  		return rewriteValueAMD64_OpAMD64LEAL2_0(v)
   234  	case OpAMD64LEAL4:
   235  		return rewriteValueAMD64_OpAMD64LEAL4_0(v)
   236  	case OpAMD64LEAL8:
   237  		return rewriteValueAMD64_OpAMD64LEAL8_0(v)
   238  	case OpAMD64LEAQ:
   239  		return rewriteValueAMD64_OpAMD64LEAQ_0(v)
   240  	case OpAMD64LEAQ1:
   241  		return rewriteValueAMD64_OpAMD64LEAQ1_0(v)
   242  	case OpAMD64LEAQ2:
   243  		return rewriteValueAMD64_OpAMD64LEAQ2_0(v)
   244  	case OpAMD64LEAQ4:
   245  		return rewriteValueAMD64_OpAMD64LEAQ4_0(v)
   246  	case OpAMD64LEAQ8:
   247  		return rewriteValueAMD64_OpAMD64LEAQ8_0(v)
   248  	case OpAMD64MOVBQSX:
   249  		return rewriteValueAMD64_OpAMD64MOVBQSX_0(v)
   250  	case OpAMD64MOVBQSXload:
   251  		return rewriteValueAMD64_OpAMD64MOVBQSXload_0(v)
   252  	case OpAMD64MOVBQZX:
   253  		return rewriteValueAMD64_OpAMD64MOVBQZX_0(v)
   254  	case OpAMD64MOVBload:
   255  		return rewriteValueAMD64_OpAMD64MOVBload_0(v)
   256  	case OpAMD64MOVBloadidx1:
   257  		return rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v)
   258  	case OpAMD64MOVBstore:
   259  		return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v) || rewriteValueAMD64_OpAMD64MOVBstore_20(v) || rewriteValueAMD64_OpAMD64MOVBstore_30(v)
   260  	case OpAMD64MOVBstoreconst:
   261  		return rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v)
   262  	case OpAMD64MOVBstoreconstidx1:
   263  		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v)
   264  	case OpAMD64MOVBstoreidx1:
   265  		return rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v) || rewriteValueAMD64_OpAMD64MOVBstoreidx1_10(v)
   266  	case OpAMD64MOVLQSX:
   267  		return rewriteValueAMD64_OpAMD64MOVLQSX_0(v)
   268  	case OpAMD64MOVLQSXload:
   269  		return rewriteValueAMD64_OpAMD64MOVLQSXload_0(v)
   270  	case OpAMD64MOVLQZX:
   271  		return rewriteValueAMD64_OpAMD64MOVLQZX_0(v)
   272  	case OpAMD64MOVLatomicload:
   273  		return rewriteValueAMD64_OpAMD64MOVLatomicload_0(v)
   274  	case OpAMD64MOVLf2i:
   275  		return rewriteValueAMD64_OpAMD64MOVLf2i_0(v)
   276  	case OpAMD64MOVLi2f:
   277  		return rewriteValueAMD64_OpAMD64MOVLi2f_0(v)
   278  	case OpAMD64MOVLload:
   279  		return rewriteValueAMD64_OpAMD64MOVLload_0(v) || rewriteValueAMD64_OpAMD64MOVLload_10(v)
   280  	case OpAMD64MOVLloadidx1:
   281  		return rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v)
   282  	case OpAMD64MOVLloadidx4:
   283  		return rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v)
   284  	case OpAMD64MOVLloadidx8:
   285  		return rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v)
   286  	case OpAMD64MOVLstore:
   287  		return rewriteValueAMD64_OpAMD64MOVLstore_0(v) || rewriteValueAMD64_OpAMD64MOVLstore_10(v) || rewriteValueAMD64_OpAMD64MOVLstore_20(v) || rewriteValueAMD64_OpAMD64MOVLstore_30(v)
   288  	case OpAMD64MOVLstoreconst:
   289  		return rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v)
   290  	case OpAMD64MOVLstoreconstidx1:
   291  		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v)
   292  	case OpAMD64MOVLstoreconstidx4:
   293  		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v)
   294  	case OpAMD64MOVLstoreidx1:
   295  		return rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v)
   296  	case OpAMD64MOVLstoreidx4:
   297  		return rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v)
   298  	case OpAMD64MOVLstoreidx8:
   299  		return rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v)
   300  	case OpAMD64MOVOload:
   301  		return rewriteValueAMD64_OpAMD64MOVOload_0(v)
   302  	case OpAMD64MOVOstore:
   303  		return rewriteValueAMD64_OpAMD64MOVOstore_0(v)
   304  	case OpAMD64MOVQatomicload:
   305  		return rewriteValueAMD64_OpAMD64MOVQatomicload_0(v)
   306  	case OpAMD64MOVQf2i:
   307  		return rewriteValueAMD64_OpAMD64MOVQf2i_0(v)
   308  	case OpAMD64MOVQi2f:
   309  		return rewriteValueAMD64_OpAMD64MOVQi2f_0(v)
   310  	case OpAMD64MOVQload:
   311  		return rewriteValueAMD64_OpAMD64MOVQload_0(v)
   312  	case OpAMD64MOVQloadidx1:
   313  		return rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v)
   314  	case OpAMD64MOVQloadidx8:
   315  		return rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v)
   316  	case OpAMD64MOVQstore:
   317  		return rewriteValueAMD64_OpAMD64MOVQstore_0(v) || rewriteValueAMD64_OpAMD64MOVQstore_10(v) || rewriteValueAMD64_OpAMD64MOVQstore_20(v) || rewriteValueAMD64_OpAMD64MOVQstore_30(v)
   318  	case OpAMD64MOVQstoreconst:
   319  		return rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v)
   320  	case OpAMD64MOVQstoreconstidx1:
   321  		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v)
   322  	case OpAMD64MOVQstoreconstidx8:
   323  		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v)
   324  	case OpAMD64MOVQstoreidx1:
   325  		return rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v)
   326  	case OpAMD64MOVQstoreidx8:
   327  		return rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v)
   328  	case OpAMD64MOVSDload:
   329  		return rewriteValueAMD64_OpAMD64MOVSDload_0(v)
   330  	case OpAMD64MOVSDloadidx1:
   331  		return rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v)
   332  	case OpAMD64MOVSDloadidx8:
   333  		return rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v)
   334  	case OpAMD64MOVSDstore:
   335  		return rewriteValueAMD64_OpAMD64MOVSDstore_0(v)
   336  	case OpAMD64MOVSDstoreidx1:
   337  		return rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v)
   338  	case OpAMD64MOVSDstoreidx8:
   339  		return rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v)
   340  	case OpAMD64MOVSSload:
   341  		return rewriteValueAMD64_OpAMD64MOVSSload_0(v)
   342  	case OpAMD64MOVSSloadidx1:
   343  		return rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v)
   344  	case OpAMD64MOVSSloadidx4:
   345  		return rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v)
   346  	case OpAMD64MOVSSstore:
   347  		return rewriteValueAMD64_OpAMD64MOVSSstore_0(v)
   348  	case OpAMD64MOVSSstoreidx1:
   349  		return rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v)
   350  	case OpAMD64MOVSSstoreidx4:
   351  		return rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v)
   352  	case OpAMD64MOVWQSX:
   353  		return rewriteValueAMD64_OpAMD64MOVWQSX_0(v)
   354  	case OpAMD64MOVWQSXload:
   355  		return rewriteValueAMD64_OpAMD64MOVWQSXload_0(v)
   356  	case OpAMD64MOVWQZX:
   357  		return rewriteValueAMD64_OpAMD64MOVWQZX_0(v)
   358  	case OpAMD64MOVWload:
   359  		return rewriteValueAMD64_OpAMD64MOVWload_0(v)
   360  	case OpAMD64MOVWloadidx1:
   361  		return rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v)
   362  	case OpAMD64MOVWloadidx2:
   363  		return rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v)
   364  	case OpAMD64MOVWstore:
   365  		return rewriteValueAMD64_OpAMD64MOVWstore_0(v) || rewriteValueAMD64_OpAMD64MOVWstore_10(v)
   366  	case OpAMD64MOVWstoreconst:
   367  		return rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v)
   368  	case OpAMD64MOVWstoreconstidx1:
   369  		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v)
   370  	case OpAMD64MOVWstoreconstidx2:
   371  		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v)
   372  	case OpAMD64MOVWstoreidx1:
   373  		return rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v)
   374  	case OpAMD64MOVWstoreidx2:
   375  		return rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v)
   376  	case OpAMD64MULL:
   377  		return rewriteValueAMD64_OpAMD64MULL_0(v)
   378  	case OpAMD64MULLconst:
   379  		return rewriteValueAMD64_OpAMD64MULLconst_0(v) || rewriteValueAMD64_OpAMD64MULLconst_10(v) || rewriteValueAMD64_OpAMD64MULLconst_20(v) || rewriteValueAMD64_OpAMD64MULLconst_30(v)
   380  	case OpAMD64MULQ:
   381  		return rewriteValueAMD64_OpAMD64MULQ_0(v)
   382  	case OpAMD64MULQconst:
   383  		return rewriteValueAMD64_OpAMD64MULQconst_0(v) || rewriteValueAMD64_OpAMD64MULQconst_10(v) || rewriteValueAMD64_OpAMD64MULQconst_20(v) || rewriteValueAMD64_OpAMD64MULQconst_30(v)
   384  	case OpAMD64MULSD:
   385  		return rewriteValueAMD64_OpAMD64MULSD_0(v)
   386  	case OpAMD64MULSDload:
   387  		return rewriteValueAMD64_OpAMD64MULSDload_0(v)
   388  	case OpAMD64MULSS:
   389  		return rewriteValueAMD64_OpAMD64MULSS_0(v)
   390  	case OpAMD64MULSSload:
   391  		return rewriteValueAMD64_OpAMD64MULSSload_0(v)
   392  	case OpAMD64NEGL:
   393  		return rewriteValueAMD64_OpAMD64NEGL_0(v)
   394  	case OpAMD64NEGQ:
   395  		return rewriteValueAMD64_OpAMD64NEGQ_0(v)
   396  	case OpAMD64NOTL:
   397  		return rewriteValueAMD64_OpAMD64NOTL_0(v)
   398  	case OpAMD64NOTQ:
   399  		return rewriteValueAMD64_OpAMD64NOTQ_0(v)
   400  	case OpAMD64ORL:
   401  		return rewriteValueAMD64_OpAMD64ORL_0(v) || rewriteValueAMD64_OpAMD64ORL_10(v) || rewriteValueAMD64_OpAMD64ORL_20(v) || rewriteValueAMD64_OpAMD64ORL_30(v) || rewriteValueAMD64_OpAMD64ORL_40(v) || rewriteValueAMD64_OpAMD64ORL_50(v) || rewriteValueAMD64_OpAMD64ORL_60(v) || rewriteValueAMD64_OpAMD64ORL_70(v) || rewriteValueAMD64_OpAMD64ORL_80(v) || rewriteValueAMD64_OpAMD64ORL_90(v) || rewriteValueAMD64_OpAMD64ORL_100(v) || rewriteValueAMD64_OpAMD64ORL_110(v) || rewriteValueAMD64_OpAMD64ORL_120(v) || rewriteValueAMD64_OpAMD64ORL_130(v)
   402  	case OpAMD64ORLconst:
   403  		return rewriteValueAMD64_OpAMD64ORLconst_0(v)
   404  	case OpAMD64ORLconstmodify:
   405  		return rewriteValueAMD64_OpAMD64ORLconstmodify_0(v)
   406  	case OpAMD64ORLload:
   407  		return rewriteValueAMD64_OpAMD64ORLload_0(v)
   408  	case OpAMD64ORLmodify:
   409  		return rewriteValueAMD64_OpAMD64ORLmodify_0(v)
   410  	case OpAMD64ORQ:
   411  		return rewriteValueAMD64_OpAMD64ORQ_0(v) || rewriteValueAMD64_OpAMD64ORQ_10(v) || rewriteValueAMD64_OpAMD64ORQ_20(v) || rewriteValueAMD64_OpAMD64ORQ_30(v) || rewriteValueAMD64_OpAMD64ORQ_40(v) || rewriteValueAMD64_OpAMD64ORQ_50(v) || rewriteValueAMD64_OpAMD64ORQ_60(v) || rewriteValueAMD64_OpAMD64ORQ_70(v) || rewriteValueAMD64_OpAMD64ORQ_80(v) || rewriteValueAMD64_OpAMD64ORQ_90(v) || rewriteValueAMD64_OpAMD64ORQ_100(v) || rewriteValueAMD64_OpAMD64ORQ_110(v) || rewriteValueAMD64_OpAMD64ORQ_120(v) || rewriteValueAMD64_OpAMD64ORQ_130(v) || rewriteValueAMD64_OpAMD64ORQ_140(v) || rewriteValueAMD64_OpAMD64ORQ_150(v) || rewriteValueAMD64_OpAMD64ORQ_160(v)
   412  	case OpAMD64ORQconst:
   413  		return rewriteValueAMD64_OpAMD64ORQconst_0(v)
   414  	case OpAMD64ORQconstmodify:
   415  		return rewriteValueAMD64_OpAMD64ORQconstmodify_0(v)
   416  	case OpAMD64ORQload:
   417  		return rewriteValueAMD64_OpAMD64ORQload_0(v)
   418  	case OpAMD64ORQmodify:
   419  		return rewriteValueAMD64_OpAMD64ORQmodify_0(v)
   420  	case OpAMD64ROLB:
   421  		return rewriteValueAMD64_OpAMD64ROLB_0(v)
   422  	case OpAMD64ROLBconst:
   423  		return rewriteValueAMD64_OpAMD64ROLBconst_0(v)
   424  	case OpAMD64ROLL:
   425  		return rewriteValueAMD64_OpAMD64ROLL_0(v)
   426  	case OpAMD64ROLLconst:
   427  		return rewriteValueAMD64_OpAMD64ROLLconst_0(v)
   428  	case OpAMD64ROLQ:
   429  		return rewriteValueAMD64_OpAMD64ROLQ_0(v)
   430  	case OpAMD64ROLQconst:
   431  		return rewriteValueAMD64_OpAMD64ROLQconst_0(v)
   432  	case OpAMD64ROLW:
   433  		return rewriteValueAMD64_OpAMD64ROLW_0(v)
   434  	case OpAMD64ROLWconst:
   435  		return rewriteValueAMD64_OpAMD64ROLWconst_0(v)
   436  	case OpAMD64RORB:
   437  		return rewriteValueAMD64_OpAMD64RORB_0(v)
   438  	case OpAMD64RORL:
   439  		return rewriteValueAMD64_OpAMD64RORL_0(v)
   440  	case OpAMD64RORQ:
   441  		return rewriteValueAMD64_OpAMD64RORQ_0(v)
   442  	case OpAMD64RORW:
   443  		return rewriteValueAMD64_OpAMD64RORW_0(v)
   444  	case OpAMD64SARB:
   445  		return rewriteValueAMD64_OpAMD64SARB_0(v)
   446  	case OpAMD64SARBconst:
   447  		return rewriteValueAMD64_OpAMD64SARBconst_0(v)
   448  	case OpAMD64SARL:
   449  		return rewriteValueAMD64_OpAMD64SARL_0(v)
   450  	case OpAMD64SARLconst:
   451  		return rewriteValueAMD64_OpAMD64SARLconst_0(v)
   452  	case OpAMD64SARQ:
   453  		return rewriteValueAMD64_OpAMD64SARQ_0(v)
   454  	case OpAMD64SARQconst:
   455  		return rewriteValueAMD64_OpAMD64SARQconst_0(v)
   456  	case OpAMD64SARW:
   457  		return rewriteValueAMD64_OpAMD64SARW_0(v)
   458  	case OpAMD64SARWconst:
   459  		return rewriteValueAMD64_OpAMD64SARWconst_0(v)
   460  	case OpAMD64SBBLcarrymask:
   461  		return rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v)
   462  	case OpAMD64SBBQ:
   463  		return rewriteValueAMD64_OpAMD64SBBQ_0(v)
   464  	case OpAMD64SBBQcarrymask:
   465  		return rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v)
   466  	case OpAMD64SBBQconst:
   467  		return rewriteValueAMD64_OpAMD64SBBQconst_0(v)
   468  	case OpAMD64SETA:
   469  		return rewriteValueAMD64_OpAMD64SETA_0(v)
   470  	case OpAMD64SETAE:
   471  		return rewriteValueAMD64_OpAMD64SETAE_0(v)
   472  	case OpAMD64SETAEstore:
   473  		return rewriteValueAMD64_OpAMD64SETAEstore_0(v)
   474  	case OpAMD64SETAstore:
   475  		return rewriteValueAMD64_OpAMD64SETAstore_0(v)
   476  	case OpAMD64SETB:
   477  		return rewriteValueAMD64_OpAMD64SETB_0(v)
   478  	case OpAMD64SETBE:
   479  		return rewriteValueAMD64_OpAMD64SETBE_0(v)
   480  	case OpAMD64SETBEstore:
   481  		return rewriteValueAMD64_OpAMD64SETBEstore_0(v)
   482  	case OpAMD64SETBstore:
   483  		return rewriteValueAMD64_OpAMD64SETBstore_0(v)
   484  	case OpAMD64SETEQ:
   485  		return rewriteValueAMD64_OpAMD64SETEQ_0(v) || rewriteValueAMD64_OpAMD64SETEQ_10(v) || rewriteValueAMD64_OpAMD64SETEQ_20(v)
   486  	case OpAMD64SETEQstore:
   487  		return rewriteValueAMD64_OpAMD64SETEQstore_0(v) || rewriteValueAMD64_OpAMD64SETEQstore_10(v) || rewriteValueAMD64_OpAMD64SETEQstore_20(v)
   488  	case OpAMD64SETG:
   489  		return rewriteValueAMD64_OpAMD64SETG_0(v)
   490  	case OpAMD64SETGE:
   491  		return rewriteValueAMD64_OpAMD64SETGE_0(v)
   492  	case OpAMD64SETGEstore:
   493  		return rewriteValueAMD64_OpAMD64SETGEstore_0(v)
   494  	case OpAMD64SETGstore:
   495  		return rewriteValueAMD64_OpAMD64SETGstore_0(v)
   496  	case OpAMD64SETL:
   497  		return rewriteValueAMD64_OpAMD64SETL_0(v)
   498  	case OpAMD64SETLE:
   499  		return rewriteValueAMD64_OpAMD64SETLE_0(v)
   500  	case OpAMD64SETLEstore:
   501  		return rewriteValueAMD64_OpAMD64SETLEstore_0(v)
   502  	case OpAMD64SETLstore:
   503  		return rewriteValueAMD64_OpAMD64SETLstore_0(v)
   504  	case OpAMD64SETNE:
   505  		return rewriteValueAMD64_OpAMD64SETNE_0(v) || rewriteValueAMD64_OpAMD64SETNE_10(v) || rewriteValueAMD64_OpAMD64SETNE_20(v)
   506  	case OpAMD64SETNEstore:
   507  		return rewriteValueAMD64_OpAMD64SETNEstore_0(v) || rewriteValueAMD64_OpAMD64SETNEstore_10(v) || rewriteValueAMD64_OpAMD64SETNEstore_20(v)
   508  	case OpAMD64SHLL:
   509  		return rewriteValueAMD64_OpAMD64SHLL_0(v)
   510  	case OpAMD64SHLLconst:
   511  		return rewriteValueAMD64_OpAMD64SHLLconst_0(v)
   512  	case OpAMD64SHLQ:
   513  		return rewriteValueAMD64_OpAMD64SHLQ_0(v)
   514  	case OpAMD64SHLQconst:
   515  		return rewriteValueAMD64_OpAMD64SHLQconst_0(v)
   516  	case OpAMD64SHRB:
   517  		return rewriteValueAMD64_OpAMD64SHRB_0(v)
   518  	case OpAMD64SHRBconst:
   519  		return rewriteValueAMD64_OpAMD64SHRBconst_0(v)
   520  	case OpAMD64SHRL:
   521  		return rewriteValueAMD64_OpAMD64SHRL_0(v)
   522  	case OpAMD64SHRLconst:
   523  		return rewriteValueAMD64_OpAMD64SHRLconst_0(v)
   524  	case OpAMD64SHRQ:
   525  		return rewriteValueAMD64_OpAMD64SHRQ_0(v)
   526  	case OpAMD64SHRQconst:
   527  		return rewriteValueAMD64_OpAMD64SHRQconst_0(v)
   528  	case OpAMD64SHRW:
   529  		return rewriteValueAMD64_OpAMD64SHRW_0(v)
   530  	case OpAMD64SHRWconst:
   531  		return rewriteValueAMD64_OpAMD64SHRWconst_0(v)
   532  	case OpAMD64SUBL:
   533  		return rewriteValueAMD64_OpAMD64SUBL_0(v)
   534  	case OpAMD64SUBLconst:
   535  		return rewriteValueAMD64_OpAMD64SUBLconst_0(v)
   536  	case OpAMD64SUBLload:
   537  		return rewriteValueAMD64_OpAMD64SUBLload_0(v)
   538  	case OpAMD64SUBLmodify:
   539  		return rewriteValueAMD64_OpAMD64SUBLmodify_0(v)
   540  	case OpAMD64SUBQ:
   541  		return rewriteValueAMD64_OpAMD64SUBQ_0(v)
   542  	case OpAMD64SUBQborrow:
   543  		return rewriteValueAMD64_OpAMD64SUBQborrow_0(v)
   544  	case OpAMD64SUBQconst:
   545  		return rewriteValueAMD64_OpAMD64SUBQconst_0(v)
   546  	case OpAMD64SUBQload:
   547  		return rewriteValueAMD64_OpAMD64SUBQload_0(v)
   548  	case OpAMD64SUBQmodify:
   549  		return rewriteValueAMD64_OpAMD64SUBQmodify_0(v)
   550  	case OpAMD64SUBSD:
   551  		return rewriteValueAMD64_OpAMD64SUBSD_0(v)
   552  	case OpAMD64SUBSDload:
   553  		return rewriteValueAMD64_OpAMD64SUBSDload_0(v)
   554  	case OpAMD64SUBSS:
   555  		return rewriteValueAMD64_OpAMD64SUBSS_0(v)
   556  	case OpAMD64SUBSSload:
   557  		return rewriteValueAMD64_OpAMD64SUBSSload_0(v)
   558  	case OpAMD64TESTB:
   559  		return rewriteValueAMD64_OpAMD64TESTB_0(v)
   560  	case OpAMD64TESTBconst:
   561  		return rewriteValueAMD64_OpAMD64TESTBconst_0(v)
   562  	case OpAMD64TESTL:
   563  		return rewriteValueAMD64_OpAMD64TESTL_0(v)
   564  	case OpAMD64TESTLconst:
   565  		return rewriteValueAMD64_OpAMD64TESTLconst_0(v)
   566  	case OpAMD64TESTQ:
   567  		return rewriteValueAMD64_OpAMD64TESTQ_0(v)
   568  	case OpAMD64TESTQconst:
   569  		return rewriteValueAMD64_OpAMD64TESTQconst_0(v)
   570  	case OpAMD64TESTW:
   571  		return rewriteValueAMD64_OpAMD64TESTW_0(v)
   572  	case OpAMD64TESTWconst:
   573  		return rewriteValueAMD64_OpAMD64TESTWconst_0(v)
   574  	case OpAMD64XADDLlock:
   575  		return rewriteValueAMD64_OpAMD64XADDLlock_0(v)
   576  	case OpAMD64XADDQlock:
   577  		return rewriteValueAMD64_OpAMD64XADDQlock_0(v)
   578  	case OpAMD64XCHGL:
   579  		return rewriteValueAMD64_OpAMD64XCHGL_0(v)
   580  	case OpAMD64XCHGQ:
   581  		return rewriteValueAMD64_OpAMD64XCHGQ_0(v)
   582  	case OpAMD64XORL:
   583  		return rewriteValueAMD64_OpAMD64XORL_0(v) || rewriteValueAMD64_OpAMD64XORL_10(v)
   584  	case OpAMD64XORLconst:
   585  		return rewriteValueAMD64_OpAMD64XORLconst_0(v) || rewriteValueAMD64_OpAMD64XORLconst_10(v)
   586  	case OpAMD64XORLconstmodify:
   587  		return rewriteValueAMD64_OpAMD64XORLconstmodify_0(v)
   588  	case OpAMD64XORLload:
   589  		return rewriteValueAMD64_OpAMD64XORLload_0(v)
   590  	case OpAMD64XORLmodify:
   591  		return rewriteValueAMD64_OpAMD64XORLmodify_0(v)
   592  	case OpAMD64XORQ:
   593  		return rewriteValueAMD64_OpAMD64XORQ_0(v) || rewriteValueAMD64_OpAMD64XORQ_10(v)
   594  	case OpAMD64XORQconst:
   595  		return rewriteValueAMD64_OpAMD64XORQconst_0(v)
   596  	case OpAMD64XORQconstmodify:
   597  		return rewriteValueAMD64_OpAMD64XORQconstmodify_0(v)
   598  	case OpAMD64XORQload:
   599  		return rewriteValueAMD64_OpAMD64XORQload_0(v)
   600  	case OpAMD64XORQmodify:
   601  		return rewriteValueAMD64_OpAMD64XORQmodify_0(v)
   602  	case OpAdd16:
   603  		return rewriteValueAMD64_OpAdd16_0(v)
   604  	case OpAdd32:
   605  		return rewriteValueAMD64_OpAdd32_0(v)
   606  	case OpAdd32F:
   607  		return rewriteValueAMD64_OpAdd32F_0(v)
   608  	case OpAdd64:
   609  		return rewriteValueAMD64_OpAdd64_0(v)
   610  	case OpAdd64F:
   611  		return rewriteValueAMD64_OpAdd64F_0(v)
   612  	case OpAdd8:
   613  		return rewriteValueAMD64_OpAdd8_0(v)
   614  	case OpAddPtr:
   615  		return rewriteValueAMD64_OpAddPtr_0(v)
   616  	case OpAddr:
   617  		return rewriteValueAMD64_OpAddr_0(v)
   618  	case OpAnd16:
   619  		return rewriteValueAMD64_OpAnd16_0(v)
   620  	case OpAnd32:
   621  		return rewriteValueAMD64_OpAnd32_0(v)
   622  	case OpAnd64:
   623  		return rewriteValueAMD64_OpAnd64_0(v)
   624  	case OpAnd8:
   625  		return rewriteValueAMD64_OpAnd8_0(v)
   626  	case OpAndB:
   627  		return rewriteValueAMD64_OpAndB_0(v)
   628  	case OpAtomicAdd32:
   629  		return rewriteValueAMD64_OpAtomicAdd32_0(v)
   630  	case OpAtomicAdd64:
   631  		return rewriteValueAMD64_OpAtomicAdd64_0(v)
   632  	case OpAtomicAnd8:
   633  		return rewriteValueAMD64_OpAtomicAnd8_0(v)
   634  	case OpAtomicCompareAndSwap32:
   635  		return rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v)
   636  	case OpAtomicCompareAndSwap64:
   637  		return rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v)
   638  	case OpAtomicExchange32:
   639  		return rewriteValueAMD64_OpAtomicExchange32_0(v)
   640  	case OpAtomicExchange64:
   641  		return rewriteValueAMD64_OpAtomicExchange64_0(v)
   642  	case OpAtomicLoad32:
   643  		return rewriteValueAMD64_OpAtomicLoad32_0(v)
   644  	case OpAtomicLoad64:
   645  		return rewriteValueAMD64_OpAtomicLoad64_0(v)
   646  	case OpAtomicLoadPtr:
   647  		return rewriteValueAMD64_OpAtomicLoadPtr_0(v)
   648  	case OpAtomicOr8:
   649  		return rewriteValueAMD64_OpAtomicOr8_0(v)
   650  	case OpAtomicStore32:
   651  		return rewriteValueAMD64_OpAtomicStore32_0(v)
   652  	case OpAtomicStore64:
   653  		return rewriteValueAMD64_OpAtomicStore64_0(v)
   654  	case OpAtomicStorePtrNoWB:
   655  		return rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v)
   656  	case OpAvg64u:
   657  		return rewriteValueAMD64_OpAvg64u_0(v)
   658  	case OpBitLen16:
   659  		return rewriteValueAMD64_OpBitLen16_0(v)
   660  	case OpBitLen32:
   661  		return rewriteValueAMD64_OpBitLen32_0(v)
   662  	case OpBitLen64:
   663  		return rewriteValueAMD64_OpBitLen64_0(v)
   664  	case OpBitLen8:
   665  		return rewriteValueAMD64_OpBitLen8_0(v)
   666  	case OpBswap32:
   667  		return rewriteValueAMD64_OpBswap32_0(v)
   668  	case OpBswap64:
   669  		return rewriteValueAMD64_OpBswap64_0(v)
   670  	case OpCeil:
   671  		return rewriteValueAMD64_OpCeil_0(v)
   672  	case OpClosureCall:
   673  		return rewriteValueAMD64_OpClosureCall_0(v)
   674  	case OpCom16:
   675  		return rewriteValueAMD64_OpCom16_0(v)
   676  	case OpCom32:
   677  		return rewriteValueAMD64_OpCom32_0(v)
   678  	case OpCom64:
   679  		return rewriteValueAMD64_OpCom64_0(v)
   680  	case OpCom8:
   681  		return rewriteValueAMD64_OpCom8_0(v)
   682  	case OpCondSelect:
   683  		return rewriteValueAMD64_OpCondSelect_0(v) || rewriteValueAMD64_OpCondSelect_10(v) || rewriteValueAMD64_OpCondSelect_20(v) || rewriteValueAMD64_OpCondSelect_30(v) || rewriteValueAMD64_OpCondSelect_40(v)
   684  	case OpConst16:
   685  		return rewriteValueAMD64_OpConst16_0(v)
   686  	case OpConst32:
   687  		return rewriteValueAMD64_OpConst32_0(v)
   688  	case OpConst32F:
   689  		return rewriteValueAMD64_OpConst32F_0(v)
   690  	case OpConst64:
   691  		return rewriteValueAMD64_OpConst64_0(v)
   692  	case OpConst64F:
   693  		return rewriteValueAMD64_OpConst64F_0(v)
   694  	case OpConst8:
   695  		return rewriteValueAMD64_OpConst8_0(v)
   696  	case OpConstBool:
   697  		return rewriteValueAMD64_OpConstBool_0(v)
   698  	case OpConstNil:
   699  		return rewriteValueAMD64_OpConstNil_0(v)
   700  	case OpCtz16:
   701  		return rewriteValueAMD64_OpCtz16_0(v)
   702  	case OpCtz16NonZero:
   703  		return rewriteValueAMD64_OpCtz16NonZero_0(v)
   704  	case OpCtz32:
   705  		return rewriteValueAMD64_OpCtz32_0(v)
   706  	case OpCtz32NonZero:
   707  		return rewriteValueAMD64_OpCtz32NonZero_0(v)
   708  	case OpCtz64:
   709  		return rewriteValueAMD64_OpCtz64_0(v)
   710  	case OpCtz64NonZero:
   711  		return rewriteValueAMD64_OpCtz64NonZero_0(v)
   712  	case OpCtz8:
   713  		return rewriteValueAMD64_OpCtz8_0(v)
   714  	case OpCtz8NonZero:
   715  		return rewriteValueAMD64_OpCtz8NonZero_0(v)
   716  	case OpCvt32Fto32:
   717  		return rewriteValueAMD64_OpCvt32Fto32_0(v)
   718  	case OpCvt32Fto64:
   719  		return rewriteValueAMD64_OpCvt32Fto64_0(v)
   720  	case OpCvt32Fto64F:
   721  		return rewriteValueAMD64_OpCvt32Fto64F_0(v)
   722  	case OpCvt32to32F:
   723  		return rewriteValueAMD64_OpCvt32to32F_0(v)
   724  	case OpCvt32to64F:
   725  		return rewriteValueAMD64_OpCvt32to64F_0(v)
   726  	case OpCvt64Fto32:
   727  		return rewriteValueAMD64_OpCvt64Fto32_0(v)
   728  	case OpCvt64Fto32F:
   729  		return rewriteValueAMD64_OpCvt64Fto32F_0(v)
   730  	case OpCvt64Fto64:
   731  		return rewriteValueAMD64_OpCvt64Fto64_0(v)
   732  	case OpCvt64to32F:
   733  		return rewriteValueAMD64_OpCvt64to32F_0(v)
   734  	case OpCvt64to64F:
   735  		return rewriteValueAMD64_OpCvt64to64F_0(v)
   736  	case OpDiv128u:
   737  		return rewriteValueAMD64_OpDiv128u_0(v)
   738  	case OpDiv16:
   739  		return rewriteValueAMD64_OpDiv16_0(v)
   740  	case OpDiv16u:
   741  		return rewriteValueAMD64_OpDiv16u_0(v)
   742  	case OpDiv32:
   743  		return rewriteValueAMD64_OpDiv32_0(v)
   744  	case OpDiv32F:
   745  		return rewriteValueAMD64_OpDiv32F_0(v)
   746  	case OpDiv32u:
   747  		return rewriteValueAMD64_OpDiv32u_0(v)
   748  	case OpDiv64:
   749  		return rewriteValueAMD64_OpDiv64_0(v)
   750  	case OpDiv64F:
   751  		return rewriteValueAMD64_OpDiv64F_0(v)
   752  	case OpDiv64u:
   753  		return rewriteValueAMD64_OpDiv64u_0(v)
   754  	case OpDiv8:
   755  		return rewriteValueAMD64_OpDiv8_0(v)
   756  	case OpDiv8u:
   757  		return rewriteValueAMD64_OpDiv8u_0(v)
   758  	case OpEq16:
   759  		return rewriteValueAMD64_OpEq16_0(v)
   760  	case OpEq32:
   761  		return rewriteValueAMD64_OpEq32_0(v)
   762  	case OpEq32F:
   763  		return rewriteValueAMD64_OpEq32F_0(v)
   764  	case OpEq64:
   765  		return rewriteValueAMD64_OpEq64_0(v)
   766  	case OpEq64F:
   767  		return rewriteValueAMD64_OpEq64F_0(v)
   768  	case OpEq8:
   769  		return rewriteValueAMD64_OpEq8_0(v)
   770  	case OpEqB:
   771  		return rewriteValueAMD64_OpEqB_0(v)
   772  	case OpEqPtr:
   773  		return rewriteValueAMD64_OpEqPtr_0(v)
   774  	case OpFloor:
   775  		return rewriteValueAMD64_OpFloor_0(v)
   776  	case OpGeq16:
   777  		return rewriteValueAMD64_OpGeq16_0(v)
   778  	case OpGeq16U:
   779  		return rewriteValueAMD64_OpGeq16U_0(v)
   780  	case OpGeq32:
   781  		return rewriteValueAMD64_OpGeq32_0(v)
   782  	case OpGeq32F:
   783  		return rewriteValueAMD64_OpGeq32F_0(v)
   784  	case OpGeq32U:
   785  		return rewriteValueAMD64_OpGeq32U_0(v)
   786  	case OpGeq64:
   787  		return rewriteValueAMD64_OpGeq64_0(v)
   788  	case OpGeq64F:
   789  		return rewriteValueAMD64_OpGeq64F_0(v)
   790  	case OpGeq64U:
   791  		return rewriteValueAMD64_OpGeq64U_0(v)
   792  	case OpGeq8:
   793  		return rewriteValueAMD64_OpGeq8_0(v)
   794  	case OpGeq8U:
   795  		return rewriteValueAMD64_OpGeq8U_0(v)
   796  	case OpGetCallerPC:
   797  		return rewriteValueAMD64_OpGetCallerPC_0(v)
   798  	case OpGetCallerSP:
   799  		return rewriteValueAMD64_OpGetCallerSP_0(v)
   800  	case OpGetClosurePtr:
   801  		return rewriteValueAMD64_OpGetClosurePtr_0(v)
   802  	case OpGetG:
   803  		return rewriteValueAMD64_OpGetG_0(v)
   804  	case OpGreater16:
   805  		return rewriteValueAMD64_OpGreater16_0(v)
   806  	case OpGreater16U:
   807  		return rewriteValueAMD64_OpGreater16U_0(v)
   808  	case OpGreater32:
   809  		return rewriteValueAMD64_OpGreater32_0(v)
   810  	case OpGreater32F:
   811  		return rewriteValueAMD64_OpGreater32F_0(v)
   812  	case OpGreater32U:
   813  		return rewriteValueAMD64_OpGreater32U_0(v)
   814  	case OpGreater64:
   815  		return rewriteValueAMD64_OpGreater64_0(v)
   816  	case OpGreater64F:
   817  		return rewriteValueAMD64_OpGreater64F_0(v)
   818  	case OpGreater64U:
   819  		return rewriteValueAMD64_OpGreater64U_0(v)
   820  	case OpGreater8:
   821  		return rewriteValueAMD64_OpGreater8_0(v)
   822  	case OpGreater8U:
   823  		return rewriteValueAMD64_OpGreater8U_0(v)
   824  	case OpHmul32:
   825  		return rewriteValueAMD64_OpHmul32_0(v)
   826  	case OpHmul32u:
   827  		return rewriteValueAMD64_OpHmul32u_0(v)
   828  	case OpHmul64:
   829  		return rewriteValueAMD64_OpHmul64_0(v)
   830  	case OpHmul64u:
   831  		return rewriteValueAMD64_OpHmul64u_0(v)
   832  	case OpInt64Hi:
   833  		return rewriteValueAMD64_OpInt64Hi_0(v)
   834  	case OpInterCall:
   835  		return rewriteValueAMD64_OpInterCall_0(v)
   836  	case OpIsInBounds:
   837  		return rewriteValueAMD64_OpIsInBounds_0(v)
   838  	case OpIsNonNil:
   839  		return rewriteValueAMD64_OpIsNonNil_0(v)
   840  	case OpIsSliceInBounds:
   841  		return rewriteValueAMD64_OpIsSliceInBounds_0(v)
   842  	case OpLeq16:
   843  		return rewriteValueAMD64_OpLeq16_0(v)
   844  	case OpLeq16U:
   845  		return rewriteValueAMD64_OpLeq16U_0(v)
   846  	case OpLeq32:
   847  		return rewriteValueAMD64_OpLeq32_0(v)
   848  	case OpLeq32F:
   849  		return rewriteValueAMD64_OpLeq32F_0(v)
   850  	case OpLeq32U:
   851  		return rewriteValueAMD64_OpLeq32U_0(v)
   852  	case OpLeq64:
   853  		return rewriteValueAMD64_OpLeq64_0(v)
   854  	case OpLeq64F:
   855  		return rewriteValueAMD64_OpLeq64F_0(v)
   856  	case OpLeq64U:
   857  		return rewriteValueAMD64_OpLeq64U_0(v)
   858  	case OpLeq8:
   859  		return rewriteValueAMD64_OpLeq8_0(v)
   860  	case OpLeq8U:
   861  		return rewriteValueAMD64_OpLeq8U_0(v)
   862  	case OpLess16:
   863  		return rewriteValueAMD64_OpLess16_0(v)
   864  	case OpLess16U:
   865  		return rewriteValueAMD64_OpLess16U_0(v)
   866  	case OpLess32:
   867  		return rewriteValueAMD64_OpLess32_0(v)
   868  	case OpLess32F:
   869  		return rewriteValueAMD64_OpLess32F_0(v)
   870  	case OpLess32U:
   871  		return rewriteValueAMD64_OpLess32U_0(v)
   872  	case OpLess64:
   873  		return rewriteValueAMD64_OpLess64_0(v)
   874  	case OpLess64F:
   875  		return rewriteValueAMD64_OpLess64F_0(v)
   876  	case OpLess64U:
   877  		return rewriteValueAMD64_OpLess64U_0(v)
   878  	case OpLess8:
   879  		return rewriteValueAMD64_OpLess8_0(v)
   880  	case OpLess8U:
   881  		return rewriteValueAMD64_OpLess8U_0(v)
   882  	case OpLoad:
   883  		return rewriteValueAMD64_OpLoad_0(v)
   884  	case OpLocalAddr:
   885  		return rewriteValueAMD64_OpLocalAddr_0(v)
   886  	case OpLsh16x16:
   887  		return rewriteValueAMD64_OpLsh16x16_0(v)
   888  	case OpLsh16x32:
   889  		return rewriteValueAMD64_OpLsh16x32_0(v)
   890  	case OpLsh16x64:
   891  		return rewriteValueAMD64_OpLsh16x64_0(v)
   892  	case OpLsh16x8:
   893  		return rewriteValueAMD64_OpLsh16x8_0(v)
   894  	case OpLsh32x16:
   895  		return rewriteValueAMD64_OpLsh32x16_0(v)
   896  	case OpLsh32x32:
   897  		return rewriteValueAMD64_OpLsh32x32_0(v)
   898  	case OpLsh32x64:
   899  		return rewriteValueAMD64_OpLsh32x64_0(v)
   900  	case OpLsh32x8:
   901  		return rewriteValueAMD64_OpLsh32x8_0(v)
   902  	case OpLsh64x16:
   903  		return rewriteValueAMD64_OpLsh64x16_0(v)
   904  	case OpLsh64x32:
   905  		return rewriteValueAMD64_OpLsh64x32_0(v)
   906  	case OpLsh64x64:
   907  		return rewriteValueAMD64_OpLsh64x64_0(v)
   908  	case OpLsh64x8:
   909  		return rewriteValueAMD64_OpLsh64x8_0(v)
   910  	case OpLsh8x16:
   911  		return rewriteValueAMD64_OpLsh8x16_0(v)
   912  	case OpLsh8x32:
   913  		return rewriteValueAMD64_OpLsh8x32_0(v)
   914  	case OpLsh8x64:
   915  		return rewriteValueAMD64_OpLsh8x64_0(v)
   916  	case OpLsh8x8:
   917  		return rewriteValueAMD64_OpLsh8x8_0(v)
   918  	case OpMod16:
   919  		return rewriteValueAMD64_OpMod16_0(v)
   920  	case OpMod16u:
   921  		return rewriteValueAMD64_OpMod16u_0(v)
   922  	case OpMod32:
   923  		return rewriteValueAMD64_OpMod32_0(v)
   924  	case OpMod32u:
   925  		return rewriteValueAMD64_OpMod32u_0(v)
   926  	case OpMod64:
   927  		return rewriteValueAMD64_OpMod64_0(v)
   928  	case OpMod64u:
   929  		return rewriteValueAMD64_OpMod64u_0(v)
   930  	case OpMod8:
   931  		return rewriteValueAMD64_OpMod8_0(v)
   932  	case OpMod8u:
   933  		return rewriteValueAMD64_OpMod8u_0(v)
   934  	case OpMove:
   935  		return rewriteValueAMD64_OpMove_0(v) || rewriteValueAMD64_OpMove_10(v) || rewriteValueAMD64_OpMove_20(v)
   936  	case OpMul16:
   937  		return rewriteValueAMD64_OpMul16_0(v)
   938  	case OpMul32:
   939  		return rewriteValueAMD64_OpMul32_0(v)
   940  	case OpMul32F:
   941  		return rewriteValueAMD64_OpMul32F_0(v)
   942  	case OpMul64:
   943  		return rewriteValueAMD64_OpMul64_0(v)
   944  	case OpMul64F:
   945  		return rewriteValueAMD64_OpMul64F_0(v)
   946  	case OpMul64uhilo:
   947  		return rewriteValueAMD64_OpMul64uhilo_0(v)
   948  	case OpMul8:
   949  		return rewriteValueAMD64_OpMul8_0(v)
   950  	case OpNeg16:
   951  		return rewriteValueAMD64_OpNeg16_0(v)
   952  	case OpNeg32:
   953  		return rewriteValueAMD64_OpNeg32_0(v)
   954  	case OpNeg32F:
   955  		return rewriteValueAMD64_OpNeg32F_0(v)
   956  	case OpNeg64:
   957  		return rewriteValueAMD64_OpNeg64_0(v)
   958  	case OpNeg64F:
   959  		return rewriteValueAMD64_OpNeg64F_0(v)
   960  	case OpNeg8:
   961  		return rewriteValueAMD64_OpNeg8_0(v)
   962  	case OpNeq16:
   963  		return rewriteValueAMD64_OpNeq16_0(v)
   964  	case OpNeq32:
   965  		return rewriteValueAMD64_OpNeq32_0(v)
   966  	case OpNeq32F:
   967  		return rewriteValueAMD64_OpNeq32F_0(v)
   968  	case OpNeq64:
   969  		return rewriteValueAMD64_OpNeq64_0(v)
   970  	case OpNeq64F:
   971  		return rewriteValueAMD64_OpNeq64F_0(v)
   972  	case OpNeq8:
   973  		return rewriteValueAMD64_OpNeq8_0(v)
   974  	case OpNeqB:
   975  		return rewriteValueAMD64_OpNeqB_0(v)
   976  	case OpNeqPtr:
   977  		return rewriteValueAMD64_OpNeqPtr_0(v)
   978  	case OpNilCheck:
   979  		return rewriteValueAMD64_OpNilCheck_0(v)
   980  	case OpNot:
   981  		return rewriteValueAMD64_OpNot_0(v)
   982  	case OpOffPtr:
   983  		return rewriteValueAMD64_OpOffPtr_0(v)
   984  	case OpOr16:
   985  		return rewriteValueAMD64_OpOr16_0(v)
   986  	case OpOr32:
   987  		return rewriteValueAMD64_OpOr32_0(v)
   988  	case OpOr64:
   989  		return rewriteValueAMD64_OpOr64_0(v)
   990  	case OpOr8:
   991  		return rewriteValueAMD64_OpOr8_0(v)
   992  	case OpOrB:
   993  		return rewriteValueAMD64_OpOrB_0(v)
   994  	case OpPopCount16:
   995  		return rewriteValueAMD64_OpPopCount16_0(v)
   996  	case OpPopCount32:
   997  		return rewriteValueAMD64_OpPopCount32_0(v)
   998  	case OpPopCount64:
   999  		return rewriteValueAMD64_OpPopCount64_0(v)
  1000  	case OpPopCount8:
  1001  		return rewriteValueAMD64_OpPopCount8_0(v)
  1002  	case OpRotateLeft16:
  1003  		return rewriteValueAMD64_OpRotateLeft16_0(v)
  1004  	case OpRotateLeft32:
  1005  		return rewriteValueAMD64_OpRotateLeft32_0(v)
  1006  	case OpRotateLeft64:
  1007  		return rewriteValueAMD64_OpRotateLeft64_0(v)
  1008  	case OpRotateLeft8:
  1009  		return rewriteValueAMD64_OpRotateLeft8_0(v)
  1010  	case OpRound32F:
  1011  		return rewriteValueAMD64_OpRound32F_0(v)
  1012  	case OpRound64F:
  1013  		return rewriteValueAMD64_OpRound64F_0(v)
  1014  	case OpRoundToEven:
  1015  		return rewriteValueAMD64_OpRoundToEven_0(v)
  1016  	case OpRsh16Ux16:
  1017  		return rewriteValueAMD64_OpRsh16Ux16_0(v)
  1018  	case OpRsh16Ux32:
  1019  		return rewriteValueAMD64_OpRsh16Ux32_0(v)
  1020  	case OpRsh16Ux64:
  1021  		return rewriteValueAMD64_OpRsh16Ux64_0(v)
  1022  	case OpRsh16Ux8:
  1023  		return rewriteValueAMD64_OpRsh16Ux8_0(v)
  1024  	case OpRsh16x16:
  1025  		return rewriteValueAMD64_OpRsh16x16_0(v)
  1026  	case OpRsh16x32:
  1027  		return rewriteValueAMD64_OpRsh16x32_0(v)
  1028  	case OpRsh16x64:
  1029  		return rewriteValueAMD64_OpRsh16x64_0(v)
  1030  	case OpRsh16x8:
  1031  		return rewriteValueAMD64_OpRsh16x8_0(v)
  1032  	case OpRsh32Ux16:
  1033  		return rewriteValueAMD64_OpRsh32Ux16_0(v)
  1034  	case OpRsh32Ux32:
  1035  		return rewriteValueAMD64_OpRsh32Ux32_0(v)
  1036  	case OpRsh32Ux64:
  1037  		return rewriteValueAMD64_OpRsh32Ux64_0(v)
  1038  	case OpRsh32Ux8:
  1039  		return rewriteValueAMD64_OpRsh32Ux8_0(v)
  1040  	case OpRsh32x16:
  1041  		return rewriteValueAMD64_OpRsh32x16_0(v)
  1042  	case OpRsh32x32:
  1043  		return rewriteValueAMD64_OpRsh32x32_0(v)
  1044  	case OpRsh32x64:
  1045  		return rewriteValueAMD64_OpRsh32x64_0(v)
  1046  	case OpRsh32x8:
  1047  		return rewriteValueAMD64_OpRsh32x8_0(v)
  1048  	case OpRsh64Ux16:
  1049  		return rewriteValueAMD64_OpRsh64Ux16_0(v)
  1050  	case OpRsh64Ux32:
  1051  		return rewriteValueAMD64_OpRsh64Ux32_0(v)
  1052  	case OpRsh64Ux64:
  1053  		return rewriteValueAMD64_OpRsh64Ux64_0(v)
  1054  	case OpRsh64Ux8:
  1055  		return rewriteValueAMD64_OpRsh64Ux8_0(v)
  1056  	case OpRsh64x16:
  1057  		return rewriteValueAMD64_OpRsh64x16_0(v)
  1058  	case OpRsh64x32:
  1059  		return rewriteValueAMD64_OpRsh64x32_0(v)
  1060  	case OpRsh64x64:
  1061  		return rewriteValueAMD64_OpRsh64x64_0(v)
  1062  	case OpRsh64x8:
  1063  		return rewriteValueAMD64_OpRsh64x8_0(v)
  1064  	case OpRsh8Ux16:
  1065  		return rewriteValueAMD64_OpRsh8Ux16_0(v)
  1066  	case OpRsh8Ux32:
  1067  		return rewriteValueAMD64_OpRsh8Ux32_0(v)
  1068  	case OpRsh8Ux64:
  1069  		return rewriteValueAMD64_OpRsh8Ux64_0(v)
  1070  	case OpRsh8Ux8:
  1071  		return rewriteValueAMD64_OpRsh8Ux8_0(v)
  1072  	case OpRsh8x16:
  1073  		return rewriteValueAMD64_OpRsh8x16_0(v)
  1074  	case OpRsh8x32:
  1075  		return rewriteValueAMD64_OpRsh8x32_0(v)
  1076  	case OpRsh8x64:
  1077  		return rewriteValueAMD64_OpRsh8x64_0(v)
  1078  	case OpRsh8x8:
  1079  		return rewriteValueAMD64_OpRsh8x8_0(v)
  1080  	case OpSelect0:
  1081  		return rewriteValueAMD64_OpSelect0_0(v)
  1082  	case OpSelect1:
  1083  		return rewriteValueAMD64_OpSelect1_0(v)
  1084  	case OpSignExt16to32:
  1085  		return rewriteValueAMD64_OpSignExt16to32_0(v)
  1086  	case OpSignExt16to64:
  1087  		return rewriteValueAMD64_OpSignExt16to64_0(v)
  1088  	case OpSignExt32to64:
  1089  		return rewriteValueAMD64_OpSignExt32to64_0(v)
  1090  	case OpSignExt8to16:
  1091  		return rewriteValueAMD64_OpSignExt8to16_0(v)
  1092  	case OpSignExt8to32:
  1093  		return rewriteValueAMD64_OpSignExt8to32_0(v)
  1094  	case OpSignExt8to64:
  1095  		return rewriteValueAMD64_OpSignExt8to64_0(v)
  1096  	case OpSlicemask:
  1097  		return rewriteValueAMD64_OpSlicemask_0(v)
  1098  	case OpSqrt:
  1099  		return rewriteValueAMD64_OpSqrt_0(v)
  1100  	case OpStaticCall:
  1101  		return rewriteValueAMD64_OpStaticCall_0(v)
  1102  	case OpStore:
  1103  		return rewriteValueAMD64_OpStore_0(v)
  1104  	case OpSub16:
  1105  		return rewriteValueAMD64_OpSub16_0(v)
  1106  	case OpSub32:
  1107  		return rewriteValueAMD64_OpSub32_0(v)
  1108  	case OpSub32F:
  1109  		return rewriteValueAMD64_OpSub32F_0(v)
  1110  	case OpSub64:
  1111  		return rewriteValueAMD64_OpSub64_0(v)
  1112  	case OpSub64F:
  1113  		return rewriteValueAMD64_OpSub64F_0(v)
  1114  	case OpSub8:
  1115  		return rewriteValueAMD64_OpSub8_0(v)
  1116  	case OpSubPtr:
  1117  		return rewriteValueAMD64_OpSubPtr_0(v)
  1118  	case OpTrunc:
  1119  		return rewriteValueAMD64_OpTrunc_0(v)
  1120  	case OpTrunc16to8:
  1121  		return rewriteValueAMD64_OpTrunc16to8_0(v)
  1122  	case OpTrunc32to16:
  1123  		return rewriteValueAMD64_OpTrunc32to16_0(v)
  1124  	case OpTrunc32to8:
  1125  		return rewriteValueAMD64_OpTrunc32to8_0(v)
  1126  	case OpTrunc64to16:
  1127  		return rewriteValueAMD64_OpTrunc64to16_0(v)
  1128  	case OpTrunc64to32:
  1129  		return rewriteValueAMD64_OpTrunc64to32_0(v)
  1130  	case OpTrunc64to8:
  1131  		return rewriteValueAMD64_OpTrunc64to8_0(v)
  1132  	case OpWB:
  1133  		return rewriteValueAMD64_OpWB_0(v)
  1134  	case OpXor16:
  1135  		return rewriteValueAMD64_OpXor16_0(v)
  1136  	case OpXor32:
  1137  		return rewriteValueAMD64_OpXor32_0(v)
  1138  	case OpXor64:
  1139  		return rewriteValueAMD64_OpXor64_0(v)
  1140  	case OpXor8:
  1141  		return rewriteValueAMD64_OpXor8_0(v)
  1142  	case OpZero:
  1143  		return rewriteValueAMD64_OpZero_0(v) || rewriteValueAMD64_OpZero_10(v) || rewriteValueAMD64_OpZero_20(v)
  1144  	case OpZeroExt16to32:
  1145  		return rewriteValueAMD64_OpZeroExt16to32_0(v)
  1146  	case OpZeroExt16to64:
  1147  		return rewriteValueAMD64_OpZeroExt16to64_0(v)
  1148  	case OpZeroExt32to64:
  1149  		return rewriteValueAMD64_OpZeroExt32to64_0(v)
  1150  	case OpZeroExt8to16:
  1151  		return rewriteValueAMD64_OpZeroExt8to16_0(v)
  1152  	case OpZeroExt8to32:
  1153  		return rewriteValueAMD64_OpZeroExt8to32_0(v)
  1154  	case OpZeroExt8to64:
  1155  		return rewriteValueAMD64_OpZeroExt8to64_0(v)
  1156  	}
  1157  	return false
  1158  }
  1159  func rewriteValueAMD64_OpAMD64ADCQ_0(v *Value) bool {
  1160  	// match: (ADCQ x (MOVQconst [c]) carry)
  1161  	// cond: is32Bit(c)
  1162  	// result: (ADCQconst x [c] carry)
  1163  	for {
  1164  		_ = v.Args[2]
  1165  		x := v.Args[0]
  1166  		v_1 := v.Args[1]
  1167  		if v_1.Op != OpAMD64MOVQconst {
  1168  			break
  1169  		}
  1170  		c := v_1.AuxInt
  1171  		carry := v.Args[2]
  1172  		if !(is32Bit(c)) {
  1173  			break
  1174  		}
  1175  		v.reset(OpAMD64ADCQconst)
  1176  		v.AuxInt = c
  1177  		v.AddArg(x)
  1178  		v.AddArg(carry)
  1179  		return true
  1180  	}
  1181  	// match: (ADCQ (MOVQconst [c]) x carry)
  1182  	// cond: is32Bit(c)
  1183  	// result: (ADCQconst x [c] carry)
  1184  	for {
  1185  		_ = v.Args[2]
  1186  		v_0 := v.Args[0]
  1187  		if v_0.Op != OpAMD64MOVQconst {
  1188  			break
  1189  		}
  1190  		c := v_0.AuxInt
  1191  		x := v.Args[1]
  1192  		carry := v.Args[2]
  1193  		if !(is32Bit(c)) {
  1194  			break
  1195  		}
  1196  		v.reset(OpAMD64ADCQconst)
  1197  		v.AuxInt = c
  1198  		v.AddArg(x)
  1199  		v.AddArg(carry)
  1200  		return true
  1201  	}
  1202  	// match: (ADCQ x y (FlagEQ))
  1203  	// cond:
  1204  	// result: (ADDQcarry x y)
  1205  	for {
  1206  		_ = v.Args[2]
  1207  		x := v.Args[0]
  1208  		y := v.Args[1]
  1209  		v_2 := v.Args[2]
  1210  		if v_2.Op != OpAMD64FlagEQ {
  1211  			break
  1212  		}
  1213  		v.reset(OpAMD64ADDQcarry)
  1214  		v.AddArg(x)
  1215  		v.AddArg(y)
  1216  		return true
  1217  	}
  1218  	return false
  1219  }
  1220  func rewriteValueAMD64_OpAMD64ADCQconst_0(v *Value) bool {
  1221  	// match: (ADCQconst x [c] (FlagEQ))
  1222  	// cond:
  1223  	// result: (ADDQconstcarry x [c])
  1224  	for {
  1225  		c := v.AuxInt
  1226  		_ = v.Args[1]
  1227  		x := v.Args[0]
  1228  		v_1 := v.Args[1]
  1229  		if v_1.Op != OpAMD64FlagEQ {
  1230  			break
  1231  		}
  1232  		v.reset(OpAMD64ADDQconstcarry)
  1233  		v.AuxInt = c
  1234  		v.AddArg(x)
  1235  		return true
  1236  	}
  1237  	return false
  1238  }
  1239  func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool {
  1240  	// match: (ADDL x (MOVLconst [c]))
  1241  	// cond:
  1242  	// result: (ADDLconst [c] x)
  1243  	for {
  1244  		_ = v.Args[1]
  1245  		x := v.Args[0]
  1246  		v_1 := v.Args[1]
  1247  		if v_1.Op != OpAMD64MOVLconst {
  1248  			break
  1249  		}
  1250  		c := v_1.AuxInt
  1251  		v.reset(OpAMD64ADDLconst)
  1252  		v.AuxInt = c
  1253  		v.AddArg(x)
  1254  		return true
  1255  	}
  1256  	// match: (ADDL (MOVLconst [c]) x)
  1257  	// cond:
  1258  	// result: (ADDLconst [c] x)
  1259  	for {
  1260  		_ = v.Args[1]
  1261  		v_0 := v.Args[0]
  1262  		if v_0.Op != OpAMD64MOVLconst {
  1263  			break
  1264  		}
  1265  		c := v_0.AuxInt
  1266  		x := v.Args[1]
  1267  		v.reset(OpAMD64ADDLconst)
  1268  		v.AuxInt = c
  1269  		v.AddArg(x)
  1270  		return true
  1271  	}
  1272  	// match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
  1273  	// cond: d==32-c
  1274  	// result: (ROLLconst x [c])
  1275  	for {
  1276  		_ = v.Args[1]
  1277  		v_0 := v.Args[0]
  1278  		if v_0.Op != OpAMD64SHLLconst {
  1279  			break
  1280  		}
  1281  		c := v_0.AuxInt
  1282  		x := v_0.Args[0]
  1283  		v_1 := v.Args[1]
  1284  		if v_1.Op != OpAMD64SHRLconst {
  1285  			break
  1286  		}
  1287  		d := v_1.AuxInt
  1288  		if x != v_1.Args[0] {
  1289  			break
  1290  		}
  1291  		if !(d == 32-c) {
  1292  			break
  1293  		}
  1294  		v.reset(OpAMD64ROLLconst)
  1295  		v.AuxInt = c
  1296  		v.AddArg(x)
  1297  		return true
  1298  	}
  1299  	// match: (ADDL (SHRLconst x [d]) (SHLLconst x [c]))
  1300  	// cond: d==32-c
  1301  	// result: (ROLLconst x [c])
  1302  	for {
  1303  		_ = v.Args[1]
  1304  		v_0 := v.Args[0]
  1305  		if v_0.Op != OpAMD64SHRLconst {
  1306  			break
  1307  		}
  1308  		d := v_0.AuxInt
  1309  		x := v_0.Args[0]
  1310  		v_1 := v.Args[1]
  1311  		if v_1.Op != OpAMD64SHLLconst {
  1312  			break
  1313  		}
  1314  		c := v_1.AuxInt
  1315  		if x != v_1.Args[0] {
  1316  			break
  1317  		}
  1318  		if !(d == 32-c) {
  1319  			break
  1320  		}
  1321  		v.reset(OpAMD64ROLLconst)
  1322  		v.AuxInt = c
  1323  		v.AddArg(x)
  1324  		return true
  1325  	}
  1326  	// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
  1327  	// cond: d==16-c && c < 16 && t.Size() == 2
  1328  	// result: (ROLWconst x [c])
  1329  	for {
  1330  		t := v.Type
  1331  		_ = v.Args[1]
  1332  		v_0 := v.Args[0]
  1333  		if v_0.Op != OpAMD64SHLLconst {
  1334  			break
  1335  		}
  1336  		c := v_0.AuxInt
  1337  		x := v_0.Args[0]
  1338  		v_1 := v.Args[1]
  1339  		if v_1.Op != OpAMD64SHRWconst {
  1340  			break
  1341  		}
  1342  		d := v_1.AuxInt
  1343  		if x != v_1.Args[0] {
  1344  			break
  1345  		}
  1346  		if !(d == 16-c && c < 16 && t.Size() == 2) {
  1347  			break
  1348  		}
  1349  		v.reset(OpAMD64ROLWconst)
  1350  		v.AuxInt = c
  1351  		v.AddArg(x)
  1352  		return true
  1353  	}
  1354  	// match: (ADDL <t> (SHRWconst x [d]) (SHLLconst x [c]))
  1355  	// cond: d==16-c && c < 16 && t.Size() == 2
  1356  	// result: (ROLWconst x [c])
  1357  	for {
  1358  		t := v.Type
  1359  		_ = v.Args[1]
  1360  		v_0 := v.Args[0]
  1361  		if v_0.Op != OpAMD64SHRWconst {
  1362  			break
  1363  		}
  1364  		d := v_0.AuxInt
  1365  		x := v_0.Args[0]
  1366  		v_1 := v.Args[1]
  1367  		if v_1.Op != OpAMD64SHLLconst {
  1368  			break
  1369  		}
  1370  		c := v_1.AuxInt
  1371  		if x != v_1.Args[0] {
  1372  			break
  1373  		}
  1374  		if !(d == 16-c && c < 16 && t.Size() == 2) {
  1375  			break
  1376  		}
  1377  		v.reset(OpAMD64ROLWconst)
  1378  		v.AuxInt = c
  1379  		v.AddArg(x)
  1380  		return true
  1381  	}
  1382  	// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
  1383  	// cond: d==8-c && c < 8 && t.Size() == 1
  1384  	// result: (ROLBconst x [c])
  1385  	for {
  1386  		t := v.Type
  1387  		_ = v.Args[1]
  1388  		v_0 := v.Args[0]
  1389  		if v_0.Op != OpAMD64SHLLconst {
  1390  			break
  1391  		}
  1392  		c := v_0.AuxInt
  1393  		x := v_0.Args[0]
  1394  		v_1 := v.Args[1]
  1395  		if v_1.Op != OpAMD64SHRBconst {
  1396  			break
  1397  		}
  1398  		d := v_1.AuxInt
  1399  		if x != v_1.Args[0] {
  1400  			break
  1401  		}
  1402  		if !(d == 8-c && c < 8 && t.Size() == 1) {
  1403  			break
  1404  		}
  1405  		v.reset(OpAMD64ROLBconst)
  1406  		v.AuxInt = c
  1407  		v.AddArg(x)
  1408  		return true
  1409  	}
  1410  	// match: (ADDL <t> (SHRBconst x [d]) (SHLLconst x [c]))
  1411  	// cond: d==8-c && c < 8 && t.Size() == 1
  1412  	// result: (ROLBconst x [c])
  1413  	for {
  1414  		t := v.Type
  1415  		_ = v.Args[1]
  1416  		v_0 := v.Args[0]
  1417  		if v_0.Op != OpAMD64SHRBconst {
  1418  			break
  1419  		}
  1420  		d := v_0.AuxInt
  1421  		x := v_0.Args[0]
  1422  		v_1 := v.Args[1]
  1423  		if v_1.Op != OpAMD64SHLLconst {
  1424  			break
  1425  		}
  1426  		c := v_1.AuxInt
  1427  		if x != v_1.Args[0] {
  1428  			break
  1429  		}
  1430  		if !(d == 8-c && c < 8 && t.Size() == 1) {
  1431  			break
  1432  		}
  1433  		v.reset(OpAMD64ROLBconst)
  1434  		v.AuxInt = c
  1435  		v.AddArg(x)
  1436  		return true
  1437  	}
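	// Note: the six rotate rules above fold a matched shift pair into a single
	// rotate. As an illustrative sketch (not part of the generated rules), Go
	// source such as
	//
	//	func rol7(x uint32) uint32 { return x<<7 + x>>25 }
	//
	// yields (ADDL (SHLLconst x [7]) (SHRLconst x [25])), and since 25 == 32-7
	// it is rewritten to (ROLLconst x [7]). The ROLW/ROLB variants additionally
	// require t.Size() to match the narrow width and c to stay below it, so the
	// rewrite only fires when the result is genuinely a 16- or 8-bit value.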
  1438  	// match: (ADDL x (SHLLconst [3] y))
  1439  	// cond:
  1440  	// result: (LEAL8 x y)
  1441  	for {
  1442  		_ = v.Args[1]
  1443  		x := v.Args[0]
  1444  		v_1 := v.Args[1]
  1445  		if v_1.Op != OpAMD64SHLLconst {
  1446  			break
  1447  		}
  1448  		if v_1.AuxInt != 3 {
  1449  			break
  1450  		}
  1451  		y := v_1.Args[0]
  1452  		v.reset(OpAMD64LEAL8)
  1453  		v.AddArg(x)
  1454  		v.AddArg(y)
  1455  		return true
  1456  	}
  1457  	// match: (ADDL (SHLLconst [3] y) x)
  1458  	// cond:
  1459  	// result: (LEAL8 x y)
  1460  	for {
  1461  		_ = v.Args[1]
  1462  		v_0 := v.Args[0]
  1463  		if v_0.Op != OpAMD64SHLLconst {
  1464  			break
  1465  		}
  1466  		if v_0.AuxInt != 3 {
  1467  			break
  1468  		}
  1469  		y := v_0.Args[0]
  1470  		x := v.Args[1]
  1471  		v.reset(OpAMD64LEAL8)
  1472  		v.AddArg(x)
  1473  		v.AddArg(y)
  1474  		return true
  1475  	}
  1476  	return false
  1477  }
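// The rewrite rules for a single op are emitted in groups of at most ten per
// function (hence the _0, _10, _20 suffixes); rewriteValueAMD64 chains the
// groups with ||, so the groups are tried in order and the first rule that
// applies rewrites v.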
  1478  func rewriteValueAMD64_OpAMD64ADDL_10(v *Value) bool {
  1479  	// match: (ADDL x (SHLLconst [2] y))
  1480  	// cond:
  1481  	// result: (LEAL4 x y)
  1482  	for {
  1483  		_ = v.Args[1]
  1484  		x := v.Args[0]
  1485  		v_1 := v.Args[1]
  1486  		if v_1.Op != OpAMD64SHLLconst {
  1487  			break
  1488  		}
  1489  		if v_1.AuxInt != 2 {
  1490  			break
  1491  		}
  1492  		y := v_1.Args[0]
  1493  		v.reset(OpAMD64LEAL4)
  1494  		v.AddArg(x)
  1495  		v.AddArg(y)
  1496  		return true
  1497  	}
  1498  	// match: (ADDL (SHLLconst [2] y) x)
  1499  	// cond:
  1500  	// result: (LEAL4 x y)
  1501  	for {
  1502  		_ = v.Args[1]
  1503  		v_0 := v.Args[0]
  1504  		if v_0.Op != OpAMD64SHLLconst {
  1505  			break
  1506  		}
  1507  		if v_0.AuxInt != 2 {
  1508  			break
  1509  		}
  1510  		y := v_0.Args[0]
  1511  		x := v.Args[1]
  1512  		v.reset(OpAMD64LEAL4)
  1513  		v.AddArg(x)
  1514  		v.AddArg(y)
  1515  		return true
  1516  	}
  1517  	// match: (ADDL x (SHLLconst [1] y))
  1518  	// cond:
  1519  	// result: (LEAL2 x y)
  1520  	for {
  1521  		_ = v.Args[1]
  1522  		x := v.Args[0]
  1523  		v_1 := v.Args[1]
  1524  		if v_1.Op != OpAMD64SHLLconst {
  1525  			break
  1526  		}
  1527  		if v_1.AuxInt != 1 {
  1528  			break
  1529  		}
  1530  		y := v_1.Args[0]
  1531  		v.reset(OpAMD64LEAL2)
  1532  		v.AddArg(x)
  1533  		v.AddArg(y)
  1534  		return true
  1535  	}
  1536  	// match: (ADDL (SHLLconst [1] y) x)
  1537  	// cond:
  1538  	// result: (LEAL2 x y)
  1539  	for {
  1540  		_ = v.Args[1]
  1541  		v_0 := v.Args[0]
  1542  		if v_0.Op != OpAMD64SHLLconst {
  1543  			break
  1544  		}
  1545  		if v_0.AuxInt != 1 {
  1546  			break
  1547  		}
  1548  		y := v_0.Args[0]
  1549  		x := v.Args[1]
  1550  		v.reset(OpAMD64LEAL2)
  1551  		v.AddArg(x)
  1552  		v.AddArg(y)
  1553  		return true
  1554  	}
  1555  	// match: (ADDL x (ADDL y y))
  1556  	// cond:
  1557  	// result: (LEAL2 x y)
  1558  	for {
  1559  		_ = v.Args[1]
  1560  		x := v.Args[0]
  1561  		v_1 := v.Args[1]
  1562  		if v_1.Op != OpAMD64ADDL {
  1563  			break
  1564  		}
  1565  		_ = v_1.Args[1]
  1566  		y := v_1.Args[0]
  1567  		if y != v_1.Args[1] {
  1568  			break
  1569  		}
  1570  		v.reset(OpAMD64LEAL2)
  1571  		v.AddArg(x)
  1572  		v.AddArg(y)
  1573  		return true
  1574  	}
  1575  	// match: (ADDL (ADDL y y) x)
  1576  	// cond:
  1577  	// result: (LEAL2 x y)
  1578  	for {
  1579  		_ = v.Args[1]
  1580  		v_0 := v.Args[0]
  1581  		if v_0.Op != OpAMD64ADDL {
  1582  			break
  1583  		}
  1584  		_ = v_0.Args[1]
  1585  		y := v_0.Args[0]
  1586  		if y != v_0.Args[1] {
  1587  			break
  1588  		}
  1589  		x := v.Args[1]
  1590  		v.reset(OpAMD64LEAL2)
  1591  		v.AddArg(x)
  1592  		v.AddArg(y)
  1593  		return true
  1594  	}
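	// The two rules above spot a doubled operand: x + (y+y) is x + 2*y, which a
	// single LEAL2 (scale-2 LEA) can compute without a separate shift or add.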
  1595  	// match: (ADDL x (ADDL x y))
  1596  	// cond:
  1597  	// result: (LEAL2 y x)
  1598  	for {
  1599  		_ = v.Args[1]
  1600  		x := v.Args[0]
  1601  		v_1 := v.Args[1]
  1602  		if v_1.Op != OpAMD64ADDL {
  1603  			break
  1604  		}
  1605  		_ = v_1.Args[1]
  1606  		if x != v_1.Args[0] {
  1607  			break
  1608  		}
  1609  		y := v_1.Args[1]
  1610  		v.reset(OpAMD64LEAL2)
  1611  		v.AddArg(y)
  1612  		v.AddArg(x)
  1613  		return true
  1614  	}
  1615  	// match: (ADDL x (ADDL y x))
  1616  	// cond:
  1617  	// result: (LEAL2 y x)
  1618  	for {
  1619  		_ = v.Args[1]
  1620  		x := v.Args[0]
  1621  		v_1 := v.Args[1]
  1622  		if v_1.Op != OpAMD64ADDL {
  1623  			break
  1624  		}
  1625  		_ = v_1.Args[1]
  1626  		y := v_1.Args[0]
  1627  		if x != v_1.Args[1] {
  1628  			break
  1629  		}
  1630  		v.reset(OpAMD64LEAL2)
  1631  		v.AddArg(y)
  1632  		v.AddArg(x)
  1633  		return true
  1634  	}
  1635  	// match: (ADDL (ADDL x y) x)
  1636  	// cond:
  1637  	// result: (LEAL2 y x)
  1638  	for {
  1639  		_ = v.Args[1]
  1640  		v_0 := v.Args[0]
  1641  		if v_0.Op != OpAMD64ADDL {
  1642  			break
  1643  		}
  1644  		_ = v_0.Args[1]
  1645  		x := v_0.Args[0]
  1646  		y := v_0.Args[1]
  1647  		if x != v.Args[1] {
  1648  			break
  1649  		}
  1650  		v.reset(OpAMD64LEAL2)
  1651  		v.AddArg(y)
  1652  		v.AddArg(x)
  1653  		return true
  1654  	}
  1655  	// match: (ADDL (ADDL y x) x)
  1656  	// cond:
  1657  	// result: (LEAL2 y x)
  1658  	for {
  1659  		_ = v.Args[1]
  1660  		v_0 := v.Args[0]
  1661  		if v_0.Op != OpAMD64ADDL {
  1662  			break
  1663  		}
  1664  		_ = v_0.Args[1]
  1665  		y := v_0.Args[0]
  1666  		x := v_0.Args[1]
  1667  		if x != v.Args[1] {
  1668  			break
  1669  		}
  1670  		v.reset(OpAMD64LEAL2)
  1671  		v.AddArg(y)
  1672  		v.AddArg(x)
  1673  		return true
  1674  	}
  1675  	return false
  1676  }
  1677  func rewriteValueAMD64_OpAMD64ADDL_20(v *Value) bool {
  1678  	// match: (ADDL (ADDLconst [c] x) y)
  1679  	// cond:
  1680  	// result: (LEAL1 [c] x y)
  1681  	for {
  1682  		_ = v.Args[1]
  1683  		v_0 := v.Args[0]
  1684  		if v_0.Op != OpAMD64ADDLconst {
  1685  			break
  1686  		}
  1687  		c := v_0.AuxInt
  1688  		x := v_0.Args[0]
  1689  		y := v.Args[1]
  1690  		v.reset(OpAMD64LEAL1)
  1691  		v.AuxInt = c
  1692  		v.AddArg(x)
  1693  		v.AddArg(y)
  1694  		return true
  1695  	}
  1696  	// match: (ADDL y (ADDLconst [c] x))
  1697  	// cond:
  1698  	// result: (LEAL1 [c] x y)
  1699  	for {
  1700  		_ = v.Args[1]
  1701  		y := v.Args[0]
  1702  		v_1 := v.Args[1]
  1703  		if v_1.Op != OpAMD64ADDLconst {
  1704  			break
  1705  		}
  1706  		c := v_1.AuxInt
  1707  		x := v_1.Args[0]
  1708  		v.reset(OpAMD64LEAL1)
  1709  		v.AuxInt = c
  1710  		v.AddArg(x)
  1711  		v.AddArg(y)
  1712  		return true
  1713  	}
  1714  	// match: (ADDL x (LEAL [c] {s} y))
  1715  	// cond: x.Op != OpSB && y.Op != OpSB
  1716  	// result: (LEAL1 [c] {s} x y)
  1717  	for {
  1718  		_ = v.Args[1]
  1719  		x := v.Args[0]
  1720  		v_1 := v.Args[1]
  1721  		if v_1.Op != OpAMD64LEAL {
  1722  			break
  1723  		}
  1724  		c := v_1.AuxInt
  1725  		s := v_1.Aux
  1726  		y := v_1.Args[0]
  1727  		if !(x.Op != OpSB && y.Op != OpSB) {
  1728  			break
  1729  		}
  1730  		v.reset(OpAMD64LEAL1)
  1731  		v.AuxInt = c
  1732  		v.Aux = s
  1733  		v.AddArg(x)
  1734  		v.AddArg(y)
  1735  		return true
  1736  	}
  1737  	// match: (ADDL (LEAL [c] {s} y) x)
  1738  	// cond: x.Op != OpSB && y.Op != OpSB
  1739  	// result: (LEAL1 [c] {s} x y)
  1740  	for {
  1741  		_ = v.Args[1]
  1742  		v_0 := v.Args[0]
  1743  		if v_0.Op != OpAMD64LEAL {
  1744  			break
  1745  		}
  1746  		c := v_0.AuxInt
  1747  		s := v_0.Aux
  1748  		y := v_0.Args[0]
  1749  		x := v.Args[1]
  1750  		if !(x.Op != OpSB && y.Op != OpSB) {
  1751  			break
  1752  		}
  1753  		v.reset(OpAMD64LEAL1)
  1754  		v.AuxInt = c
  1755  		v.Aux = s
  1756  		v.AddArg(x)
  1757  		v.AddArg(y)
  1758  		return true
  1759  	}
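	// In the two rules above that absorb a LEAL, OpSB is the pseudo-register
	// holding the static data base. SB may only appear as the base of an
	// address, never as an index, and because LEAL1 treats its operands
	// symmetrically (either may end up as the index) the rewrite is skipped
	// when either side is SB.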
  1760  	// match: (ADDL x (NEGL y))
  1761  	// cond:
  1762  	// result: (SUBL x y)
  1763  	for {
  1764  		_ = v.Args[1]
  1765  		x := v.Args[0]
  1766  		v_1 := v.Args[1]
  1767  		if v_1.Op != OpAMD64NEGL {
  1768  			break
  1769  		}
  1770  		y := v_1.Args[0]
  1771  		v.reset(OpAMD64SUBL)
  1772  		v.AddArg(x)
  1773  		v.AddArg(y)
  1774  		return true
  1775  	}
  1776  	// match: (ADDL (NEGL y) x)
  1777  	// cond:
  1778  	// result: (SUBL x y)
  1779  	for {
  1780  		_ = v.Args[1]
  1781  		v_0 := v.Args[0]
  1782  		if v_0.Op != OpAMD64NEGL {
  1783  			break
  1784  		}
  1785  		y := v_0.Args[0]
  1786  		x := v.Args[1]
  1787  		v.reset(OpAMD64SUBL)
  1788  		v.AddArg(x)
  1789  		v.AddArg(y)
  1790  		return true
  1791  	}
  1792  	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
  1793  	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
  1794  	// result: (ADDLload x [off] {sym} ptr mem)
  1795  	for {
  1796  		_ = v.Args[1]
  1797  		x := v.Args[0]
  1798  		l := v.Args[1]
  1799  		if l.Op != OpAMD64MOVLload {
  1800  			break
  1801  		}
  1802  		off := l.AuxInt
  1803  		sym := l.Aux
  1804  		_ = l.Args[1]
  1805  		ptr := l.Args[0]
  1806  		mem := l.Args[1]
  1807  		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
  1808  			break
  1809  		}
  1810  		v.reset(OpAMD64ADDLload)
  1811  		v.AuxInt = off
  1812  		v.Aux = sym
  1813  		v.AddArg(x)
  1814  		v.AddArg(ptr)
  1815  		v.AddArg(mem)
  1816  		return true
  1817  	}
  1818  	// match: (ADDL l:(MOVLload [off] {sym} ptr mem) x)
  1819  	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
  1820  	// result: (ADDLload x [off] {sym} ptr mem)
  1821  	for {
  1822  		_ = v.Args[1]
  1823  		l := v.Args[0]
  1824  		if l.Op != OpAMD64MOVLload {
  1825  			break
  1826  		}
  1827  		off := l.AuxInt
  1828  		sym := l.Aux
  1829  		_ = l.Args[1]
  1830  		ptr := l.Args[0]
  1831  		mem := l.Args[1]
  1832  		x := v.Args[1]
  1833  		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
  1834  			break
  1835  		}
  1836  		v.reset(OpAMD64ADDLload)
  1837  		v.AuxInt = off
  1838  		v.Aux = sym
  1839  		v.AddArg(x)
  1840  		v.AddArg(ptr)
  1841  		v.AddArg(mem)
  1842  		return true
  1843  	}
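	// The two rules above fold a memory operand into the add: when it is safe
	// to merge the MOVLload into this value (canMergeLoadClobber), the pair
	// becomes a single ADDLload and the old load is invalidated via clobber so
	// dead-code elimination drops it. As an illustration (not taken from this
	// file), Go source like
	//
	//	func addMem(s int32, p *int32) int32 { return s + *p }
	//
	// can then assemble to one memory-operand ADDL rather than a separate MOVL
	// followed by a register ADDL.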
  1844  	return false
  1845  }
  1846  func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool {
  1847  	// match: (ADDLconst [c] (ADDL x y))
  1848  	// cond:
  1849  	// result: (LEAL1 [c] x y)
  1850  	for {
  1851  		c := v.AuxInt
  1852  		v_0 := v.Args[0]
  1853  		if v_0.Op != OpAMD64ADDL {
  1854  			break
  1855  		}
  1856  		_ = v_0.Args[1]
  1857  		x := v_0.Args[0]
  1858  		y := v_0.Args[1]
  1859  		v.reset(OpAMD64LEAL1)
  1860  		v.AuxInt = c
  1861  		v.AddArg(x)
  1862  		v.AddArg(y)
  1863  		return true
  1864  	}
  1865  	// match: (ADDLconst [c] (SHLLconst [1] x))
  1866  	// cond:
  1867  	// result: (LEAL1 [c] x x)
  1868  	for {
  1869  		c := v.AuxInt
  1870  		v_0 := v.Args[0]
  1871  		if v_0.Op != OpAMD64SHLLconst {
  1872  			break
  1873  		}
  1874  		if v_0.AuxInt != 1 {
  1875  			break
  1876  		}
  1877  		x := v_0.Args[0]
  1878  		v.reset(OpAMD64LEAL1)
  1879  		v.AuxInt = c
  1880  		v.AddArg(x)
  1881  		v.AddArg(x)
  1882  		return true
  1883  	}
  1884  	// match: (ADDLconst [c] (LEAL [d] {s} x))
  1885  	// cond: is32Bit(c+d)
  1886  	// result: (LEAL [c+d] {s} x)
  1887  	for {
  1888  		c := v.AuxInt
  1889  		v_0 := v.Args[0]
  1890  		if v_0.Op != OpAMD64LEAL {
  1891  			break
  1892  		}
  1893  		d := v_0.AuxInt
  1894  		s := v_0.Aux
  1895  		x := v_0.Args[0]
  1896  		if !(is32Bit(c + d)) {
  1897  			break
  1898  		}
  1899  		v.reset(OpAMD64LEAL)
  1900  		v.AuxInt = c + d
  1901  		v.Aux = s
  1902  		v.AddArg(x)
  1903  		return true
  1904  	}
  1905  	// match: (ADDLconst [c] (LEAL1 [d] {s} x y))
  1906  	// cond: is32Bit(c+d)
  1907  	// result: (LEAL1 [c+d] {s} x y)
  1908  	for {
  1909  		c := v.AuxInt
  1910  		v_0 := v.Args[0]
  1911  		if v_0.Op != OpAMD64LEAL1 {
  1912  			break
  1913  		}
  1914  		d := v_0.AuxInt
  1915  		s := v_0.Aux
  1916  		_ = v_0.Args[1]
  1917  		x := v_0.Args[0]
  1918  		y := v_0.Args[1]
  1919  		if !(is32Bit(c + d)) {
  1920  			break
  1921  		}
  1922  		v.reset(OpAMD64LEAL1)
  1923  		v.AuxInt = c + d
  1924  		v.Aux = s
  1925  		v.AddArg(x)
  1926  		v.AddArg(y)
  1927  		return true
  1928  	}
  1929  	// match: (ADDLconst [c] (LEAL2 [d] {s} x y))
  1930  	// cond: is32Bit(c+d)
  1931  	// result: (LEAL2 [c+d] {s} x y)
  1932  	for {
  1933  		c := v.AuxInt
  1934  		v_0 := v.Args[0]
  1935  		if v_0.Op != OpAMD64LEAL2 {
  1936  			break
  1937  		}
  1938  		d := v_0.AuxInt
  1939  		s := v_0.Aux
  1940  		_ = v_0.Args[1]
  1941  		x := v_0.Args[0]
  1942  		y := v_0.Args[1]
  1943  		if !(is32Bit(c + d)) {
  1944  			break
  1945  		}
  1946  		v.reset(OpAMD64LEAL2)
  1947  		v.AuxInt = c + d
  1948  		v.Aux = s
  1949  		v.AddArg(x)
  1950  		v.AddArg(y)
  1951  		return true
  1952  	}
  1953  	// match: (ADDLconst [c] (LEAL4 [d] {s} x y))
  1954  	// cond: is32Bit(c+d)
  1955  	// result: (LEAL4 [c+d] {s} x y)
  1956  	for {
  1957  		c := v.AuxInt
  1958  		v_0 := v.Args[0]
  1959  		if v_0.Op != OpAMD64LEAL4 {
  1960  			break
  1961  		}
  1962  		d := v_0.AuxInt
  1963  		s := v_0.Aux
  1964  		_ = v_0.Args[1]
  1965  		x := v_0.Args[0]
  1966  		y := v_0.Args[1]
  1967  		if !(is32Bit(c + d)) {
  1968  			break
  1969  		}
  1970  		v.reset(OpAMD64LEAL4)
  1971  		v.AuxInt = c + d
  1972  		v.Aux = s
  1973  		v.AddArg(x)
  1974  		v.AddArg(y)
  1975  		return true
  1976  	}
  1977  	// match: (ADDLconst [c] (LEAL8 [d] {s} x y))
  1978  	// cond: is32Bit(c+d)
  1979  	// result: (LEAL8 [c+d] {s} x y)
  1980  	for {
  1981  		c := v.AuxInt
  1982  		v_0 := v.Args[0]
  1983  		if v_0.Op != OpAMD64LEAL8 {
  1984  			break
  1985  		}
  1986  		d := v_0.AuxInt
  1987  		s := v_0.Aux
  1988  		_ = v_0.Args[1]
  1989  		x := v_0.Args[0]
  1990  		y := v_0.Args[1]
  1991  		if !(is32Bit(c + d)) {
  1992  			break
  1993  		}
  1994  		v.reset(OpAMD64LEAL8)
  1995  		v.AuxInt = c + d
  1996  		v.Aux = s
  1997  		v.AddArg(x)
  1998  		v.AddArg(y)
  1999  		return true
  2000  	}
  2001  	// match: (ADDLconst [c] x)
  2002  	// cond: int32(c)==0
  2003  	// result: x
  2004  	for {
  2005  		c := v.AuxInt
  2006  		x := v.Args[0]
  2007  		if !(int32(c) == 0) {
  2008  			break
  2009  		}
  2010  		v.reset(OpCopy)
  2011  		v.Type = x.Type
  2012  		v.AddArg(x)
  2013  		return true
  2014  	}
  2015  	// match: (ADDLconst [c] (MOVLconst [d]))
  2016  	// cond:
  2017  	// result: (MOVLconst [int64(int32(c+d))])
  2018  	for {
  2019  		c := v.AuxInt
  2020  		v_0 := v.Args[0]
  2021  		if v_0.Op != OpAMD64MOVLconst {
  2022  			break
  2023  		}
  2024  		d := v_0.AuxInt
  2025  		v.reset(OpAMD64MOVLconst)
  2026  		v.AuxInt = int64(int32(c + d))
  2027  		return true
  2028  	}
  2029  	// match: (ADDLconst [c] (ADDLconst [d] x))
  2030  	// cond:
  2031  	// result: (ADDLconst [int64(int32(c+d))] x)
  2032  	for {
  2033  		c := v.AuxInt
  2034  		v_0 := v.Args[0]
  2035  		if v_0.Op != OpAMD64ADDLconst {
  2036  			break
  2037  		}
  2038  		d := v_0.AuxInt
  2039  		x := v_0.Args[0]
  2040  		v.reset(OpAMD64ADDLconst)
  2041  		v.AuxInt = int64(int32(c + d))
  2042  		v.AddArg(x)
  2043  		return true
  2044  	}
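	// Both constant-folding rules above truncate through int32 before widening
	// back to int64 so the AuxInt matches what a 32-bit ADDL computes at run
	// time. For example, with c = 1 and d = 2147483647 (math.MaxInt32), c+d is
	// 2147483648 as an int64, but int64(int32(c+d)) yields -2147483648, the
	// correctly wrapped 32-bit result.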
  2045  	return false
  2046  }
  2047  func rewriteValueAMD64_OpAMD64ADDLconst_10(v *Value) bool {
  2048  	// match: (ADDLconst [off] x:(SP))
  2049  	// cond:
  2050  	// result: (LEAL [off] x)
  2051  	for {
  2052  		off := v.AuxInt
  2053  		x := v.Args[0]
  2054  		if x.Op != OpSP {
  2055  			break
  2056  		}
  2057  		v.reset(OpAMD64LEAL)
  2058  		v.AuxInt = off
  2059  		v.AddArg(x)
  2060  		return true
  2061  	}
  2062  	return false
  2063  }
  2064  func rewriteValueAMD64_OpAMD64ADDLconstmodify_0(v *Value) bool {
  2065  	// match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
  2066  	// cond: ValAndOff(valoff1).canAdd(off2)
  2067  	// result: (ADDLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
  2068  	for {
  2069  		valoff1 := v.AuxInt
  2070  		sym := v.Aux
  2071  		_ = v.Args[1]
  2072  		v_0 := v.Args[0]
  2073  		if v_0.Op != OpAMD64ADDQconst {
  2074  			break
  2075  		}
  2076  		off2 := v_0.AuxInt
  2077  		base := v_0.Args[0]
  2078  		mem := v.Args[1]
  2079  		if !(ValAndOff(valoff1).canAdd(off2)) {
  2080  			break
  2081  		}
  2082  		v.reset(OpAMD64ADDLconstmodify)
  2083  		v.AuxInt = ValAndOff(valoff1).add(off2)
  2084  		v.Aux = sym
  2085  		v.AddArg(base)
  2086  		v.AddArg(mem)
  2087  		return true
  2088  	}
  2089  	// match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
  2090  	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
  2091  	// result: (ADDLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
  2092  	for {
  2093  		valoff1 := v.AuxInt
  2094  		sym1 := v.Aux
  2095  		_ = v.Args[1]
  2096  		v_0 := v.Args[0]
  2097  		if v_0.Op != OpAMD64LEAQ {
  2098  			break
  2099  		}
  2100  		off2 := v_0.AuxInt
  2101  		sym2 := v_0.Aux
  2102  		base := v_0.Args[0]
  2103  		mem := v.Args[1]
  2104  		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
  2105  			break
  2106  		}
  2107  		v.reset(OpAMD64ADDLconstmodify)
  2108  		v.AuxInt = ValAndOff(valoff1).add(off2)
  2109  		v.Aux = mergeSym(sym1, sym2)
  2110  		v.AddArg(base)
  2111  		v.AddArg(mem)
  2112  		return true
  2113  	}
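	// For the constmodify ops, AuxInt is a ValAndOff: the constant to add and
	// the memory offset packed into one int64. canAdd(off2) checks that folding
	// off2 into the offset half still fits, and add(off2) returns the repacked
	// value, so the rules above can absorb an ADDQconst or LEAQ address
	// computation without disturbing the constant half.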
  2114  	return false
  2115  }
  2116  func rewriteValueAMD64_OpAMD64ADDLload_0(v *Value) bool {
  2117  	b := v.Block
  2118  	_ = b
  2119  	typ := &b.Func.Config.Types
  2120  	_ = typ
  2121  	// match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem)
  2122  	// cond: is32Bit(off1+off2)
  2123  	// result: (ADDLload [off1+off2] {sym} val base mem)
  2124  	for {
  2125  		off1 := v.AuxInt
  2126  		sym := v.Aux
  2127  		_ = v.Args[2]
  2128  		val := v.Args[0]
  2129  		v_1 := v.Args[1]
  2130  		if v_1.Op != OpAMD64ADDQconst {
  2131  			break
  2132  		}
  2133  		off2 := v_1.AuxInt
  2134  		base := v_1.Args[0]
  2135  		mem := v.Args[2]
  2136  		if !(is32Bit(off1 + off2)) {
  2137  			break
  2138  		}
  2139  		v.reset(OpAMD64ADDLload)
  2140  		v.AuxInt = off1 + off2
  2141  		v.Aux = sym
  2142  		v.AddArg(val)
  2143  		v.AddArg(base)
  2144  		v.AddArg(mem)
  2145  		return true
  2146  	}
  2147  	// match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
  2148  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  2149  	// result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
  2150  	for {
  2151  		off1 := v.AuxInt
  2152  		sym1 := v.Aux
  2153  		_ = v.Args[2]
  2154  		val := v.Args[0]
  2155  		v_1 := v.Args[1]
  2156  		if v_1.Op != OpAMD64LEAQ {
  2157  			break
  2158  		}
  2159  		off2 := v_1.AuxInt
  2160  		sym2 := v_1.Aux
  2161  		base := v_1.Args[0]
  2162  		mem := v.Args[2]
  2163  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  2164  			break
  2165  		}
  2166  		v.reset(OpAMD64ADDLload)
  2167  		v.AuxInt = off1 + off2
  2168  		v.Aux = mergeSym(sym1, sym2)
  2169  		v.AddArg(val)
  2170  		v.AddArg(base)
  2171  		v.AddArg(mem)
  2172  		return true
  2173  	}
  2174  	// match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
  2175  	// cond:
  2176  	// result: (ADDL x (MOVLf2i y))
  2177  	for {
  2178  		off := v.AuxInt
  2179  		sym := v.Aux
  2180  		_ = v.Args[2]
  2181  		x := v.Args[0]
  2182  		ptr := v.Args[1]
  2183  		v_2 := v.Args[2]
  2184  		if v_2.Op != OpAMD64MOVSSstore {
  2185  			break
  2186  		}
  2187  		if v_2.AuxInt != off {
  2188  			break
  2189  		}
  2190  		if v_2.Aux != sym {
  2191  			break
  2192  		}
  2193  		_ = v_2.Args[2]
  2194  		if ptr != v_2.Args[0] {
  2195  			break
  2196  		}
  2197  		y := v_2.Args[1]
  2198  		v.reset(OpAMD64ADDL)
  2199  		v.AddArg(x)
  2200  		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
  2201  		v0.AddArg(y)
  2202  		v.AddArg(v0)
  2203  		return true
  2204  	}
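	// The rule above is store-to-load forwarding across register files: if the
	// memory the ADDLload would read was just written by a MOVSSstore to the
	// same address, the stored XMM value is moved directly to an integer
	// register with MOVLf2i instead of bouncing through memory. This shape can
	// arise, for example, from code that reinterprets float bits in the style
	// of math.Float32bits.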
  2205  	return false
  2206  }
  2207  func rewriteValueAMD64_OpAMD64ADDLmodify_0(v *Value) bool {
  2208  	// match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
  2209  	// cond: is32Bit(off1+off2)
  2210  	// result: (ADDLmodify [off1+off2] {sym} base val mem)
  2211  	for {
  2212  		off1 := v.AuxInt
  2213  		sym := v.Aux
  2214  		_ = v.Args[2]
  2215  		v_0 := v.Args[0]
  2216  		if v_0.Op != OpAMD64ADDQconst {
  2217  			break
  2218  		}
  2219  		off2 := v_0.AuxInt
  2220  		base := v_0.Args[0]
  2221  		val := v.Args[1]
  2222  		mem := v.Args[2]
  2223  		if !(is32Bit(off1 + off2)) {
  2224  			break
  2225  		}
  2226  		v.reset(OpAMD64ADDLmodify)
  2227  		v.AuxInt = off1 + off2
  2228  		v.Aux = sym
  2229  		v.AddArg(base)
  2230  		v.AddArg(val)
  2231  		v.AddArg(mem)
  2232  		return true
  2233  	}
  2234  	// match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  2235  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  2236  	// result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  2237  	for {
  2238  		off1 := v.AuxInt
  2239  		sym1 := v.Aux
  2240  		_ = v.Args[2]
  2241  		v_0 := v.Args[0]
  2242  		if v_0.Op != OpAMD64LEAQ {
  2243  			break
  2244  		}
  2245  		off2 := v_0.AuxInt
  2246  		sym2 := v_0.Aux
  2247  		base := v_0.Args[0]
  2248  		val := v.Args[1]
  2249  		mem := v.Args[2]
  2250  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  2251  			break
  2252  		}
  2253  		v.reset(OpAMD64ADDLmodify)
  2254  		v.AuxInt = off1 + off2
  2255  		v.Aux = mergeSym(sym1, sym2)
  2256  		v.AddArg(base)
  2257  		v.AddArg(val)
  2258  		v.AddArg(mem)
  2259  		return true
  2260  	}
  2261  	return false
  2262  }
  2263  func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool {
  2264  	// match: (ADDQ x (MOVQconst [c]))
  2265  	// cond: is32Bit(c)
  2266  	// result: (ADDQconst [c] x)
  2267  	for {
  2268  		_ = v.Args[1]
  2269  		x := v.Args[0]
  2270  		v_1 := v.Args[1]
  2271  		if v_1.Op != OpAMD64MOVQconst {
  2272  			break
  2273  		}
  2274  		c := v_1.AuxInt
  2275  		if !(is32Bit(c)) {
  2276  			break
  2277  		}
  2278  		v.reset(OpAMD64ADDQconst)
  2279  		v.AuxInt = c
  2280  		v.AddArg(x)
  2281  		return true
  2282  	}
  2283  	// match: (ADDQ (MOVQconst [c]) x)
  2284  	// cond: is32Bit(c)
  2285  	// result: (ADDQconst [c] x)
  2286  	for {
  2287  		_ = v.Args[1]
  2288  		v_0 := v.Args[0]
  2289  		if v_0.Op != OpAMD64MOVQconst {
  2290  			break
  2291  		}
  2292  		c := v_0.AuxInt
  2293  		x := v.Args[1]
  2294  		if !(is32Bit(c)) {
  2295  			break
  2296  		}
  2297  		v.reset(OpAMD64ADDQconst)
  2298  		v.AuxInt = c
  2299  		v.AddArg(x)
  2300  		return true
  2301  	}
  2302  	// match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d]))
  2303  	// cond: d==64-c
  2304  	// result: (ROLQconst x [c])
  2305  	for {
  2306  		_ = v.Args[1]
  2307  		v_0 := v.Args[0]
  2308  		if v_0.Op != OpAMD64SHLQconst {
  2309  			break
  2310  		}
  2311  		c := v_0.AuxInt
  2312  		x := v_0.Args[0]
  2313  		v_1 := v.Args[1]
  2314  		if v_1.Op != OpAMD64SHRQconst {
  2315  			break
  2316  		}
  2317  		d := v_1.AuxInt
  2318  		if x != v_1.Args[0] {
  2319  			break
  2320  		}
  2321  		if !(d == 64-c) {
  2322  			break
  2323  		}
  2324  		v.reset(OpAMD64ROLQconst)
  2325  		v.AuxInt = c
  2326  		v.AddArg(x)
  2327  		return true
  2328  	}
  2329  	// match: (ADDQ (SHRQconst x [d]) (SHLQconst x [c]))
  2330  	// cond: d==64-c
  2331  	// result: (ROLQconst x [c])
  2332  	for {
  2333  		_ = v.Args[1]
  2334  		v_0 := v.Args[0]
  2335  		if v_0.Op != OpAMD64SHRQconst {
  2336  			break
  2337  		}
  2338  		d := v_0.AuxInt
  2339  		x := v_0.Args[0]
  2340  		v_1 := v.Args[1]
  2341  		if v_1.Op != OpAMD64SHLQconst {
  2342  			break
  2343  		}
  2344  		c := v_1.AuxInt
  2345  		if x != v_1.Args[0] {
  2346  			break
  2347  		}
  2348  		if !(d == 64-c) {
  2349  			break
  2350  		}
  2351  		v.reset(OpAMD64ROLQconst)
  2352  		v.AuxInt = c
  2353  		v.AddArg(x)
  2354  		return true
  2355  	}
  2356  	// match: (ADDQ x (SHLQconst [3] y))
  2357  	// cond:
  2358  	// result: (LEAQ8 x y)
  2359  	for {
  2360  		_ = v.Args[1]
  2361  		x := v.Args[0]
  2362  		v_1 := v.Args[1]
  2363  		if v_1.Op != OpAMD64SHLQconst {
  2364  			break
  2365  		}
  2366  		if v_1.AuxInt != 3 {
  2367  			break
  2368  		}
  2369  		y := v_1.Args[0]
  2370  		v.reset(OpAMD64LEAQ8)
  2371  		v.AddArg(x)
  2372  		v.AddArg(y)
  2373  		return true
  2374  	}
  2375  	// match: (ADDQ (SHLQconst [3] y) x)
  2376  	// cond:
  2377  	// result: (LEAQ8 x y)
  2378  	for {
  2379  		_ = v.Args[1]
  2380  		v_0 := v.Args[0]
  2381  		if v_0.Op != OpAMD64SHLQconst {
  2382  			break
  2383  		}
  2384  		if v_0.AuxInt != 3 {
  2385  			break
  2386  		}
  2387  		y := v_0.Args[0]
  2388  		x := v.Args[1]
  2389  		v.reset(OpAMD64LEAQ8)
  2390  		v.AddArg(x)
  2391  		v.AddArg(y)
  2392  		return true
  2393  	}
  2394  	// match: (ADDQ x (SHLQconst [2] y))
  2395  	// cond:
  2396  	// result: (LEAQ4 x y)
  2397  	for {
  2398  		_ = v.Args[1]
  2399  		x := v.Args[0]
  2400  		v_1 := v.Args[1]
  2401  		if v_1.Op != OpAMD64SHLQconst {
  2402  			break
  2403  		}
  2404  		if v_1.AuxInt != 2 {
  2405  			break
  2406  		}
  2407  		y := v_1.Args[0]
  2408  		v.reset(OpAMD64LEAQ4)
  2409  		v.AddArg(x)
  2410  		v.AddArg(y)
  2411  		return true
  2412  	}
  2413  	// match: (ADDQ (SHLQconst [2] y) x)
  2414  	// cond:
  2415  	// result: (LEAQ4 x y)
  2416  	for {
  2417  		_ = v.Args[1]
  2418  		v_0 := v.Args[0]
  2419  		if v_0.Op != OpAMD64SHLQconst {
  2420  			break
  2421  		}
  2422  		if v_0.AuxInt != 2 {
  2423  			break
  2424  		}
  2425  		y := v_0.Args[0]
  2426  		x := v.Args[1]
  2427  		v.reset(OpAMD64LEAQ4)
  2428  		v.AddArg(x)
  2429  		v.AddArg(y)
  2430  		return true
  2431  	}
  2432  	// match: (ADDQ x (SHLQconst [1] y))
  2433  	// cond:
  2434  	// result: (LEAQ2 x y)
  2435  	for {
  2436  		_ = v.Args[1]
  2437  		x := v.Args[0]
  2438  		v_1 := v.Args[1]
  2439  		if v_1.Op != OpAMD64SHLQconst {
  2440  			break
  2441  		}
  2442  		if v_1.AuxInt != 1 {
  2443  			break
  2444  		}
  2445  		y := v_1.Args[0]
  2446  		v.reset(OpAMD64LEAQ2)
  2447  		v.AddArg(x)
  2448  		v.AddArg(y)
  2449  		return true
  2450  	}
  2451  	// match: (ADDQ (SHLQconst [1] y) x)
  2452  	// cond:
  2453  	// result: (LEAQ2 x y)
  2454  	for {
  2455  		_ = v.Args[1]
  2456  		v_0 := v.Args[0]
  2457  		if v_0.Op != OpAMD64SHLQconst {
  2458  			break
  2459  		}
  2460  		if v_0.AuxInt != 1 {
  2461  			break
  2462  		}
  2463  		y := v_0.Args[0]
  2464  		x := v.Args[1]
  2465  		v.reset(OpAMD64LEAQ2)
  2466  		v.AddArg(x)
  2467  		v.AddArg(y)
  2468  		return true
  2469  	}
  2470  	return false
  2471  }
  2472  func rewriteValueAMD64_OpAMD64ADDQ_10(v *Value) bool {
  2473  	// match: (ADDQ x (ADDQ y y))
  2474  	// cond:
  2475  	// result: (LEAQ2 x y)
  2476  	for {
  2477  		_ = v.Args[1]
  2478  		x := v.Args[0]
  2479  		v_1 := v.Args[1]
  2480  		if v_1.Op != OpAMD64ADDQ {
  2481  			break
  2482  		}
  2483  		_ = v_1.Args[1]
  2484  		y := v_1.Args[0]
  2485  		if y != v_1.Args[1] {
  2486  			break
  2487  		}
  2488  		v.reset(OpAMD64LEAQ2)
  2489  		v.AddArg(x)
  2490  		v.AddArg(y)
  2491  		return true
  2492  	}
  2493  	// match: (ADDQ (ADDQ y y) x)
  2494  	// cond:
  2495  	// result: (LEAQ2 x y)
  2496  	for {
  2497  		_ = v.Args[1]
  2498  		v_0 := v.Args[0]
  2499  		if v_0.Op != OpAMD64ADDQ {
  2500  			break
  2501  		}
  2502  		_ = v_0.Args[1]
  2503  		y := v_0.Args[0]
  2504  		if y != v_0.Args[1] {
  2505  			break
  2506  		}
  2507  		x := v.Args[1]
  2508  		v.reset(OpAMD64LEAQ2)
  2509  		v.AddArg(x)
  2510  		v.AddArg(y)
  2511  		return true
  2512  	}
  2513  	// match: (ADDQ x (ADDQ x y))
  2514  	// cond:
  2515  	// result: (LEAQ2 y x)
  2516  	for {
  2517  		_ = v.Args[1]
  2518  		x := v.Args[0]
  2519  		v_1 := v.Args[1]
  2520  		if v_1.Op != OpAMD64ADDQ {
  2521  			break
  2522  		}
  2523  		_ = v_1.Args[1]
  2524  		if x != v_1.Args[0] {
  2525  			break
  2526  		}
  2527  		y := v_1.Args[1]
  2528  		v.reset(OpAMD64LEAQ2)
  2529  		v.AddArg(y)
  2530  		v.AddArg(x)
  2531  		return true
  2532  	}
  2533  	// match: (ADDQ x (ADDQ y x))
  2534  	// cond:
  2535  	// result: (LEAQ2 y x)
  2536  	for {
  2537  		_ = v.Args[1]
  2538  		x := v.Args[0]
  2539  		v_1 := v.Args[1]
  2540  		if v_1.Op != OpAMD64ADDQ {
  2541  			break
  2542  		}
  2543  		_ = v_1.Args[1]
  2544  		y := v_1.Args[0]
  2545  		if x != v_1.Args[1] {
  2546  			break
  2547  		}
  2548  		v.reset(OpAMD64LEAQ2)
  2549  		v.AddArg(y)
  2550  		v.AddArg(x)
  2551  		return true
  2552  	}
  2553  	// match: (ADDQ (ADDQ x y) x)
  2554  	// cond:
  2555  	// result: (LEAQ2 y x)
  2556  	for {
  2557  		_ = v.Args[1]
  2558  		v_0 := v.Args[0]
  2559  		if v_0.Op != OpAMD64ADDQ {
  2560  			break
  2561  		}
  2562  		_ = v_0.Args[1]
  2563  		x := v_0.Args[0]
  2564  		y := v_0.Args[1]
  2565  		if x != v.Args[1] {
  2566  			break
  2567  		}
  2568  		v.reset(OpAMD64LEAQ2)
  2569  		v.AddArg(y)
  2570  		v.AddArg(x)
  2571  		return true
  2572  	}
  2573  	// match: (ADDQ (ADDQ y x) x)
  2574  	// cond:
  2575  	// result: (LEAQ2 y x)
  2576  	for {
  2577  		_ = v.Args[1]
  2578  		v_0 := v.Args[0]
  2579  		if v_0.Op != OpAMD64ADDQ {
  2580  			break
  2581  		}
  2582  		_ = v_0.Args[1]
  2583  		y := v_0.Args[0]
  2584  		x := v_0.Args[1]
  2585  		if x != v.Args[1] {
  2586  			break
  2587  		}
  2588  		v.reset(OpAMD64LEAQ2)
  2589  		v.AddArg(y)
  2590  		v.AddArg(x)
  2591  		return true
  2592  	}
  2593  	// match: (ADDQ (ADDQconst [c] x) y)
  2594  	// cond:
  2595  	// result: (LEAQ1 [c] x y)
  2596  	for {
  2597  		_ = v.Args[1]
  2598  		v_0 := v.Args[0]
  2599  		if v_0.Op != OpAMD64ADDQconst {
  2600  			break
  2601  		}
  2602  		c := v_0.AuxInt
  2603  		x := v_0.Args[0]
  2604  		y := v.Args[1]
  2605  		v.reset(OpAMD64LEAQ1)
  2606  		v.AuxInt = c
  2607  		v.AddArg(x)
  2608  		v.AddArg(y)
  2609  		return true
  2610  	}
  2611  	// match: (ADDQ y (ADDQconst [c] x))
  2612  	// cond:
  2613  	// result: (LEAQ1 [c] x y)
  2614  	for {
  2615  		_ = v.Args[1]
  2616  		y := v.Args[0]
  2617  		v_1 := v.Args[1]
  2618  		if v_1.Op != OpAMD64ADDQconst {
  2619  			break
  2620  		}
  2621  		c := v_1.AuxInt
  2622  		x := v_1.Args[0]
  2623  		v.reset(OpAMD64LEAQ1)
  2624  		v.AuxInt = c
  2625  		v.AddArg(x)
  2626  		v.AddArg(y)
  2627  		return true
  2628  	}
  2629  	// match: (ADDQ x (LEAQ [c] {s} y))
  2630  	// cond: x.Op != OpSB && y.Op != OpSB
  2631  	// result: (LEAQ1 [c] {s} x y)
  2632  	for {
  2633  		_ = v.Args[1]
  2634  		x := v.Args[0]
  2635  		v_1 := v.Args[1]
  2636  		if v_1.Op != OpAMD64LEAQ {
  2637  			break
  2638  		}
  2639  		c := v_1.AuxInt
  2640  		s := v_1.Aux
  2641  		y := v_1.Args[0]
  2642  		if !(x.Op != OpSB && y.Op != OpSB) {
  2643  			break
  2644  		}
  2645  		v.reset(OpAMD64LEAQ1)
  2646  		v.AuxInt = c
  2647  		v.Aux = s
  2648  		v.AddArg(x)
  2649  		v.AddArg(y)
  2650  		return true
  2651  	}
  2652  	// match: (ADDQ (LEAQ [c] {s} y) x)
  2653  	// cond: x.Op != OpSB && y.Op != OpSB
  2654  	// result: (LEAQ1 [c] {s} x y)
  2655  	for {
  2656  		_ = v.Args[1]
  2657  		v_0 := v.Args[0]
  2658  		if v_0.Op != OpAMD64LEAQ {
  2659  			break
  2660  		}
  2661  		c := v_0.AuxInt
  2662  		s := v_0.Aux
  2663  		y := v_0.Args[0]
  2664  		x := v.Args[1]
  2665  		if !(x.Op != OpSB && y.Op != OpSB) {
  2666  			break
  2667  		}
  2668  		v.reset(OpAMD64LEAQ1)
  2669  		v.AuxInt = c
  2670  		v.Aux = s
  2671  		v.AddArg(x)
  2672  		v.AddArg(y)
  2673  		return true
  2674  	}
  2675  	return false
  2676  }
  2677  func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool {
  2678  	// match: (ADDQ x (NEGQ y))
  2679  	// cond:
  2680  	// result: (SUBQ x y)
  2681  	for {
  2682  		_ = v.Args[1]
  2683  		x := v.Args[0]
  2684  		v_1 := v.Args[1]
  2685  		if v_1.Op != OpAMD64NEGQ {
  2686  			break
  2687  		}
  2688  		y := v_1.Args[0]
  2689  		v.reset(OpAMD64SUBQ)
  2690  		v.AddArg(x)
  2691  		v.AddArg(y)
  2692  		return true
  2693  	}
  2694  	// match: (ADDQ (NEGQ y) x)
  2695  	// cond:
  2696  	// result: (SUBQ x y)
  2697  	for {
  2698  		_ = v.Args[1]
  2699  		v_0 := v.Args[0]
  2700  		if v_0.Op != OpAMD64NEGQ {
  2701  			break
  2702  		}
  2703  		y := v_0.Args[0]
  2704  		x := v.Args[1]
  2705  		v.reset(OpAMD64SUBQ)
  2706  		v.AddArg(x)
  2707  		v.AddArg(y)
  2708  		return true
  2709  	}
  2710  	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
  2711  	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
  2712  	// result: (ADDQload x [off] {sym} ptr mem)
  2713  	for {
  2714  		_ = v.Args[1]
  2715  		x := v.Args[0]
  2716  		l := v.Args[1]
  2717  		if l.Op != OpAMD64MOVQload {
  2718  			break
  2719  		}
  2720  		off := l.AuxInt
  2721  		sym := l.Aux
  2722  		_ = l.Args[1]
  2723  		ptr := l.Args[0]
  2724  		mem := l.Args[1]
  2725  		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
  2726  			break
  2727  		}
  2728  		v.reset(OpAMD64ADDQload)
  2729  		v.AuxInt = off
  2730  		v.Aux = sym
  2731  		v.AddArg(x)
  2732  		v.AddArg(ptr)
  2733  		v.AddArg(mem)
  2734  		return true
  2735  	}
  2736  	// match: (ADDQ l:(MOVQload [off] {sym} ptr mem) x)
  2737  	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
  2738  	// result: (ADDQload x [off] {sym} ptr mem)
  2739  	for {
  2740  		_ = v.Args[1]
  2741  		l := v.Args[0]
  2742  		if l.Op != OpAMD64MOVQload {
  2743  			break
  2744  		}
  2745  		off := l.AuxInt
  2746  		sym := l.Aux
  2747  		_ = l.Args[1]
  2748  		ptr := l.Args[0]
  2749  		mem := l.Args[1]
  2750  		x := v.Args[1]
  2751  		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
  2752  			break
  2753  		}
  2754  		v.reset(OpAMD64ADDQload)
  2755  		v.AuxInt = off
  2756  		v.Aux = sym
  2757  		v.AddArg(x)
  2758  		v.AddArg(ptr)
  2759  		v.AddArg(mem)
  2760  		return true
  2761  	}
  2762  	return false
  2763  }
  2764  func rewriteValueAMD64_OpAMD64ADDQcarry_0(v *Value) bool {
  2765  	// match: (ADDQcarry x (MOVQconst [c]))
  2766  	// cond: is32Bit(c)
  2767  	// result: (ADDQconstcarry x [c])
  2768  	for {
  2769  		_ = v.Args[1]
  2770  		x := v.Args[0]
  2771  		v_1 := v.Args[1]
  2772  		if v_1.Op != OpAMD64MOVQconst {
  2773  			break
  2774  		}
  2775  		c := v_1.AuxInt
  2776  		if !(is32Bit(c)) {
  2777  			break
  2778  		}
  2779  		v.reset(OpAMD64ADDQconstcarry)
  2780  		v.AuxInt = c
  2781  		v.AddArg(x)
  2782  		return true
  2783  	}
  2784  	// match: (ADDQcarry (MOVQconst [c]) x)
  2785  	// cond: is32Bit(c)
  2786  	// result: (ADDQconstcarry x [c])
  2787  	for {
  2788  		_ = v.Args[1]
  2789  		v_0 := v.Args[0]
  2790  		if v_0.Op != OpAMD64MOVQconst {
  2791  			break
  2792  		}
  2793  		c := v_0.AuxInt
  2794  		x := v.Args[1]
  2795  		if !(is32Bit(c)) {
  2796  			break
  2797  		}
  2798  		v.reset(OpAMD64ADDQconstcarry)
  2799  		v.AuxInt = c
  2800  		v.AddArg(x)
  2801  		return true
  2802  	}
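	// ADDQcarry produces a sum together with a carry flag; when one operand is
	// a constant representable as a sign-extended 32-bit immediate (is32Bit),
	// it becomes ADDQconstcarry so the constant can be encoded directly. In Go
	// source this op typically originates from the math/bits.Add64 intrinsic,
	// e.g. (illustrative only):
	//
	//	sum, carry := bits.Add64(x, 5, 0)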
  2803  	return false
  2804  }
  2805  func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool {
  2806  	// match: (ADDQconst [c] (ADDQ x y))
  2807  	// cond:
  2808  	// result: (LEAQ1 [c] x y)
  2809  	for {
  2810  		c := v.AuxInt
  2811  		v_0 := v.Args[0]
  2812  		if v_0.Op != OpAMD64ADDQ {
  2813  			break
  2814  		}
  2815  		_ = v_0.Args[1]
  2816  		x := v_0.Args[0]
  2817  		y := v_0.Args[1]
  2818  		v.reset(OpAMD64LEAQ1)
  2819  		v.AuxInt = c
  2820  		v.AddArg(x)
  2821  		v.AddArg(y)
  2822  		return true
  2823  	}
  2824  	// match: (ADDQconst [c] (SHLQconst [1] x))
  2825  	// cond:
  2826  	// result: (LEAQ1 [c] x x)
  2827  	for {
  2828  		c := v.AuxInt
  2829  		v_0 := v.Args[0]
  2830  		if v_0.Op != OpAMD64SHLQconst {
  2831  			break
  2832  		}
  2833  		if v_0.AuxInt != 1 {
  2834  			break
  2835  		}
  2836  		x := v_0.Args[0]
  2837  		v.reset(OpAMD64LEAQ1)
  2838  		v.AuxInt = c
  2839  		v.AddArg(x)
  2840  		v.AddArg(x)
  2841  		return true
  2842  	}
  2843  	// match: (ADDQconst [c] (LEAQ [d] {s} x))
  2844  	// cond: is32Bit(c+d)
  2845  	// result: (LEAQ [c+d] {s} x)
  2846  	for {
  2847  		c := v.AuxInt
  2848  		v_0 := v.Args[0]
  2849  		if v_0.Op != OpAMD64LEAQ {
  2850  			break
  2851  		}
  2852  		d := v_0.AuxInt
  2853  		s := v_0.Aux
  2854  		x := v_0.Args[0]
  2855  		if !(is32Bit(c + d)) {
  2856  			break
  2857  		}
  2858  		v.reset(OpAMD64LEAQ)
  2859  		v.AuxInt = c + d
  2860  		v.Aux = s
  2861  		v.AddArg(x)
  2862  		return true
  2863  	}
  2864  	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
  2865  	// cond: is32Bit(c+d)
  2866  	// result: (LEAQ1 [c+d] {s} x y)
  2867  	for {
  2868  		c := v.AuxInt
  2869  		v_0 := v.Args[0]
  2870  		if v_0.Op != OpAMD64LEAQ1 {
  2871  			break
  2872  		}
  2873  		d := v_0.AuxInt
  2874  		s := v_0.Aux
  2875  		_ = v_0.Args[1]
  2876  		x := v_0.Args[0]
  2877  		y := v_0.Args[1]
  2878  		if !(is32Bit(c + d)) {
  2879  			break
  2880  		}
  2881  		v.reset(OpAMD64LEAQ1)
  2882  		v.AuxInt = c + d
  2883  		v.Aux = s
  2884  		v.AddArg(x)
  2885  		v.AddArg(y)
  2886  		return true
  2887  	}
  2888  	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
  2889  	// cond: is32Bit(c+d)
  2890  	// result: (LEAQ2 [c+d] {s} x y)
  2891  	for {
  2892  		c := v.AuxInt
  2893  		v_0 := v.Args[0]
  2894  		if v_0.Op != OpAMD64LEAQ2 {
  2895  			break
  2896  		}
  2897  		d := v_0.AuxInt
  2898  		s := v_0.Aux
  2899  		_ = v_0.Args[1]
  2900  		x := v_0.Args[0]
  2901  		y := v_0.Args[1]
  2902  		if !(is32Bit(c + d)) {
  2903  			break
  2904  		}
  2905  		v.reset(OpAMD64LEAQ2)
  2906  		v.AuxInt = c + d
  2907  		v.Aux = s
  2908  		v.AddArg(x)
  2909  		v.AddArg(y)
  2910  		return true
  2911  	}
  2912  	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
  2913  	// cond: is32Bit(c+d)
  2914  	// result: (LEAQ4 [c+d] {s} x y)
  2915  	for {
  2916  		c := v.AuxInt
  2917  		v_0 := v.Args[0]
  2918  		if v_0.Op != OpAMD64LEAQ4 {
  2919  			break
  2920  		}
  2921  		d := v_0.AuxInt
  2922  		s := v_0.Aux
  2923  		_ = v_0.Args[1]
  2924  		x := v_0.Args[0]
  2925  		y := v_0.Args[1]
  2926  		if !(is32Bit(c + d)) {
  2927  			break
  2928  		}
  2929  		v.reset(OpAMD64LEAQ4)
  2930  		v.AuxInt = c + d
  2931  		v.Aux = s
  2932  		v.AddArg(x)
  2933  		v.AddArg(y)
  2934  		return true
  2935  	}
  2936  	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
  2937  	// cond: is32Bit(c+d)
  2938  	// result: (LEAQ8 [c+d] {s} x y)
  2939  	for {
  2940  		c := v.AuxInt
  2941  		v_0 := v.Args[0]
  2942  		if v_0.Op != OpAMD64LEAQ8 {
  2943  			break
  2944  		}
  2945  		d := v_0.AuxInt
  2946  		s := v_0.Aux
  2947  		_ = v_0.Args[1]
  2948  		x := v_0.Args[0]
  2949  		y := v_0.Args[1]
  2950  		if !(is32Bit(c + d)) {
  2951  			break
  2952  		}
  2953  		v.reset(OpAMD64LEAQ8)
  2954  		v.AuxInt = c + d
  2955  		v.Aux = s
  2956  		v.AddArg(x)
  2957  		v.AddArg(y)
  2958  		return true
  2959  	}
  2960  	// match: (ADDQconst [0] x)
  2961  	// cond:
  2962  	// result: x
  2963  	for {
  2964  		if v.AuxInt != 0 {
  2965  			break
  2966  		}
  2967  		x := v.Args[0]
  2968  		v.reset(OpCopy)
  2969  		v.Type = x.Type
  2970  		v.AddArg(x)
  2971  		return true
  2972  	}
  2973  	// match: (ADDQconst [c] (MOVQconst [d]))
  2974  	// cond:
  2975  	// result: (MOVQconst [c+d])
  2976  	for {
  2977  		c := v.AuxInt
  2978  		v_0 := v.Args[0]
  2979  		if v_0.Op != OpAMD64MOVQconst {
  2980  			break
  2981  		}
  2982  		d := v_0.AuxInt
  2983  		v.reset(OpAMD64MOVQconst)
  2984  		v.AuxInt = c + d
  2985  		return true
  2986  	}
  2987  	// match: (ADDQconst [c] (ADDQconst [d] x))
  2988  	// cond: is32Bit(c+d)
  2989  	// result: (ADDQconst [c+d] x)
  2990  	for {
  2991  		c := v.AuxInt
  2992  		v_0 := v.Args[0]
  2993  		if v_0.Op != OpAMD64ADDQconst {
  2994  			break
  2995  		}
  2996  		d := v_0.AuxInt
  2997  		x := v_0.Args[0]
  2998  		if !(is32Bit(c + d)) {
  2999  			break
  3000  		}
  3001  		v.reset(OpAMD64ADDQconst)
  3002  		v.AuxInt = c + d
  3003  		v.AddArg(x)
  3004  		return true
  3005  	}
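	// Note the asymmetry between the last two rules: folding into a MOVQconst
	// needs no range check because MOVQconst can hold any 64-bit constant,
	// while collapsing two ADDQconst requires is32Bit(c+d) because ADDQconst's
	// immediate is limited to a sign-extended 32-bit value.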
  3006  	return false
  3007  }
  3008  func rewriteValueAMD64_OpAMD64ADDQconst_10(v *Value) bool {
  3009  	// match: (ADDQconst [off] x:(SP))
  3010  	// cond:
  3011  	// result: (LEAQ [off] x)
  3012  	for {
  3013  		off := v.AuxInt
  3014  		x := v.Args[0]
  3015  		if x.Op != OpSP {
  3016  			break
  3017  		}
  3018  		v.reset(OpAMD64LEAQ)
  3019  		v.AuxInt = off
  3020  		v.AddArg(x)
  3021  		return true
  3022  	}
  3023  	return false
  3024  }
  3025  func rewriteValueAMD64_OpAMD64ADDQconstmodify_0(v *Value) bool {
  3026  	// match: (ADDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
  3027  	// cond: ValAndOff(valoff1).canAdd(off2)
  3028  	// result: (ADDQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
  3029  	for {
  3030  		valoff1 := v.AuxInt
  3031  		sym := v.Aux
  3032  		_ = v.Args[1]
  3033  		v_0 := v.Args[0]
  3034  		if v_0.Op != OpAMD64ADDQconst {
  3035  			break
  3036  		}
  3037  		off2 := v_0.AuxInt
  3038  		base := v_0.Args[0]
  3039  		mem := v.Args[1]
  3040  		if !(ValAndOff(valoff1).canAdd(off2)) {
  3041  			break
  3042  		}
  3043  		v.reset(OpAMD64ADDQconstmodify)
  3044  		v.AuxInt = ValAndOff(valoff1).add(off2)
  3045  		v.Aux = sym
  3046  		v.AddArg(base)
  3047  		v.AddArg(mem)
  3048  		return true
  3049  	}
  3050  	// match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
  3051  	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
  3052  	// result: (ADDQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
  3053  	for {
  3054  		valoff1 := v.AuxInt
  3055  		sym1 := v.Aux
  3056  		_ = v.Args[1]
  3057  		v_0 := v.Args[0]
  3058  		if v_0.Op != OpAMD64LEAQ {
  3059  			break
  3060  		}
  3061  		off2 := v_0.AuxInt
  3062  		sym2 := v_0.Aux
  3063  		base := v_0.Args[0]
  3064  		mem := v.Args[1]
  3065  		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
  3066  			break
  3067  		}
  3068  		v.reset(OpAMD64ADDQconstmodify)
  3069  		v.AuxInt = ValAndOff(valoff1).add(off2)
  3070  		v.Aux = mergeSym(sym1, sym2)
  3071  		v.AddArg(base)
  3072  		v.AddArg(mem)
  3073  		return true
  3074  	}
  3075  	return false
  3076  }
  3077  func rewriteValueAMD64_OpAMD64ADDQload_0(v *Value) bool {
  3078  	b := v.Block
  3079  	_ = b
  3080  	typ := &b.Func.Config.Types
  3081  	_ = typ
  3082  	// match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem)
  3083  	// cond: is32Bit(off1+off2)
  3084  	// result: (ADDQload [off1+off2] {sym} val base mem)
  3085  	for {
  3086  		off1 := v.AuxInt
  3087  		sym := v.Aux
  3088  		_ = v.Args[2]
  3089  		val := v.Args[0]
  3090  		v_1 := v.Args[1]
  3091  		if v_1.Op != OpAMD64ADDQconst {
  3092  			break
  3093  		}
  3094  		off2 := v_1.AuxInt
  3095  		base := v_1.Args[0]
  3096  		mem := v.Args[2]
  3097  		if !(is32Bit(off1 + off2)) {
  3098  			break
  3099  		}
  3100  		v.reset(OpAMD64ADDQload)
  3101  		v.AuxInt = off1 + off2
  3102  		v.Aux = sym
  3103  		v.AddArg(val)
  3104  		v.AddArg(base)
  3105  		v.AddArg(mem)
  3106  		return true
  3107  	}
  3108  	// match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
  3109  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  3110  	// result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
  3111  	for {
  3112  		off1 := v.AuxInt
  3113  		sym1 := v.Aux
  3114  		_ = v.Args[2]
  3115  		val := v.Args[0]
  3116  		v_1 := v.Args[1]
  3117  		if v_1.Op != OpAMD64LEAQ {
  3118  			break
  3119  		}
  3120  		off2 := v_1.AuxInt
  3121  		sym2 := v_1.Aux
  3122  		base := v_1.Args[0]
  3123  		mem := v.Args[2]
  3124  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  3125  			break
  3126  		}
  3127  		v.reset(OpAMD64ADDQload)
  3128  		v.AuxInt = off1 + off2
  3129  		v.Aux = mergeSym(sym1, sym2)
  3130  		v.AddArg(val)
  3131  		v.AddArg(base)
  3132  		v.AddArg(mem)
  3133  		return true
  3134  	}
  3135  	// match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
  3136  	// cond:
  3137  	// result: (ADDQ x (MOVQf2i y))
  3138  	for {
  3139  		off := v.AuxInt
  3140  		sym := v.Aux
  3141  		_ = v.Args[2]
  3142  		x := v.Args[0]
  3143  		ptr := v.Args[1]
  3144  		v_2 := v.Args[2]
  3145  		if v_2.Op != OpAMD64MOVSDstore {
  3146  			break
  3147  		}
  3148  		if v_2.AuxInt != off {
  3149  			break
  3150  		}
  3151  		if v_2.Aux != sym {
  3152  			break
  3153  		}
  3154  		_ = v_2.Args[2]
  3155  		if ptr != v_2.Args[0] {
  3156  			break
  3157  		}
  3158  		y := v_2.Args[1]
  3159  		v.reset(OpAMD64ADDQ)
  3160  		v.AddArg(x)
  3161  		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
  3162  		v0.AddArg(y)
  3163  		v.AddArg(v0)
  3164  		return true
  3165  	}
  3166  	return false
  3167  }
  3168  func rewriteValueAMD64_OpAMD64ADDQmodify_0(v *Value) bool {
  3169  	// match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
  3170  	// cond: is32Bit(off1+off2)
  3171  	// result: (ADDQmodify [off1+off2] {sym} base val mem)
  3172  	for {
  3173  		off1 := v.AuxInt
  3174  		sym := v.Aux
  3175  		_ = v.Args[2]
  3176  		v_0 := v.Args[0]
  3177  		if v_0.Op != OpAMD64ADDQconst {
  3178  			break
  3179  		}
  3180  		off2 := v_0.AuxInt
  3181  		base := v_0.Args[0]
  3182  		val := v.Args[1]
  3183  		mem := v.Args[2]
  3184  		if !(is32Bit(off1 + off2)) {
  3185  			break
  3186  		}
  3187  		v.reset(OpAMD64ADDQmodify)
  3188  		v.AuxInt = off1 + off2
  3189  		v.Aux = sym
  3190  		v.AddArg(base)
  3191  		v.AddArg(val)
  3192  		v.AddArg(mem)
  3193  		return true
  3194  	}
  3195  	// match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  3196  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  3197  	// result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  3198  	for {
  3199  		off1 := v.AuxInt
  3200  		sym1 := v.Aux
  3201  		_ = v.Args[2]
  3202  		v_0 := v.Args[0]
  3203  		if v_0.Op != OpAMD64LEAQ {
  3204  			break
  3205  		}
  3206  		off2 := v_0.AuxInt
  3207  		sym2 := v_0.Aux
  3208  		base := v_0.Args[0]
  3209  		val := v.Args[1]
  3210  		mem := v.Args[2]
  3211  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  3212  			break
  3213  		}
  3214  		v.reset(OpAMD64ADDQmodify)
  3215  		v.AuxInt = off1 + off2
  3216  		v.Aux = mergeSym(sym1, sym2)
  3217  		v.AddArg(base)
  3218  		v.AddArg(val)
  3219  		v.AddArg(mem)
  3220  		return true
  3221  	}
  3222  	return false
  3223  }
  3224  func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool {
  3225  	// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
  3226  	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
  3227  	// result: (ADDSDload x [off] {sym} ptr mem)
  3228  	for {
  3229  		_ = v.Args[1]
  3230  		x := v.Args[0]
  3231  		l := v.Args[1]
  3232  		if l.Op != OpAMD64MOVSDload {
  3233  			break
  3234  		}
  3235  		off := l.AuxInt
  3236  		sym := l.Aux
  3237  		_ = l.Args[1]
  3238  		ptr := l.Args[0]
  3239  		mem := l.Args[1]
  3240  		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
  3241  			break
  3242  		}
  3243  		v.reset(OpAMD64ADDSDload)
  3244  		v.AuxInt = off
  3245  		v.Aux = sym
  3246  		v.AddArg(x)
  3247  		v.AddArg(ptr)
  3248  		v.AddArg(mem)
  3249  		return true
  3250  	}
  3251  	// match: (ADDSD l:(MOVSDload [off] {sym} ptr mem) x)
  3252  	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
  3253  	// result: (ADDSDload x [off] {sym} ptr mem)
  3254  	for {
  3255  		_ = v.Args[1]
  3256  		l := v.Args[0]
  3257  		if l.Op != OpAMD64MOVSDload {
  3258  			break
  3259  		}
  3260  		off := l.AuxInt
  3261  		sym := l.Aux
  3262  		_ = l.Args[1]
  3263  		ptr := l.Args[0]
  3264  		mem := l.Args[1]
  3265  		x := v.Args[1]
  3266  		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
  3267  			break
  3268  		}
  3269  		v.reset(OpAMD64ADDSDload)
  3270  		v.AuxInt = off
  3271  		v.Aux = sym
  3272  		v.AddArg(x)
  3273  		v.AddArg(ptr)
  3274  		v.AddArg(mem)
  3275  		return true
  3276  	}
  3277  	return false
  3278  }
  3279  func rewriteValueAMD64_OpAMD64ADDSDload_0(v *Value) bool {
  3280  	b := v.Block
  3281  	_ = b
  3282  	typ := &b.Func.Config.Types
  3283  	_ = typ
  3284  	// match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem)
  3285  	// cond: is32Bit(off1+off2)
  3286  	// result: (ADDSDload [off1+off2] {sym} val base mem)
  3287  	for {
  3288  		off1 := v.AuxInt
  3289  		sym := v.Aux
  3290  		_ = v.Args[2]
  3291  		val := v.Args[0]
  3292  		v_1 := v.Args[1]
  3293  		if v_1.Op != OpAMD64ADDQconst {
  3294  			break
  3295  		}
  3296  		off2 := v_1.AuxInt
  3297  		base := v_1.Args[0]
  3298  		mem := v.Args[2]
  3299  		if !(is32Bit(off1 + off2)) {
  3300  			break
  3301  		}
  3302  		v.reset(OpAMD64ADDSDload)
  3303  		v.AuxInt = off1 + off2
  3304  		v.Aux = sym
  3305  		v.AddArg(val)
  3306  		v.AddArg(base)
  3307  		v.AddArg(mem)
  3308  		return true
  3309  	}
  3310  	// match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
  3311  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  3312  	// result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
  3313  	for {
  3314  		off1 := v.AuxInt
  3315  		sym1 := v.Aux
  3316  		_ = v.Args[2]
  3317  		val := v.Args[0]
  3318  		v_1 := v.Args[1]
  3319  		if v_1.Op != OpAMD64LEAQ {
  3320  			break
  3321  		}
  3322  		off2 := v_1.AuxInt
  3323  		sym2 := v_1.Aux
  3324  		base := v_1.Args[0]
  3325  		mem := v.Args[2]
  3326  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  3327  			break
  3328  		}
  3329  		v.reset(OpAMD64ADDSDload)
  3330  		v.AuxInt = off1 + off2
  3331  		v.Aux = mergeSym(sym1, sym2)
  3332  		v.AddArg(val)
  3333  		v.AddArg(base)
  3334  		v.AddArg(mem)
  3335  		return true
  3336  	}
  3337  	// match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
  3338  	// cond:
  3339  	// result: (ADDSD x (MOVQi2f y))
  3340  	for {
  3341  		off := v.AuxInt
  3342  		sym := v.Aux
  3343  		_ = v.Args[2]
  3344  		x := v.Args[0]
  3345  		ptr := v.Args[1]
  3346  		v_2 := v.Args[2]
  3347  		if v_2.Op != OpAMD64MOVQstore {
  3348  			break
  3349  		}
  3350  		if v_2.AuxInt != off {
  3351  			break
  3352  		}
  3353  		if v_2.Aux != sym {
  3354  			break
  3355  		}
  3356  		_ = v_2.Args[2]
  3357  		if ptr != v_2.Args[0] {
  3358  			break
  3359  		}
  3360  		y := v_2.Args[1]
  3361  		v.reset(OpAMD64ADDSD)
  3362  		v.AddArg(x)
  3363  		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
  3364  		v0.AddArg(y)
  3365  		v.AddArg(v0)
  3366  		return true
  3367  	}
  3368  	return false
  3369  }
  3370  func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool {
  3371  	// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
  3372  	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
  3373  	// result: (ADDSSload x [off] {sym} ptr mem)
  3374  	for {
  3375  		_ = v.Args[1]
  3376  		x := v.Args[0]
  3377  		l := v.Args[1]
  3378  		if l.Op != OpAMD64MOVSSload {
  3379  			break
  3380  		}
  3381  		off := l.AuxInt
  3382  		sym := l.Aux
  3383  		_ = l.Args[1]
  3384  		ptr := l.Args[0]
  3385  		mem := l.Args[1]
  3386  		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
  3387  			break
  3388  		}
  3389  		v.reset(OpAMD64ADDSSload)
  3390  		v.AuxInt = off
  3391  		v.Aux = sym
  3392  		v.AddArg(x)
  3393  		v.AddArg(ptr)
  3394  		v.AddArg(mem)
  3395  		return true
  3396  	}
  3397  	// match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x)
  3398  	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
  3399  	// result: (ADDSSload x [off] {sym} ptr mem)
  3400  	for {
  3401  		_ = v.Args[1]
  3402  		l := v.Args[0]
  3403  		if l.Op != OpAMD64MOVSSload {
  3404  			break
  3405  		}
  3406  		off := l.AuxInt
  3407  		sym := l.Aux
  3408  		_ = l.Args[1]
  3409  		ptr := l.Args[0]
  3410  		mem := l.Args[1]
  3411  		x := v.Args[1]
  3412  		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
  3413  			break
  3414  		}
  3415  		v.reset(OpAMD64ADDSSload)
  3416  		v.AuxInt = off
  3417  		v.Aux = sym
  3418  		v.AddArg(x)
  3419  		v.AddArg(ptr)
  3420  		v.AddArg(mem)
  3421  		return true
  3422  	}
  3423  	return false
  3424  }
  3425  func rewriteValueAMD64_OpAMD64ADDSSload_0(v *Value) bool {
  3426  	b := v.Block
  3427  	_ = b
  3428  	typ := &b.Func.Config.Types
  3429  	_ = typ
  3430  	// match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem)
  3431  	// cond: is32Bit(off1+off2)
  3432  	// result: (ADDSSload [off1+off2] {sym} val base mem)
  3433  	for {
  3434  		off1 := v.AuxInt
  3435  		sym := v.Aux
  3436  		_ = v.Args[2]
  3437  		val := v.Args[0]
  3438  		v_1 := v.Args[1]
  3439  		if v_1.Op != OpAMD64ADDQconst {
  3440  			break
  3441  		}
  3442  		off2 := v_1.AuxInt
  3443  		base := v_1.Args[0]
  3444  		mem := v.Args[2]
  3445  		if !(is32Bit(off1 + off2)) {
  3446  			break
  3447  		}
  3448  		v.reset(OpAMD64ADDSSload)
  3449  		v.AuxInt = off1 + off2
  3450  		v.Aux = sym
  3451  		v.AddArg(val)
  3452  		v.AddArg(base)
  3453  		v.AddArg(mem)
  3454  		return true
  3455  	}
  3456  	// match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
  3457  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  3458  	// result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
  3459  	for {
  3460  		off1 := v.AuxInt
  3461  		sym1 := v.Aux
  3462  		_ = v.Args[2]
  3463  		val := v.Args[0]
  3464  		v_1 := v.Args[1]
  3465  		if v_1.Op != OpAMD64LEAQ {
  3466  			break
  3467  		}
  3468  		off2 := v_1.AuxInt
  3469  		sym2 := v_1.Aux
  3470  		base := v_1.Args[0]
  3471  		mem := v.Args[2]
  3472  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  3473  			break
  3474  		}
  3475  		v.reset(OpAMD64ADDSSload)
  3476  		v.AuxInt = off1 + off2
  3477  		v.Aux = mergeSym(sym1, sym2)
  3478  		v.AddArg(val)
  3479  		v.AddArg(base)
  3480  		v.AddArg(mem)
  3481  		return true
  3482  	}
  3483  	// match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
  3484  	// cond:
  3485  	// result: (ADDSS x (MOVLi2f y))
  3486  	for {
  3487  		off := v.AuxInt
  3488  		sym := v.Aux
  3489  		_ = v.Args[2]
  3490  		x := v.Args[0]
  3491  		ptr := v.Args[1]
  3492  		v_2 := v.Args[2]
  3493  		if v_2.Op != OpAMD64MOVLstore {
  3494  			break
  3495  		}
  3496  		if v_2.AuxInt != off {
  3497  			break
  3498  		}
  3499  		if v_2.Aux != sym {
  3500  			break
  3501  		}
  3502  		_ = v_2.Args[2]
  3503  		if ptr != v_2.Args[0] {
  3504  			break
  3505  		}
  3506  		y := v_2.Args[1]
  3507  		v.reset(OpAMD64ADDSS)
  3508  		v.AddArg(x)
  3509  		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
  3510  		v0.AddArg(y)
  3511  		v.AddArg(v0)
  3512  		return true
  3513  	}
  3514  	return false
  3515  }
  3516  func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool {
  3517  	b := v.Block
  3518  	_ = b
  3519  	config := b.Func.Config
  3520  	_ = config
  3521  	// match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x)
  3522  	// cond: !config.nacl
  3523  	// result: (BTRL x y)
  3524  	for {
  3525  		_ = v.Args[1]
  3526  		v_0 := v.Args[0]
  3527  		if v_0.Op != OpAMD64NOTL {
  3528  			break
  3529  		}
  3530  		v_0_0 := v_0.Args[0]
  3531  		if v_0_0.Op != OpAMD64SHLL {
  3532  			break
  3533  		}
  3534  		_ = v_0_0.Args[1]
  3535  		v_0_0_0 := v_0_0.Args[0]
  3536  		if v_0_0_0.Op != OpAMD64MOVLconst {
  3537  			break
  3538  		}
  3539  		if v_0_0_0.AuxInt != 1 {
  3540  			break
  3541  		}
  3542  		y := v_0_0.Args[1]
  3543  		x := v.Args[1]
  3544  		if !(!config.nacl) {
  3545  			break
  3546  		}
  3547  		v.reset(OpAMD64BTRL)
  3548  		v.AddArg(x)
  3549  		v.AddArg(y)
  3550  		return true
  3551  	}
  3552  	// match: (ANDL x (NOTL (SHLL (MOVLconst [1]) y)))
  3553  	// cond: !config.nacl
  3554  	// result: (BTRL x y)
  3555  	for {
  3556  		_ = v.Args[1]
  3557  		x := v.Args[0]
  3558  		v_1 := v.Args[1]
  3559  		if v_1.Op != OpAMD64NOTL {
  3560  			break
  3561  		}
  3562  		v_1_0 := v_1.Args[0]
  3563  		if v_1_0.Op != OpAMD64SHLL {
  3564  			break
  3565  		}
  3566  		_ = v_1_0.Args[1]
  3567  		v_1_0_0 := v_1_0.Args[0]
  3568  		if v_1_0_0.Op != OpAMD64MOVLconst {
  3569  			break
  3570  		}
  3571  		if v_1_0_0.AuxInt != 1 {
  3572  			break
  3573  		}
  3574  		y := v_1_0.Args[1]
  3575  		if !(!config.nacl) {
  3576  			break
  3577  		}
  3578  		v.reset(OpAMD64BTRL)
  3579  		v.AddArg(x)
  3580  		v.AddArg(y)
  3581  		return true
  3582  	}
  3583  	// match: (ANDL (MOVLconst [c]) x)
  3584  	// cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
  3585  	// result: (BTRLconst [log2uint32(^c)] x)
  3586  	for {
  3587  		_ = v.Args[1]
  3588  		v_0 := v.Args[0]
  3589  		if v_0.Op != OpAMD64MOVLconst {
  3590  			break
  3591  		}
  3592  		c := v_0.AuxInt
  3593  		x := v.Args[1]
  3594  		if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
  3595  			break
  3596  		}
  3597  		v.reset(OpAMD64BTRLconst)
  3598  		v.AuxInt = log2uint32(^c)
  3599  		v.AddArg(x)
  3600  		return true
  3601  	}
  3602  	// match: (ANDL x (MOVLconst [c]))
  3603  	// cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
  3604  	// result: (BTRLconst [log2uint32(^c)] x)
  3605  	for {
  3606  		_ = v.Args[1]
  3607  		x := v.Args[0]
  3608  		v_1 := v.Args[1]
  3609  		if v_1.Op != OpAMD64MOVLconst {
  3610  			break
  3611  		}
  3612  		c := v_1.AuxInt
  3613  		if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
  3614  			break
  3615  		}
  3616  		v.reset(OpAMD64BTRLconst)
  3617  		v.AuxInt = log2uint32(^c)
  3618  		v.AddArg(x)
  3619  		return true
  3620  	}
  3621  	// match: (ANDL x (MOVLconst [c]))
  3622  	// cond:
  3623  	// result: (ANDLconst [c] x)
  3624  	for {
  3625  		_ = v.Args[1]
  3626  		x := v.Args[0]
  3627  		v_1 := v.Args[1]
  3628  		if v_1.Op != OpAMD64MOVLconst {
  3629  			break
  3630  		}
  3631  		c := v_1.AuxInt
  3632  		v.reset(OpAMD64ANDLconst)
  3633  		v.AuxInt = c
  3634  		v.AddArg(x)
  3635  		return true
  3636  	}
  3637  	// match: (ANDL (MOVLconst [c]) x)
  3638  	// cond:
  3639  	// result: (ANDLconst [c] x)
  3640  	for {
  3641  		_ = v.Args[1]
  3642  		v_0 := v.Args[0]
  3643  		if v_0.Op != OpAMD64MOVLconst {
  3644  			break
  3645  		}
  3646  		c := v_0.AuxInt
  3647  		x := v.Args[1]
  3648  		v.reset(OpAMD64ANDLconst)
  3649  		v.AuxInt = c
  3650  		v.AddArg(x)
  3651  		return true
  3652  	}
  3653  	// match: (ANDL x x)
  3654  	// cond:
  3655  	// result: x
  3656  	for {
  3657  		_ = v.Args[1]
  3658  		x := v.Args[0]
  3659  		if x != v.Args[1] {
  3660  			break
  3661  		}
  3662  		v.reset(OpCopy)
  3663  		v.Type = x.Type
  3664  		v.AddArg(x)
  3665  		return true
  3666  	}
  3667  	// match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
  3668  	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
  3669  	// result: (ANDLload x [off] {sym} ptr mem)
  3670  	for {
  3671  		_ = v.Args[1]
  3672  		x := v.Args[0]
  3673  		l := v.Args[1]
  3674  		if l.Op != OpAMD64MOVLload {
  3675  			break
  3676  		}
  3677  		off := l.AuxInt
  3678  		sym := l.Aux
  3679  		_ = l.Args[1]
  3680  		ptr := l.Args[0]
  3681  		mem := l.Args[1]
  3682  		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
  3683  			break
  3684  		}
  3685  		v.reset(OpAMD64ANDLload)
  3686  		v.AuxInt = off
  3687  		v.Aux = sym
  3688  		v.AddArg(x)
  3689  		v.AddArg(ptr)
  3690  		v.AddArg(mem)
  3691  		return true
  3692  	}
  3693  	// match: (ANDL l:(MOVLload [off] {sym} ptr mem) x)
  3694  	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
  3695  	// result: (ANDLload x [off] {sym} ptr mem)
  3696  	for {
  3697  		_ = v.Args[1]
  3698  		l := v.Args[0]
  3699  		if l.Op != OpAMD64MOVLload {
  3700  			break
  3701  		}
  3702  		off := l.AuxInt
  3703  		sym := l.Aux
  3704  		_ = l.Args[1]
  3705  		ptr := l.Args[0]
  3706  		mem := l.Args[1]
  3707  		x := v.Args[1]
  3708  		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
  3709  			break
  3710  		}
  3711  		v.reset(OpAMD64ANDLload)
  3712  		v.AuxInt = off
  3713  		v.Aux = sym
  3714  		v.AddArg(x)
  3715  		v.AddArg(ptr)
  3716  		v.AddArg(mem)
  3717  		return true
  3718  	}
  3719  	return false
  3720  }
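        // ANDLconst: a mask that clears one high bit becomes BTRLconst, nested
        // ANDLconst/BTRLconst collapse into a single constant, 0xFF and 0xFFFF
        // masks become zero-extensions, masks of 0 and -1 simplify, and an AND
        // of two constants is evaluated at compile time.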
  3721  func rewriteValueAMD64_OpAMD64ANDLconst_0(v *Value) bool {
  3722  	b := v.Block
  3723  	_ = b
  3724  	config := b.Func.Config
  3725  	_ = config
  3726  	// match: (ANDLconst [c] x)
  3727  	// cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
  3728  	// result: (BTRLconst [log2uint32(^c)] x)
  3729  	for {
  3730  		c := v.AuxInt
  3731  		x := v.Args[0]
  3732  		if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
  3733  			break
  3734  		}
  3735  		v.reset(OpAMD64BTRLconst)
  3736  		v.AuxInt = log2uint32(^c)
  3737  		v.AddArg(x)
  3738  		return true
  3739  	}
  3740  	// match: (ANDLconst [c] (ANDLconst [d] x))
  3741  	// cond:
  3742  	// result: (ANDLconst [c & d] x)
  3743  	for {
  3744  		c := v.AuxInt
  3745  		v_0 := v.Args[0]
  3746  		if v_0.Op != OpAMD64ANDLconst {
  3747  			break
  3748  		}
  3749  		d := v_0.AuxInt
  3750  		x := v_0.Args[0]
  3751  		v.reset(OpAMD64ANDLconst)
  3752  		v.AuxInt = c & d
  3753  		v.AddArg(x)
  3754  		return true
  3755  	}
  3756  	// match: (ANDLconst [c] (BTRLconst [d] x))
  3757  	// cond:
  3758  	// result: (ANDLconst [c &^ (1<<uint32(d))] x)
  3759  	for {
  3760  		c := v.AuxInt
  3761  		v_0 := v.Args[0]
  3762  		if v_0.Op != OpAMD64BTRLconst {
  3763  			break
  3764  		}
  3765  		d := v_0.AuxInt
  3766  		x := v_0.Args[0]
  3767  		v.reset(OpAMD64ANDLconst)
  3768  		v.AuxInt = c &^ (1 << uint32(d))
  3769  		v.AddArg(x)
  3770  		return true
  3771  	}
  3772  	// match: (ANDLconst [ 0xFF] x)
  3773  	// cond:
  3774  	// result: (MOVBQZX x)
  3775  	for {
  3776  		if v.AuxInt != 0xFF {
  3777  			break
  3778  		}
  3779  		x := v.Args[0]
  3780  		v.reset(OpAMD64MOVBQZX)
  3781  		v.AddArg(x)
  3782  		return true
  3783  	}
  3784  	// match: (ANDLconst [0xFFFF] x)
  3785  	// cond:
  3786  	// result: (MOVWQZX x)
  3787  	for {
  3788  		if v.AuxInt != 0xFFFF {
  3789  			break
  3790  		}
  3791  		x := v.Args[0]
  3792  		v.reset(OpAMD64MOVWQZX)
  3793  		v.AddArg(x)
  3794  		return true
  3795  	}
  3796  	// match: (ANDLconst [c] _)
  3797  	// cond: int32(c)==0
  3798  	// result: (MOVLconst [0])
  3799  	for {
  3800  		c := v.AuxInt
  3801  		if !(int32(c) == 0) {
  3802  			break
  3803  		}
  3804  		v.reset(OpAMD64MOVLconst)
  3805  		v.AuxInt = 0
  3806  		return true
  3807  	}
  3808  	// match: (ANDLconst [c] x)
  3809  	// cond: int32(c)==-1
  3810  	// result: x
  3811  	for {
  3812  		c := v.AuxInt
  3813  		x := v.Args[0]
  3814  		if !(int32(c) == -1) {
  3815  			break
  3816  		}
  3817  		v.reset(OpCopy)
  3818  		v.Type = x.Type
  3819  		v.AddArg(x)
  3820  		return true
  3821  	}
  3822  	// match: (ANDLconst [c] (MOVLconst [d]))
  3823  	// cond:
  3824  	// result: (MOVLconst [c&d])
  3825  	for {
  3826  		c := v.AuxInt
  3827  		v_0 := v.Args[0]
  3828  		if v_0.Op != OpAMD64MOVLconst {
  3829  			break
  3830  		}
  3831  		d := v_0.AuxInt
  3832  		v.reset(OpAMD64MOVLconst)
  3833  		v.AuxInt = c & d
  3834  		return true
  3835  	}
  3836  	return false
  3837  }
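        // ANDLconstmodify, like the other *modify and *load ops in this file,
        // folds an ADDQconst or LEAQ address computation into its offset and
        // symbol, guarded by ValAndOff(...).canAdd (or is32Bit) and canMergeSym.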
  3838  func rewriteValueAMD64_OpAMD64ANDLconstmodify_0(v *Value) bool {
  3839  	// match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
  3840  	// cond: ValAndOff(valoff1).canAdd(off2)
  3841  	// result: (ANDLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
  3842  	for {
  3843  		valoff1 := v.AuxInt
  3844  		sym := v.Aux
  3845  		_ = v.Args[1]
  3846  		v_0 := v.Args[0]
  3847  		if v_0.Op != OpAMD64ADDQconst {
  3848  			break
  3849  		}
  3850  		off2 := v_0.AuxInt
  3851  		base := v_0.Args[0]
  3852  		mem := v.Args[1]
  3853  		if !(ValAndOff(valoff1).canAdd(off2)) {
  3854  			break
  3855  		}
  3856  		v.reset(OpAMD64ANDLconstmodify)
  3857  		v.AuxInt = ValAndOff(valoff1).add(off2)
  3858  		v.Aux = sym
  3859  		v.AddArg(base)
  3860  		v.AddArg(mem)
  3861  		return true
  3862  	}
  3863  	// match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
  3864  	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
  3865  	// result: (ANDLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
  3866  	for {
  3867  		valoff1 := v.AuxInt
  3868  		sym1 := v.Aux
  3869  		_ = v.Args[1]
  3870  		v_0 := v.Args[0]
  3871  		if v_0.Op != OpAMD64LEAQ {
  3872  			break
  3873  		}
  3874  		off2 := v_0.AuxInt
  3875  		sym2 := v_0.Aux
  3876  		base := v_0.Args[0]
  3877  		mem := v.Args[1]
  3878  		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
  3879  			break
  3880  		}
  3881  		v.reset(OpAMD64ANDLconstmodify)
  3882  		v.AuxInt = ValAndOff(valoff1).add(off2)
  3883  		v.Aux = mergeSym(sym1, sym2)
  3884  		v.AddArg(base)
  3885  		v.AddArg(mem)
  3886  		return true
  3887  	}
  3888  	return false
  3889  }
  3890  func rewriteValueAMD64_OpAMD64ANDLload_0(v *Value) bool {
  3891  	b := v.Block
  3892  	_ = b
  3893  	typ := &b.Func.Config.Types
  3894  	_ = typ
  3895  	// match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem)
  3896  	// cond: is32Bit(off1+off2)
  3897  	// result: (ANDLload [off1+off2] {sym} val base mem)
  3898  	for {
  3899  		off1 := v.AuxInt
  3900  		sym := v.Aux
  3901  		_ = v.Args[2]
  3902  		val := v.Args[0]
  3903  		v_1 := v.Args[1]
  3904  		if v_1.Op != OpAMD64ADDQconst {
  3905  			break
  3906  		}
  3907  		off2 := v_1.AuxInt
  3908  		base := v_1.Args[0]
  3909  		mem := v.Args[2]
  3910  		if !(is32Bit(off1 + off2)) {
  3911  			break
  3912  		}
  3913  		v.reset(OpAMD64ANDLload)
  3914  		v.AuxInt = off1 + off2
  3915  		v.Aux = sym
  3916  		v.AddArg(val)
  3917  		v.AddArg(base)
  3918  		v.AddArg(mem)
  3919  		return true
  3920  	}
  3921  	// match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
  3922  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  3923  	// result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
  3924  	for {
  3925  		off1 := v.AuxInt
  3926  		sym1 := v.Aux
  3927  		_ = v.Args[2]
  3928  		val := v.Args[0]
  3929  		v_1 := v.Args[1]
  3930  		if v_1.Op != OpAMD64LEAQ {
  3931  			break
  3932  		}
  3933  		off2 := v_1.AuxInt
  3934  		sym2 := v_1.Aux
  3935  		base := v_1.Args[0]
  3936  		mem := v.Args[2]
  3937  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  3938  			break
  3939  		}
  3940  		v.reset(OpAMD64ANDLload)
  3941  		v.AuxInt = off1 + off2
  3942  		v.Aux = mergeSym(sym1, sym2)
  3943  		v.AddArg(val)
  3944  		v.AddArg(base)
  3945  		v.AddArg(mem)
  3946  		return true
  3947  	}
  3948  	// match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
  3949  	// cond:
  3950  	// result: (ANDL x (MOVLf2i y))
  3951  	for {
  3952  		off := v.AuxInt
  3953  		sym := v.Aux
  3954  		_ = v.Args[2]
  3955  		x := v.Args[0]
  3956  		ptr := v.Args[1]
  3957  		v_2 := v.Args[2]
  3958  		if v_2.Op != OpAMD64MOVSSstore {
  3959  			break
  3960  		}
  3961  		if v_2.AuxInt != off {
  3962  			break
  3963  		}
  3964  		if v_2.Aux != sym {
  3965  			break
  3966  		}
  3967  		_ = v_2.Args[2]
  3968  		if ptr != v_2.Args[0] {
  3969  			break
  3970  		}
  3971  		y := v_2.Args[1]
  3972  		v.reset(OpAMD64ANDL)
  3973  		v.AddArg(x)
  3974  		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
  3975  		v0.AddArg(y)
  3976  		v.AddArg(v0)
  3977  		return true
  3978  	}
  3979  	return false
  3980  }
  3981  func rewriteValueAMD64_OpAMD64ANDLmodify_0(v *Value) bool {
  3982  	// match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
  3983  	// cond: is32Bit(off1+off2)
  3984  	// result: (ANDLmodify [off1+off2] {sym} base val mem)
  3985  	for {
  3986  		off1 := v.AuxInt
  3987  		sym := v.Aux
  3988  		_ = v.Args[2]
  3989  		v_0 := v.Args[0]
  3990  		if v_0.Op != OpAMD64ADDQconst {
  3991  			break
  3992  		}
  3993  		off2 := v_0.AuxInt
  3994  		base := v_0.Args[0]
  3995  		val := v.Args[1]
  3996  		mem := v.Args[2]
  3997  		if !(is32Bit(off1 + off2)) {
  3998  			break
  3999  		}
  4000  		v.reset(OpAMD64ANDLmodify)
  4001  		v.AuxInt = off1 + off2
  4002  		v.Aux = sym
  4003  		v.AddArg(base)
  4004  		v.AddArg(val)
  4005  		v.AddArg(mem)
  4006  		return true
  4007  	}
  4008  	// match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  4009  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  4010  	// result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  4011  	for {
  4012  		off1 := v.AuxInt
  4013  		sym1 := v.Aux
  4014  		_ = v.Args[2]
  4015  		v_0 := v.Args[0]
  4016  		if v_0.Op != OpAMD64LEAQ {
  4017  			break
  4018  		}
  4019  		off2 := v_0.AuxInt
  4020  		sym2 := v_0.Aux
  4021  		base := v_0.Args[0]
  4022  		val := v.Args[1]
  4023  		mem := v.Args[2]
  4024  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  4025  			break
  4026  		}
  4027  		v.reset(OpAMD64ANDLmodify)
  4028  		v.AuxInt = off1 + off2
  4029  		v.Aux = mergeSym(sym1, sym2)
  4030  		v.AddArg(base)
  4031  		v.AddArg(val)
  4032  		v.AddArg(mem)
  4033  		return true
  4034  	}
  4035  	return false
  4036  }
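        // ANDQ mirrors the ANDL rules for 64-bit operands; a constant operand
        // must additionally satisfy is32Bit before it is folded into ANDQconst.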
  4037  func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool {
  4038  	b := v.Block
  4039  	_ = b
  4040  	config := b.Func.Config
  4041  	_ = config
  4042  	// match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x)
  4043  	// cond: !config.nacl
  4044  	// result: (BTRQ x y)
  4045  	for {
  4046  		_ = v.Args[1]
  4047  		v_0 := v.Args[0]
  4048  		if v_0.Op != OpAMD64NOTQ {
  4049  			break
  4050  		}
  4051  		v_0_0 := v_0.Args[0]
  4052  		if v_0_0.Op != OpAMD64SHLQ {
  4053  			break
  4054  		}
  4055  		_ = v_0_0.Args[1]
  4056  		v_0_0_0 := v_0_0.Args[0]
  4057  		if v_0_0_0.Op != OpAMD64MOVQconst {
  4058  			break
  4059  		}
  4060  		if v_0_0_0.AuxInt != 1 {
  4061  			break
  4062  		}
  4063  		y := v_0_0.Args[1]
  4064  		x := v.Args[1]
  4065  		if !(!config.nacl) {
  4066  			break
  4067  		}
  4068  		v.reset(OpAMD64BTRQ)
  4069  		v.AddArg(x)
  4070  		v.AddArg(y)
  4071  		return true
  4072  	}
  4073  	// match: (ANDQ x (NOTQ (SHLQ (MOVQconst [1]) y)))
  4074  	// cond: !config.nacl
  4075  	// result: (BTRQ x y)
  4076  	for {
  4077  		_ = v.Args[1]
  4078  		x := v.Args[0]
  4079  		v_1 := v.Args[1]
  4080  		if v_1.Op != OpAMD64NOTQ {
  4081  			break
  4082  		}
  4083  		v_1_0 := v_1.Args[0]
  4084  		if v_1_0.Op != OpAMD64SHLQ {
  4085  			break
  4086  		}
  4087  		_ = v_1_0.Args[1]
  4088  		v_1_0_0 := v_1_0.Args[0]
  4089  		if v_1_0_0.Op != OpAMD64MOVQconst {
  4090  			break
  4091  		}
  4092  		if v_1_0_0.AuxInt != 1 {
  4093  			break
  4094  		}
  4095  		y := v_1_0.Args[1]
  4096  		if !(!config.nacl) {
  4097  			break
  4098  		}
  4099  		v.reset(OpAMD64BTRQ)
  4100  		v.AddArg(x)
  4101  		v.AddArg(y)
  4102  		return true
  4103  	}
  4104  	// match: (ANDQ (MOVQconst [c]) x)
  4105  	// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
  4106  	// result: (BTRQconst [log2(^c)] x)
  4107  	for {
  4108  		_ = v.Args[1]
  4109  		v_0 := v.Args[0]
  4110  		if v_0.Op != OpAMD64MOVQconst {
  4111  			break
  4112  		}
  4113  		c := v_0.AuxInt
  4114  		x := v.Args[1]
  4115  		if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
  4116  			break
  4117  		}
  4118  		v.reset(OpAMD64BTRQconst)
  4119  		v.AuxInt = log2(^c)
  4120  		v.AddArg(x)
  4121  		return true
  4122  	}
  4123  	// match: (ANDQ x (MOVQconst [c]))
  4124  	// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
  4125  	// result: (BTRQconst [log2(^c)] x)
  4126  	for {
  4127  		_ = v.Args[1]
  4128  		x := v.Args[0]
  4129  		v_1 := v.Args[1]
  4130  		if v_1.Op != OpAMD64MOVQconst {
  4131  			break
  4132  		}
  4133  		c := v_1.AuxInt
  4134  		if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
  4135  			break
  4136  		}
  4137  		v.reset(OpAMD64BTRQconst)
  4138  		v.AuxInt = log2(^c)
  4139  		v.AddArg(x)
  4140  		return true
  4141  	}
  4142  	// match: (ANDQ x (MOVQconst [c]))
  4143  	// cond: is32Bit(c)
  4144  	// result: (ANDQconst [c] x)
  4145  	for {
  4146  		_ = v.Args[1]
  4147  		x := v.Args[0]
  4148  		v_1 := v.Args[1]
  4149  		if v_1.Op != OpAMD64MOVQconst {
  4150  			break
  4151  		}
  4152  		c := v_1.AuxInt
  4153  		if !(is32Bit(c)) {
  4154  			break
  4155  		}
  4156  		v.reset(OpAMD64ANDQconst)
  4157  		v.AuxInt = c
  4158  		v.AddArg(x)
  4159  		return true
  4160  	}
  4161  	// match: (ANDQ (MOVQconst [c]) x)
  4162  	// cond: is32Bit(c)
  4163  	// result: (ANDQconst [c] x)
  4164  	for {
  4165  		_ = v.Args[1]
  4166  		v_0 := v.Args[0]
  4167  		if v_0.Op != OpAMD64MOVQconst {
  4168  			break
  4169  		}
  4170  		c := v_0.AuxInt
  4171  		x := v.Args[1]
  4172  		if !(is32Bit(c)) {
  4173  			break
  4174  		}
  4175  		v.reset(OpAMD64ANDQconst)
  4176  		v.AuxInt = c
  4177  		v.AddArg(x)
  4178  		return true
  4179  	}
  4180  	// match: (ANDQ x x)
  4181  	// cond:
  4182  	// result: x
  4183  	for {
  4184  		_ = v.Args[1]
  4185  		x := v.Args[0]
  4186  		if x != v.Args[1] {
  4187  			break
  4188  		}
  4189  		v.reset(OpCopy)
  4190  		v.Type = x.Type
  4191  		v.AddArg(x)
  4192  		return true
  4193  	}
  4194  	// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
  4195  	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
  4196  	// result: (ANDQload x [off] {sym} ptr mem)
  4197  	for {
  4198  		_ = v.Args[1]
  4199  		x := v.Args[0]
  4200  		l := v.Args[1]
  4201  		if l.Op != OpAMD64MOVQload {
  4202  			break
  4203  		}
  4204  		off := l.AuxInt
  4205  		sym := l.Aux
  4206  		_ = l.Args[1]
  4207  		ptr := l.Args[0]
  4208  		mem := l.Args[1]
  4209  		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
  4210  			break
  4211  		}
  4212  		v.reset(OpAMD64ANDQload)
  4213  		v.AuxInt = off
  4214  		v.Aux = sym
  4215  		v.AddArg(x)
  4216  		v.AddArg(ptr)
  4217  		v.AddArg(mem)
  4218  		return true
  4219  	}
  4220  	// match: (ANDQ l:(MOVQload [off] {sym} ptr mem) x)
  4221  	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
  4222  	// result: (ANDQload x [off] {sym} ptr mem)
  4223  	for {
  4224  		_ = v.Args[1]
  4225  		l := v.Args[0]
  4226  		if l.Op != OpAMD64MOVQload {
  4227  			break
  4228  		}
  4229  		off := l.AuxInt
  4230  		sym := l.Aux
  4231  		_ = l.Args[1]
  4232  		ptr := l.Args[0]
  4233  		mem := l.Args[1]
  4234  		x := v.Args[1]
  4235  		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
  4236  			break
  4237  		}
  4238  		v.reset(OpAMD64ANDQload)
  4239  		v.AuxInt = off
  4240  		v.Aux = sym
  4241  		v.AddArg(x)
  4242  		v.AddArg(ptr)
  4243  		v.AddArg(mem)
  4244  		return true
  4245  	}
  4246  	return false
  4247  }
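        // ANDQconst mirrors ANDLconst, with the extra 0xFFFFFFFF mask rewritten
        // to a 32-bit zero-extension (MOVLQZX).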
  4248  func rewriteValueAMD64_OpAMD64ANDQconst_0(v *Value) bool {
  4249  	b := v.Block
  4250  	_ = b
  4251  	config := b.Func.Config
  4252  	_ = config
  4253  	// match: (ANDQconst [c] x)
  4254  	// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
  4255  	// result: (BTRQconst [log2(^c)] x)
  4256  	for {
  4257  		c := v.AuxInt
  4258  		x := v.Args[0]
  4259  		if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
  4260  			break
  4261  		}
  4262  		v.reset(OpAMD64BTRQconst)
  4263  		v.AuxInt = log2(^c)
  4264  		v.AddArg(x)
  4265  		return true
  4266  	}
  4267  	// match: (ANDQconst [c] (ANDQconst [d] x))
  4268  	// cond:
  4269  	// result: (ANDQconst [c & d] x)
  4270  	for {
  4271  		c := v.AuxInt
  4272  		v_0 := v.Args[0]
  4273  		if v_0.Op != OpAMD64ANDQconst {
  4274  			break
  4275  		}
  4276  		d := v_0.AuxInt
  4277  		x := v_0.Args[0]
  4278  		v.reset(OpAMD64ANDQconst)
  4279  		v.AuxInt = c & d
  4280  		v.AddArg(x)
  4281  		return true
  4282  	}
  4283  	// match: (ANDQconst [c] (BTRQconst [d] x))
  4284  	// cond:
  4285  	// result: (ANDQconst [c &^ (1<<uint32(d))] x)
  4286  	for {
  4287  		c := v.AuxInt
  4288  		v_0 := v.Args[0]
  4289  		if v_0.Op != OpAMD64BTRQconst {
  4290  			break
  4291  		}
  4292  		d := v_0.AuxInt
  4293  		x := v_0.Args[0]
  4294  		v.reset(OpAMD64ANDQconst)
  4295  		v.AuxInt = c &^ (1 << uint32(d))
  4296  		v.AddArg(x)
  4297  		return true
  4298  	}
  4299  	// match: (ANDQconst [ 0xFF] x)
  4300  	// cond:
  4301  	// result: (MOVBQZX x)
  4302  	for {
  4303  		if v.AuxInt != 0xFF {
  4304  			break
  4305  		}
  4306  		x := v.Args[0]
  4307  		v.reset(OpAMD64MOVBQZX)
  4308  		v.AddArg(x)
  4309  		return true
  4310  	}
  4311  	// match: (ANDQconst [0xFFFF] x)
  4312  	// cond:
  4313  	// result: (MOVWQZX x)
  4314  	for {
  4315  		if v.AuxInt != 0xFFFF {
  4316  			break
  4317  		}
  4318  		x := v.Args[0]
  4319  		v.reset(OpAMD64MOVWQZX)
  4320  		v.AddArg(x)
  4321  		return true
  4322  	}
  4323  	// match: (ANDQconst [0xFFFFFFFF] x)
  4324  	// cond:
  4325  	// result: (MOVLQZX x)
  4326  	for {
  4327  		if v.AuxInt != 0xFFFFFFFF {
  4328  			break
  4329  		}
  4330  		x := v.Args[0]
  4331  		v.reset(OpAMD64MOVLQZX)
  4332  		v.AddArg(x)
  4333  		return true
  4334  	}
  4335  	// match: (ANDQconst [0] _)
  4336  	// cond:
  4337  	// result: (MOVQconst [0])
  4338  	for {
  4339  		if v.AuxInt != 0 {
  4340  			break
  4341  		}
  4342  		v.reset(OpAMD64MOVQconst)
  4343  		v.AuxInt = 0
  4344  		return true
  4345  	}
  4346  	// match: (ANDQconst [-1] x)
  4347  	// cond:
  4348  	// result: x
  4349  	for {
  4350  		if v.AuxInt != -1 {
  4351  			break
  4352  		}
  4353  		x := v.Args[0]
  4354  		v.reset(OpCopy)
  4355  		v.Type = x.Type
  4356  		v.AddArg(x)
  4357  		return true
  4358  	}
  4359  	// match: (ANDQconst [c] (MOVQconst [d]))
  4360  	// cond:
  4361  	// result: (MOVQconst [c&d])
  4362  	for {
  4363  		c := v.AuxInt
  4364  		v_0 := v.Args[0]
  4365  		if v_0.Op != OpAMD64MOVQconst {
  4366  			break
  4367  		}
  4368  		d := v_0.AuxInt
  4369  		v.reset(OpAMD64MOVQconst)
  4370  		v.AuxInt = c & d
  4371  		return true
  4372  	}
  4373  	return false
  4374  }
  4375  func rewriteValueAMD64_OpAMD64ANDQconstmodify_0(v *Value) bool {
  4376  	// match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
  4377  	// cond: ValAndOff(valoff1).canAdd(off2)
  4378  	// result: (ANDQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
  4379  	for {
  4380  		valoff1 := v.AuxInt
  4381  		sym := v.Aux
  4382  		_ = v.Args[1]
  4383  		v_0 := v.Args[0]
  4384  		if v_0.Op != OpAMD64ADDQconst {
  4385  			break
  4386  		}
  4387  		off2 := v_0.AuxInt
  4388  		base := v_0.Args[0]
  4389  		mem := v.Args[1]
  4390  		if !(ValAndOff(valoff1).canAdd(off2)) {
  4391  			break
  4392  		}
  4393  		v.reset(OpAMD64ANDQconstmodify)
  4394  		v.AuxInt = ValAndOff(valoff1).add(off2)
  4395  		v.Aux = sym
  4396  		v.AddArg(base)
  4397  		v.AddArg(mem)
  4398  		return true
  4399  	}
  4400  	// match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
  4401  	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
  4402  	// result: (ANDQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
  4403  	for {
  4404  		valoff1 := v.AuxInt
  4405  		sym1 := v.Aux
  4406  		_ = v.Args[1]
  4407  		v_0 := v.Args[0]
  4408  		if v_0.Op != OpAMD64LEAQ {
  4409  			break
  4410  		}
  4411  		off2 := v_0.AuxInt
  4412  		sym2 := v_0.Aux
  4413  		base := v_0.Args[0]
  4414  		mem := v.Args[1]
  4415  		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
  4416  			break
  4417  		}
  4418  		v.reset(OpAMD64ANDQconstmodify)
  4419  		v.AuxInt = ValAndOff(valoff1).add(off2)
  4420  		v.Aux = mergeSym(sym1, sym2)
  4421  		v.AddArg(base)
  4422  		v.AddArg(mem)
  4423  		return true
  4424  	}
  4425  	return false
  4426  }
  4427  func rewriteValueAMD64_OpAMD64ANDQload_0(v *Value) bool {
  4428  	b := v.Block
  4429  	_ = b
  4430  	typ := &b.Func.Config.Types
  4431  	_ = typ
  4432  	// match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem)
  4433  	// cond: is32Bit(off1+off2)
  4434  	// result: (ANDQload [off1+off2] {sym} val base mem)
  4435  	for {
  4436  		off1 := v.AuxInt
  4437  		sym := v.Aux
  4438  		_ = v.Args[2]
  4439  		val := v.Args[0]
  4440  		v_1 := v.Args[1]
  4441  		if v_1.Op != OpAMD64ADDQconst {
  4442  			break
  4443  		}
  4444  		off2 := v_1.AuxInt
  4445  		base := v_1.Args[0]
  4446  		mem := v.Args[2]
  4447  		if !(is32Bit(off1 + off2)) {
  4448  			break
  4449  		}
  4450  		v.reset(OpAMD64ANDQload)
  4451  		v.AuxInt = off1 + off2
  4452  		v.Aux = sym
  4453  		v.AddArg(val)
  4454  		v.AddArg(base)
  4455  		v.AddArg(mem)
  4456  		return true
  4457  	}
  4458  	// match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
  4459  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  4460  	// result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
  4461  	for {
  4462  		off1 := v.AuxInt
  4463  		sym1 := v.Aux
  4464  		_ = v.Args[2]
  4465  		val := v.Args[0]
  4466  		v_1 := v.Args[1]
  4467  		if v_1.Op != OpAMD64LEAQ {
  4468  			break
  4469  		}
  4470  		off2 := v_1.AuxInt
  4471  		sym2 := v_1.Aux
  4472  		base := v_1.Args[0]
  4473  		mem := v.Args[2]
  4474  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  4475  			break
  4476  		}
  4477  		v.reset(OpAMD64ANDQload)
  4478  		v.AuxInt = off1 + off2
  4479  		v.Aux = mergeSym(sym1, sym2)
  4480  		v.AddArg(val)
  4481  		v.AddArg(base)
  4482  		v.AddArg(mem)
  4483  		return true
  4484  	}
  4485  	// match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
  4486  	// cond:
  4487  	// result: (ANDQ x (MOVQf2i y))
  4488  	for {
  4489  		off := v.AuxInt
  4490  		sym := v.Aux
  4491  		_ = v.Args[2]
  4492  		x := v.Args[0]
  4493  		ptr := v.Args[1]
  4494  		v_2 := v.Args[2]
  4495  		if v_2.Op != OpAMD64MOVSDstore {
  4496  			break
  4497  		}
  4498  		if v_2.AuxInt != off {
  4499  			break
  4500  		}
  4501  		if v_2.Aux != sym {
  4502  			break
  4503  		}
  4504  		_ = v_2.Args[2]
  4505  		if ptr != v_2.Args[0] {
  4506  			break
  4507  		}
  4508  		y := v_2.Args[1]
  4509  		v.reset(OpAMD64ANDQ)
  4510  		v.AddArg(x)
  4511  		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
  4512  		v0.AddArg(y)
  4513  		v.AddArg(v0)
  4514  		return true
  4515  	}
  4516  	return false
  4517  }
  4518  func rewriteValueAMD64_OpAMD64ANDQmodify_0(v *Value) bool {
  4519  	// match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
  4520  	// cond: is32Bit(off1+off2)
  4521  	// result: (ANDQmodify [off1+off2] {sym} base val mem)
  4522  	for {
  4523  		off1 := v.AuxInt
  4524  		sym := v.Aux
  4525  		_ = v.Args[2]
  4526  		v_0 := v.Args[0]
  4527  		if v_0.Op != OpAMD64ADDQconst {
  4528  			break
  4529  		}
  4530  		off2 := v_0.AuxInt
  4531  		base := v_0.Args[0]
  4532  		val := v.Args[1]
  4533  		mem := v.Args[2]
  4534  		if !(is32Bit(off1 + off2)) {
  4535  			break
  4536  		}
  4537  		v.reset(OpAMD64ANDQmodify)
  4538  		v.AuxInt = off1 + off2
  4539  		v.Aux = sym
  4540  		v.AddArg(base)
  4541  		v.AddArg(val)
  4542  		v.AddArg(mem)
  4543  		return true
  4544  	}
  4545  	// match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  4546  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  4547  	// result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  4548  	for {
  4549  		off1 := v.AuxInt
  4550  		sym1 := v.Aux
  4551  		_ = v.Args[2]
  4552  		v_0 := v.Args[0]
  4553  		if v_0.Op != OpAMD64LEAQ {
  4554  			break
  4555  		}
  4556  		off2 := v_0.AuxInt
  4557  		sym2 := v_0.Aux
  4558  		base := v_0.Args[0]
  4559  		val := v.Args[1]
  4560  		mem := v.Args[2]
  4561  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  4562  			break
  4563  		}
  4564  		v.reset(OpAMD64ANDQmodify)
  4565  		v.AuxInt = off1 + off2
  4566  		v.Aux = mergeSym(sym1, sym2)
  4567  		v.AddArg(base)
  4568  		v.AddArg(val)
  4569  		v.AddArg(mem)
  4570  		return true
  4571  	}
  4572  	return false
  4573  }
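        // BSFQ: when a sentinel bit has been ORed in just above a zero-extended
        // operand (1<<8 over MOVBQZX, 1<<16 over MOVWQZX), the zero-extension
        // cannot affect the result and is dropped.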
  4574  func rewriteValueAMD64_OpAMD64BSFQ_0(v *Value) bool {
  4575  	b := v.Block
  4576  	_ = b
  4577  	// match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
  4578  	// cond:
  4579  	// result: (BSFQ (ORQconst <t> [1<<8] x))
  4580  	for {
  4581  		v_0 := v.Args[0]
  4582  		if v_0.Op != OpAMD64ORQconst {
  4583  			break
  4584  		}
  4585  		t := v_0.Type
  4586  		if v_0.AuxInt != 1<<8 {
  4587  			break
  4588  		}
  4589  		v_0_0 := v_0.Args[0]
  4590  		if v_0_0.Op != OpAMD64MOVBQZX {
  4591  			break
  4592  		}
  4593  		x := v_0_0.Args[0]
  4594  		v.reset(OpAMD64BSFQ)
  4595  		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
  4596  		v0.AuxInt = 1 << 8
  4597  		v0.AddArg(x)
  4598  		v.AddArg(v0)
  4599  		return true
  4600  	}
  4601  	// match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
  4602  	// cond:
  4603  	// result: (BSFQ (ORQconst <t> [1<<16] x))
  4604  	for {
  4605  		v_0 := v.Args[0]
  4606  		if v_0.Op != OpAMD64ORQconst {
  4607  			break
  4608  		}
  4609  		t := v_0.Type
  4610  		if v_0.AuxInt != 1<<16 {
  4611  			break
  4612  		}
  4613  		v_0_0 := v_0.Args[0]
  4614  		if v_0_0.Op != OpAMD64MOVWQZX {
  4615  			break
  4616  		}
  4617  		x := v_0_0.Args[0]
  4618  		v.reset(OpAMD64BSFQ)
  4619  		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
  4620  		v0.AuxInt = 1 << 16
  4621  		v0.AddArg(x)
  4622  		v.AddArg(v0)
  4623  		return true
  4624  	}
  4625  	return false
  4626  }
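        // BTCLconst (complement bit c): folds into an existing XORLconst or a
        // second BTCLconst as a combined XOR mask, and is evaluated directly
        // against a MOVLconst operand.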
  4627  func rewriteValueAMD64_OpAMD64BTCLconst_0(v *Value) bool {
  4628  	// match: (BTCLconst [c] (XORLconst [d] x))
  4629  	// cond:
  4630  	// result: (XORLconst [d ^ 1<<uint32(c)] x)
  4631  	for {
  4632  		c := v.AuxInt
  4633  		v_0 := v.Args[0]
  4634  		if v_0.Op != OpAMD64XORLconst {
  4635  			break
  4636  		}
  4637  		d := v_0.AuxInt
  4638  		x := v_0.Args[0]
  4639  		v.reset(OpAMD64XORLconst)
  4640  		v.AuxInt = d ^ 1<<uint32(c)
  4641  		v.AddArg(x)
  4642  		return true
  4643  	}
  4644  	// match: (BTCLconst [c] (BTCLconst [d] x))
  4645  	// cond:
  4646  	// result: (XORLconst [1<<uint32(c) ^ 1<<uint32(d)] x)
  4647  	for {
  4648  		c := v.AuxInt
  4649  		v_0 := v.Args[0]
  4650  		if v_0.Op != OpAMD64BTCLconst {
  4651  			break
  4652  		}
  4653  		d := v_0.AuxInt
  4654  		x := v_0.Args[0]
  4655  		v.reset(OpAMD64XORLconst)
  4656  		v.AuxInt = 1<<uint32(c) ^ 1<<uint32(d)
  4657  		v.AddArg(x)
  4658  		return true
  4659  	}
  4660  	// match: (BTCLconst [c] (MOVLconst [d]))
  4661  	// cond:
  4662  	// result: (MOVLconst [d^(1<<uint32(c))])
  4663  	for {
  4664  		c := v.AuxInt
  4665  		v_0 := v.Args[0]
  4666  		if v_0.Op != OpAMD64MOVLconst {
  4667  			break
  4668  		}
  4669  		d := v_0.AuxInt
  4670  		v.reset(OpAMD64MOVLconst)
  4671  		v.AuxInt = d ^ (1 << uint32(c))
  4672  		return true
  4673  	}
  4674  	return false
  4675  }
  4676  func rewriteValueAMD64_OpAMD64BTCLconstmodify_0(v *Value) bool {
  4677  	// match: (BTCLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
  4678  	// cond: ValAndOff(valoff1).canAdd(off2)
  4679  	// result: (BTCLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
  4680  	for {
  4681  		valoff1 := v.AuxInt
  4682  		sym := v.Aux
  4683  		_ = v.Args[1]
  4684  		v_0 := v.Args[0]
  4685  		if v_0.Op != OpAMD64ADDQconst {
  4686  			break
  4687  		}
  4688  		off2 := v_0.AuxInt
  4689  		base := v_0.Args[0]
  4690  		mem := v.Args[1]
  4691  		if !(ValAndOff(valoff1).canAdd(off2)) {
  4692  			break
  4693  		}
  4694  		v.reset(OpAMD64BTCLconstmodify)
  4695  		v.AuxInt = ValAndOff(valoff1).add(off2)
  4696  		v.Aux = sym
  4697  		v.AddArg(base)
  4698  		v.AddArg(mem)
  4699  		return true
  4700  	}
  4701  	// match: (BTCLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
  4702  	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
  4703  	// result: (BTCLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
  4704  	for {
  4705  		valoff1 := v.AuxInt
  4706  		sym1 := v.Aux
  4707  		_ = v.Args[1]
  4708  		v_0 := v.Args[0]
  4709  		if v_0.Op != OpAMD64LEAQ {
  4710  			break
  4711  		}
  4712  		off2 := v_0.AuxInt
  4713  		sym2 := v_0.Aux
  4714  		base := v_0.Args[0]
  4715  		mem := v.Args[1]
  4716  		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
  4717  			break
  4718  		}
  4719  		v.reset(OpAMD64BTCLconstmodify)
  4720  		v.AuxInt = ValAndOff(valoff1).add(off2)
  4721  		v.Aux = mergeSym(sym1, sym2)
  4722  		v.AddArg(base)
  4723  		v.AddArg(mem)
  4724  		return true
  4725  	}
  4726  	return false
  4727  }
  4728  func rewriteValueAMD64_OpAMD64BTCLmodify_0(v *Value) bool {
  4729  	// match: (BTCLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
  4730  	// cond: is32Bit(off1+off2)
  4731  	// result: (BTCLmodify [off1+off2] {sym} base val mem)
  4732  	for {
  4733  		off1 := v.AuxInt
  4734  		sym := v.Aux
  4735  		_ = v.Args[2]
  4736  		v_0 := v.Args[0]
  4737  		if v_0.Op != OpAMD64ADDQconst {
  4738  			break
  4739  		}
  4740  		off2 := v_0.AuxInt
  4741  		base := v_0.Args[0]
  4742  		val := v.Args[1]
  4743  		mem := v.Args[2]
  4744  		if !(is32Bit(off1 + off2)) {
  4745  			break
  4746  		}
  4747  		v.reset(OpAMD64BTCLmodify)
  4748  		v.AuxInt = off1 + off2
  4749  		v.Aux = sym
  4750  		v.AddArg(base)
  4751  		v.AddArg(val)
  4752  		v.AddArg(mem)
  4753  		return true
  4754  	}
  4755  	// match: (BTCLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  4756  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  4757  	// result: (BTCLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  4758  	for {
  4759  		off1 := v.AuxInt
  4760  		sym1 := v.Aux
  4761  		_ = v.Args[2]
  4762  		v_0 := v.Args[0]
  4763  		if v_0.Op != OpAMD64LEAQ {
  4764  			break
  4765  		}
  4766  		off2 := v_0.AuxInt
  4767  		sym2 := v_0.Aux
  4768  		base := v_0.Args[0]
  4769  		val := v.Args[1]
  4770  		mem := v.Args[2]
  4771  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  4772  			break
  4773  		}
  4774  		v.reset(OpAMD64BTCLmodify)
  4775  		v.AuxInt = off1 + off2
  4776  		v.Aux = mergeSym(sym1, sym2)
  4777  		v.AddArg(base)
  4778  		v.AddArg(val)
  4779  		v.AddArg(mem)
  4780  		return true
  4781  	}
  4782  	return false
  4783  }
  4784  func rewriteValueAMD64_OpAMD64BTCQconst_0(v *Value) bool {
  4785  	// match: (BTCQconst [c] (XORQconst [d] x))
  4786  	// cond:
  4787  	// result: (XORQconst [d ^ 1<<uint32(c)] x)
  4788  	for {
  4789  		c := v.AuxInt
  4790  		v_0 := v.Args[0]
  4791  		if v_0.Op != OpAMD64XORQconst {
  4792  			break
  4793  		}
  4794  		d := v_0.AuxInt
  4795  		x := v_0.Args[0]
  4796  		v.reset(OpAMD64XORQconst)
  4797  		v.AuxInt = d ^ 1<<uint32(c)
  4798  		v.AddArg(x)
  4799  		return true
  4800  	}
  4801  	// match: (BTCQconst [c] (BTCQconst [d] x))
  4802  	// cond:
  4803  	// result: (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
  4804  	for {
  4805  		c := v.AuxInt
  4806  		v_0 := v.Args[0]
  4807  		if v_0.Op != OpAMD64BTCQconst {
  4808  			break
  4809  		}
  4810  		d := v_0.AuxInt
  4811  		x := v_0.Args[0]
  4812  		v.reset(OpAMD64XORQconst)
  4813  		v.AuxInt = 1<<uint32(c) ^ 1<<uint32(d)
  4814  		v.AddArg(x)
  4815  		return true
  4816  	}
  4817  	// match: (BTCQconst [c] (MOVQconst [d]))
  4818  	// cond:
  4819  	// result: (MOVQconst [d^(1<<uint32(c))])
  4820  	for {
  4821  		c := v.AuxInt
  4822  		v_0 := v.Args[0]
  4823  		if v_0.Op != OpAMD64MOVQconst {
  4824  			break
  4825  		}
  4826  		d := v_0.AuxInt
  4827  		v.reset(OpAMD64MOVQconst)
  4828  		v.AuxInt = d ^ (1 << uint32(c))
  4829  		return true
  4830  	}
  4831  	return false
  4832  }
  4833  func rewriteValueAMD64_OpAMD64BTCQconstmodify_0(v *Value) bool {
  4834  	// match: (BTCQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
  4835  	// cond: ValAndOff(valoff1).canAdd(off2)
  4836  	// result: (BTCQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
  4837  	for {
  4838  		valoff1 := v.AuxInt
  4839  		sym := v.Aux
  4840  		_ = v.Args[1]
  4841  		v_0 := v.Args[0]
  4842  		if v_0.Op != OpAMD64ADDQconst {
  4843  			break
  4844  		}
  4845  		off2 := v_0.AuxInt
  4846  		base := v_0.Args[0]
  4847  		mem := v.Args[1]
  4848  		if !(ValAndOff(valoff1).canAdd(off2)) {
  4849  			break
  4850  		}
  4851  		v.reset(OpAMD64BTCQconstmodify)
  4852  		v.AuxInt = ValAndOff(valoff1).add(off2)
  4853  		v.Aux = sym
  4854  		v.AddArg(base)
  4855  		v.AddArg(mem)
  4856  		return true
  4857  	}
  4858  	// match: (BTCQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
  4859  	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
  4860  	// result: (BTCQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
  4861  	for {
  4862  		valoff1 := v.AuxInt
  4863  		sym1 := v.Aux
  4864  		_ = v.Args[1]
  4865  		v_0 := v.Args[0]
  4866  		if v_0.Op != OpAMD64LEAQ {
  4867  			break
  4868  		}
  4869  		off2 := v_0.AuxInt
  4870  		sym2 := v_0.Aux
  4871  		base := v_0.Args[0]
  4872  		mem := v.Args[1]
  4873  		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
  4874  			break
  4875  		}
  4876  		v.reset(OpAMD64BTCQconstmodify)
  4877  		v.AuxInt = ValAndOff(valoff1).add(off2)
  4878  		v.Aux = mergeSym(sym1, sym2)
  4879  		v.AddArg(base)
  4880  		v.AddArg(mem)
  4881  		return true
  4882  	}
  4883  	return false
  4884  }
  4885  func rewriteValueAMD64_OpAMD64BTCQmodify_0(v *Value) bool {
  4886  	// match: (BTCQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
  4887  	// cond: is32Bit(off1+off2)
  4888  	// result: (BTCQmodify [off1+off2] {sym} base val mem)
  4889  	for {
  4890  		off1 := v.AuxInt
  4891  		sym := v.Aux
  4892  		_ = v.Args[2]
  4893  		v_0 := v.Args[0]
  4894  		if v_0.Op != OpAMD64ADDQconst {
  4895  			break
  4896  		}
  4897  		off2 := v_0.AuxInt
  4898  		base := v_0.Args[0]
  4899  		val := v.Args[1]
  4900  		mem := v.Args[2]
  4901  		if !(is32Bit(off1 + off2)) {
  4902  			break
  4903  		}
  4904  		v.reset(OpAMD64BTCQmodify)
  4905  		v.AuxInt = off1 + off2
  4906  		v.Aux = sym
  4907  		v.AddArg(base)
  4908  		v.AddArg(val)
  4909  		v.AddArg(mem)
  4910  		return true
  4911  	}
  4912  	// match: (BTCQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  4913  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  4914  	// result: (BTCQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  4915  	for {
  4916  		off1 := v.AuxInt
  4917  		sym1 := v.Aux
  4918  		_ = v.Args[2]
  4919  		v_0 := v.Args[0]
  4920  		if v_0.Op != OpAMD64LEAQ {
  4921  			break
  4922  		}
  4923  		off2 := v_0.AuxInt
  4924  		sym2 := v_0.Aux
  4925  		base := v_0.Args[0]
  4926  		val := v.Args[1]
  4927  		mem := v.Args[2]
  4928  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  4929  			break
  4930  		}
  4931  		v.reset(OpAMD64BTCQmodify)
  4932  		v.AuxInt = off1 + off2
  4933  		v.Aux = mergeSym(sym1, sym2)
  4934  		v.AddArg(base)
  4935  		v.AddArg(val)
  4936  		v.AddArg(mem)
  4937  		return true
  4938  	}
  4939  	return false
  4940  }
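        // BTLconst/BTQconst: testing bit c of a constant shift is retargeted by
        // adjusting the bit index (bit c of x>>d is bit c+d of x, bit c of x<<d
        // is bit c-d of x), and testing bit 0 of a variable right shift becomes
        // a variable BT with the shift count as the bit index.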
  4941  func rewriteValueAMD64_OpAMD64BTLconst_0(v *Value) bool {
  4942  	// match: (BTLconst [c] (SHRQconst [d] x))
  4943  	// cond: (c+d)<64
  4944  	// result: (BTQconst [c+d] x)
  4945  	for {
  4946  		c := v.AuxInt
  4947  		v_0 := v.Args[0]
  4948  		if v_0.Op != OpAMD64SHRQconst {
  4949  			break
  4950  		}
  4951  		d := v_0.AuxInt
  4952  		x := v_0.Args[0]
  4953  		if !((c + d) < 64) {
  4954  			break
  4955  		}
  4956  		v.reset(OpAMD64BTQconst)
  4957  		v.AuxInt = c + d
  4958  		v.AddArg(x)
  4959  		return true
  4960  	}
  4961  	// match: (BTLconst [c] (SHLQconst [d] x))
  4962  	// cond: c>d
  4963  	// result: (BTLconst [c-d] x)
  4964  	for {
  4965  		c := v.AuxInt
  4966  		v_0 := v.Args[0]
  4967  		if v_0.Op != OpAMD64SHLQconst {
  4968  			break
  4969  		}
  4970  		d := v_0.AuxInt
  4971  		x := v_0.Args[0]
  4972  		if !(c > d) {
  4973  			break
  4974  		}
  4975  		v.reset(OpAMD64BTLconst)
  4976  		v.AuxInt = c - d
  4977  		v.AddArg(x)
  4978  		return true
  4979  	}
  4980  	// match: (BTLconst [0] s:(SHRQ x y))
  4981  	// cond:
  4982  	// result: (BTQ y x)
  4983  	for {
  4984  		if v.AuxInt != 0 {
  4985  			break
  4986  		}
  4987  		s := v.Args[0]
  4988  		if s.Op != OpAMD64SHRQ {
  4989  			break
  4990  		}
  4991  		_ = s.Args[1]
  4992  		x := s.Args[0]
  4993  		y := s.Args[1]
  4994  		v.reset(OpAMD64BTQ)
  4995  		v.AddArg(y)
  4996  		v.AddArg(x)
  4997  		return true
  4998  	}
  4999  	// match: (BTLconst [c] (SHRLconst [d] x))
  5000  	// cond: (c+d)<32
  5001  	// result: (BTLconst [c+d] x)
  5002  	for {
  5003  		c := v.AuxInt
  5004  		v_0 := v.Args[0]
  5005  		if v_0.Op != OpAMD64SHRLconst {
  5006  			break
  5007  		}
  5008  		d := v_0.AuxInt
  5009  		x := v_0.Args[0]
  5010  		if !((c + d) < 32) {
  5011  			break
  5012  		}
  5013  		v.reset(OpAMD64BTLconst)
  5014  		v.AuxInt = c + d
  5015  		v.AddArg(x)
  5016  		return true
  5017  	}
  5018  	// match: (BTLconst [c] (SHLLconst [d] x))
  5019  	// cond: c>d
  5020  	// result: (BTLconst [c-d] x)
  5021  	for {
  5022  		c := v.AuxInt
  5023  		v_0 := v.Args[0]
  5024  		if v_0.Op != OpAMD64SHLLconst {
  5025  			break
  5026  		}
  5027  		d := v_0.AuxInt
  5028  		x := v_0.Args[0]
  5029  		if !(c > d) {
  5030  			break
  5031  		}
  5032  		v.reset(OpAMD64BTLconst)
  5033  		v.AuxInt = c - d
  5034  		v.AddArg(x)
  5035  		return true
  5036  	}
  5037  	// match: (BTLconst [0] s:(SHRL x y))
  5038  	// cond:
  5039  	// result: (BTL y x)
  5040  	for {
  5041  		if v.AuxInt != 0 {
  5042  			break
  5043  		}
  5044  		s := v.Args[0]
  5045  		if s.Op != OpAMD64SHRL {
  5046  			break
  5047  		}
  5048  		_ = s.Args[1]
  5049  		x := s.Args[0]
  5050  		y := s.Args[1]
  5051  		v.reset(OpAMD64BTL)
  5052  		v.AddArg(y)
  5053  		v.AddArg(x)
  5054  		return true
  5055  	}
  5056  	return false
  5057  }
  5058  func rewriteValueAMD64_OpAMD64BTQconst_0(v *Value) bool {
  5059  	// match: (BTQconst [c] (SHRQconst [d] x))
  5060  	// cond: (c+d)<64
  5061  	// result: (BTQconst [c+d] x)
  5062  	for {
  5063  		c := v.AuxInt
  5064  		v_0 := v.Args[0]
  5065  		if v_0.Op != OpAMD64SHRQconst {
  5066  			break
  5067  		}
  5068  		d := v_0.AuxInt
  5069  		x := v_0.Args[0]
  5070  		if !((c + d) < 64) {
  5071  			break
  5072  		}
  5073  		v.reset(OpAMD64BTQconst)
  5074  		v.AuxInt = c + d
  5075  		v.AddArg(x)
  5076  		return true
  5077  	}
  5078  	// match: (BTQconst [c] (SHLQconst [d] x))
  5079  	// cond: c>d
  5080  	// result: (BTQconst [c-d] x)
  5081  	for {
  5082  		c := v.AuxInt
  5083  		v_0 := v.Args[0]
  5084  		if v_0.Op != OpAMD64SHLQconst {
  5085  			break
  5086  		}
  5087  		d := v_0.AuxInt
  5088  		x := v_0.Args[0]
  5089  		if !(c > d) {
  5090  			break
  5091  		}
  5092  		v.reset(OpAMD64BTQconst)
  5093  		v.AuxInt = c - d
  5094  		v.AddArg(x)
  5095  		return true
  5096  	}
  5097  	// match: (BTQconst [0] s:(SHRQ x y))
  5098  	// cond:
  5099  	// result: (BTQ y x)
  5100  	for {
  5101  		if v.AuxInt != 0 {
  5102  			break
  5103  		}
  5104  		s := v.Args[0]
  5105  		if s.Op != OpAMD64SHRQ {
  5106  			break
  5107  		}
  5108  		_ = s.Args[1]
  5109  		x := s.Args[0]
  5110  		y := s.Args[1]
  5111  		v.reset(OpAMD64BTQ)
  5112  		v.AddArg(y)
  5113  		v.AddArg(x)
  5114  		return true
  5115  	}
  5116  	return false
  5117  }
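        // BTRLconst (clear bit c): a preceding set or complement of the same bit
        // is redundant and dropped, the clear folds into ANDLconst masks, and it
        // is evaluated directly against a MOVLconst operand.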
  5118  func rewriteValueAMD64_OpAMD64BTRLconst_0(v *Value) bool {
  5119  	// match: (BTRLconst [c] (BTSLconst [c] x))
  5120  	// cond:
  5121  	// result: (BTRLconst [c] x)
  5122  	for {
  5123  		c := v.AuxInt
  5124  		v_0 := v.Args[0]
  5125  		if v_0.Op != OpAMD64BTSLconst {
  5126  			break
  5127  		}
  5128  		if v_0.AuxInt != c {
  5129  			break
  5130  		}
  5131  		x := v_0.Args[0]
  5132  		v.reset(OpAMD64BTRLconst)
  5133  		v.AuxInt = c
  5134  		v.AddArg(x)
  5135  		return true
  5136  	}
  5137  	// match: (BTRLconst [c] (BTCLconst [c] x))
  5138  	// cond:
  5139  	// result: (BTRLconst [c] x)
  5140  	for {
  5141  		c := v.AuxInt
  5142  		v_0 := v.Args[0]
  5143  		if v_0.Op != OpAMD64BTCLconst {
  5144  			break
  5145  		}
  5146  		if v_0.AuxInt != c {
  5147  			break
  5148  		}
  5149  		x := v_0.Args[0]
  5150  		v.reset(OpAMD64BTRLconst)
  5151  		v.AuxInt = c
  5152  		v.AddArg(x)
  5153  		return true
  5154  	}
  5155  	// match: (BTRLconst [c] (ANDLconst [d] x))
  5156  	// cond:
  5157  	// result: (ANDLconst [d &^ (1<<uint32(c))] x)
  5158  	for {
  5159  		c := v.AuxInt
  5160  		v_0 := v.Args[0]
  5161  		if v_0.Op != OpAMD64ANDLconst {
  5162  			break
  5163  		}
  5164  		d := v_0.AuxInt
  5165  		x := v_0.Args[0]
  5166  		v.reset(OpAMD64ANDLconst)
  5167  		v.AuxInt = d &^ (1 << uint32(c))
  5168  		v.AddArg(x)
  5169  		return true
  5170  	}
  5171  	// match: (BTRLconst [c] (BTRLconst [d] x))
  5172  	// cond:
  5173  	// result: (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x)
  5174  	for {
  5175  		c := v.AuxInt
  5176  		v_0 := v.Args[0]
  5177  		if v_0.Op != OpAMD64BTRLconst {
  5178  			break
  5179  		}
  5180  		d := v_0.AuxInt
  5181  		x := v_0.Args[0]
  5182  		v.reset(OpAMD64ANDLconst)
  5183  		v.AuxInt = ^(1<<uint32(c) | 1<<uint32(d))
  5184  		v.AddArg(x)
  5185  		return true
  5186  	}
  5187  	// match: (BTRLconst [c] (MOVLconst [d]))
  5188  	// cond:
  5189  	// result: (MOVLconst [d&^(1<<uint32(c))])
  5190  	for {
  5191  		c := v.AuxInt
  5192  		v_0 := v.Args[0]
  5193  		if v_0.Op != OpAMD64MOVLconst {
  5194  			break
  5195  		}
  5196  		d := v_0.AuxInt
  5197  		v.reset(OpAMD64MOVLconst)
  5198  		v.AuxInt = d &^ (1 << uint32(c))
  5199  		return true
  5200  	}
  5201  	return false
  5202  }
  5203  func rewriteValueAMD64_OpAMD64BTRLconstmodify_0(v *Value) bool {
  5204  	// match: (BTRLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
  5205  	// cond: ValAndOff(valoff1).canAdd(off2)
  5206  	// result: (BTRLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
  5207  	for {
  5208  		valoff1 := v.AuxInt
  5209  		sym := v.Aux
  5210  		_ = v.Args[1]
  5211  		v_0 := v.Args[0]
  5212  		if v_0.Op != OpAMD64ADDQconst {
  5213  			break
  5214  		}
  5215  		off2 := v_0.AuxInt
  5216  		base := v_0.Args[0]
  5217  		mem := v.Args[1]
  5218  		if !(ValAndOff(valoff1).canAdd(off2)) {
  5219  			break
  5220  		}
  5221  		v.reset(OpAMD64BTRLconstmodify)
  5222  		v.AuxInt = ValAndOff(valoff1).add(off2)
  5223  		v.Aux = sym
  5224  		v.AddArg(base)
  5225  		v.AddArg(mem)
  5226  		return true
  5227  	}
  5228  	// match: (BTRLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
  5229  	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
  5230  	// result: (BTRLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
  5231  	for {
  5232  		valoff1 := v.AuxInt
  5233  		sym1 := v.Aux
  5234  		_ = v.Args[1]
  5235  		v_0 := v.Args[0]
  5236  		if v_0.Op != OpAMD64LEAQ {
  5237  			break
  5238  		}
  5239  		off2 := v_0.AuxInt
  5240  		sym2 := v_0.Aux
  5241  		base := v_0.Args[0]
  5242  		mem := v.Args[1]
  5243  		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
  5244  			break
  5245  		}
  5246  		v.reset(OpAMD64BTRLconstmodify)
  5247  		v.AuxInt = ValAndOff(valoff1).add(off2)
  5248  		v.Aux = mergeSym(sym1, sym2)
  5249  		v.AddArg(base)
  5250  		v.AddArg(mem)
  5251  		return true
  5252  	}
  5253  	return false
  5254  }
  5255  func rewriteValueAMD64_OpAMD64BTRLmodify_0(v *Value) bool {
  5256  	// match: (BTRLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
  5257  	// cond: is32Bit(off1+off2)
  5258  	// result: (BTRLmodify [off1+off2] {sym} base val mem)
  5259  	for {
  5260  		off1 := v.AuxInt
  5261  		sym := v.Aux
  5262  		_ = v.Args[2]
  5263  		v_0 := v.Args[0]
  5264  		if v_0.Op != OpAMD64ADDQconst {
  5265  			break
  5266  		}
  5267  		off2 := v_0.AuxInt
  5268  		base := v_0.Args[0]
  5269  		val := v.Args[1]
  5270  		mem := v.Args[2]
  5271  		if !(is32Bit(off1 + off2)) {
  5272  			break
  5273  		}
  5274  		v.reset(OpAMD64BTRLmodify)
  5275  		v.AuxInt = off1 + off2
  5276  		v.Aux = sym
  5277  		v.AddArg(base)
  5278  		v.AddArg(val)
  5279  		v.AddArg(mem)
  5280  		return true
  5281  	}
  5282  	// match: (BTRLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  5283  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  5284  	// result: (BTRLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  5285  	for {
  5286  		off1 := v.AuxInt
  5287  		sym1 := v.Aux
  5288  		_ = v.Args[2]
  5289  		v_0 := v.Args[0]
  5290  		if v_0.Op != OpAMD64LEAQ {
  5291  			break
  5292  		}
  5293  		off2 := v_0.AuxInt
  5294  		sym2 := v_0.Aux
  5295  		base := v_0.Args[0]
  5296  		val := v.Args[1]
  5297  		mem := v.Args[2]
  5298  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  5299  			break
  5300  		}
  5301  		v.reset(OpAMD64BTRLmodify)
  5302  		v.AuxInt = off1 + off2
  5303  		v.Aux = mergeSym(sym1, sym2)
  5304  		v.AddArg(base)
  5305  		v.AddArg(val)
  5306  		v.AddArg(mem)
  5307  		return true
  5308  	}
  5309  	return false
  5310  }
  5311  func rewriteValueAMD64_OpAMD64BTRQconst_0(v *Value) bool {
  5312  	// match: (BTRQconst [c] (BTSQconst [c] x))
  5313  	// cond:
  5314  	// result: (BTRQconst [c] x)
  5315  	for {
  5316  		c := v.AuxInt
  5317  		v_0 := v.Args[0]
  5318  		if v_0.Op != OpAMD64BTSQconst {
  5319  			break
  5320  		}
  5321  		if v_0.AuxInt != c {
  5322  			break
  5323  		}
  5324  		x := v_0.Args[0]
  5325  		v.reset(OpAMD64BTRQconst)
  5326  		v.AuxInt = c
  5327  		v.AddArg(x)
  5328  		return true
  5329  	}
  5330  	// match: (BTRQconst [c] (BTCQconst [c] x))
  5331  	// cond:
  5332  	// result: (BTRQconst [c] x)
  5333  	for {
  5334  		c := v.AuxInt
  5335  		v_0 := v.Args[0]
  5336  		if v_0.Op != OpAMD64BTCQconst {
  5337  			break
  5338  		}
  5339  		if v_0.AuxInt != c {
  5340  			break
  5341  		}
  5342  		x := v_0.Args[0]
  5343  		v.reset(OpAMD64BTRQconst)
  5344  		v.AuxInt = c
  5345  		v.AddArg(x)
  5346  		return true
  5347  	}
  5348  	// match: (BTRQconst [c] (ANDQconst [d] x))
  5349  	// cond:
  5350  	// result: (ANDQconst [d &^ (1<<uint32(c))] x)
  5351  	for {
  5352  		c := v.AuxInt
  5353  		v_0 := v.Args[0]
  5354  		if v_0.Op != OpAMD64ANDQconst {
  5355  			break
  5356  		}
  5357  		d := v_0.AuxInt
  5358  		x := v_0.Args[0]
  5359  		v.reset(OpAMD64ANDQconst)
  5360  		v.AuxInt = d &^ (1 << uint32(c))
  5361  		v.AddArg(x)
  5362  		return true
  5363  	}
  5364  	// match: (BTRQconst [c] (BTRQconst [d] x))
  5365  	// cond:
  5366  	// result: (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x)
  5367  	for {
  5368  		c := v.AuxInt
  5369  		v_0 := v.Args[0]
  5370  		if v_0.Op != OpAMD64BTRQconst {
  5371  			break
  5372  		}
  5373  		d := v_0.AuxInt
  5374  		x := v_0.Args[0]
  5375  		v.reset(OpAMD64ANDQconst)
  5376  		v.AuxInt = ^(1<<uint32(c) | 1<<uint32(d))
  5377  		v.AddArg(x)
  5378  		return true
  5379  	}
  5380  	// match: (BTRQconst [c] (MOVQconst [d]))
  5381  	// cond:
  5382  	// result: (MOVQconst [d&^(1<<uint32(c))])
  5383  	for {
  5384  		c := v.AuxInt
  5385  		v_0 := v.Args[0]
  5386  		if v_0.Op != OpAMD64MOVQconst {
  5387  			break
  5388  		}
  5389  		d := v_0.AuxInt
  5390  		v.reset(OpAMD64MOVQconst)
  5391  		v.AuxInt = d &^ (1 << uint32(c))
  5392  		return true
  5393  	}
  5394  	return false
  5395  }
  5396  func rewriteValueAMD64_OpAMD64BTRQconstmodify_0(v *Value) bool {
  5397  	// match: (BTRQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
  5398  	// cond: ValAndOff(valoff1).canAdd(off2)
  5399  	// result: (BTRQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
  5400  	for {
  5401  		valoff1 := v.AuxInt
  5402  		sym := v.Aux
  5403  		_ = v.Args[1]
  5404  		v_0 := v.Args[0]
  5405  		if v_0.Op != OpAMD64ADDQconst {
  5406  			break
  5407  		}
  5408  		off2 := v_0.AuxInt
  5409  		base := v_0.Args[0]
  5410  		mem := v.Args[1]
  5411  		if !(ValAndOff(valoff1).canAdd(off2)) {
  5412  			break
  5413  		}
  5414  		v.reset(OpAMD64BTRQconstmodify)
  5415  		v.AuxInt = ValAndOff(valoff1).add(off2)
  5416  		v.Aux = sym
  5417  		v.AddArg(base)
  5418  		v.AddArg(mem)
  5419  		return true
  5420  	}
  5421  	// match: (BTRQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
  5422  	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
  5423  	// result: (BTRQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
  5424  	for {
  5425  		valoff1 := v.AuxInt
  5426  		sym1 := v.Aux
  5427  		_ = v.Args[1]
  5428  		v_0 := v.Args[0]
  5429  		if v_0.Op != OpAMD64LEAQ {
  5430  			break
  5431  		}
  5432  		off2 := v_0.AuxInt
  5433  		sym2 := v_0.Aux
  5434  		base := v_0.Args[0]
  5435  		mem := v.Args[1]
  5436  		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
  5437  			break
  5438  		}
  5439  		v.reset(OpAMD64BTRQconstmodify)
  5440  		v.AuxInt = ValAndOff(valoff1).add(off2)
  5441  		v.Aux = mergeSym(sym1, sym2)
  5442  		v.AddArg(base)
  5443  		v.AddArg(mem)
  5444  		return true
  5445  	}
  5446  	return false
  5447  }
  5448  func rewriteValueAMD64_OpAMD64BTRQmodify_0(v *Value) bool {
  5449  	// match: (BTRQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
  5450  	// cond: is32Bit(off1+off2)
  5451  	// result: (BTRQmodify [off1+off2] {sym} base val mem)
  5452  	for {
  5453  		off1 := v.AuxInt
  5454  		sym := v.Aux
  5455  		_ = v.Args[2]
  5456  		v_0 := v.Args[0]
  5457  		if v_0.Op != OpAMD64ADDQconst {
  5458  			break
  5459  		}
  5460  		off2 := v_0.AuxInt
  5461  		base := v_0.Args[0]
  5462  		val := v.Args[1]
  5463  		mem := v.Args[2]
  5464  		if !(is32Bit(off1 + off2)) {
  5465  			break
  5466  		}
  5467  		v.reset(OpAMD64BTRQmodify)
  5468  		v.AuxInt = off1 + off2
  5469  		v.Aux = sym
  5470  		v.AddArg(base)
  5471  		v.AddArg(val)
  5472  		v.AddArg(mem)
  5473  		return true
  5474  	}
  5475  	// match: (BTRQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  5476  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  5477  	// result: (BTRQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  5478  	for {
  5479  		off1 := v.AuxInt
  5480  		sym1 := v.Aux
  5481  		_ = v.Args[2]
  5482  		v_0 := v.Args[0]
  5483  		if v_0.Op != OpAMD64LEAQ {
  5484  			break
  5485  		}
  5486  		off2 := v_0.AuxInt
  5487  		sym2 := v_0.Aux
  5488  		base := v_0.Args[0]
  5489  		val := v.Args[1]
  5490  		mem := v.Args[2]
  5491  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  5492  			break
  5493  		}
  5494  		v.reset(OpAMD64BTRQmodify)
  5495  		v.AuxInt = off1 + off2
  5496  		v.Aux = mergeSym(sym1, sym2)
  5497  		v.AddArg(base)
  5498  		v.AddArg(val)
  5499  		v.AddArg(mem)
  5500  		return true
  5501  	}
  5502  	return false
  5503  }
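        // BTSLconst (set bit c): a preceding clear or complement of the same bit
        // is redundant and dropped, the set folds into ORLconst masks, and it is
        // evaluated directly against a MOVLconst operand.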
  5504  func rewriteValueAMD64_OpAMD64BTSLconst_0(v *Value) bool {
  5505  	// match: (BTSLconst [c] (BTRLconst [c] x))
  5506  	// cond:
  5507  	// result: (BTSLconst [c] x)
  5508  	for {
  5509  		c := v.AuxInt
  5510  		v_0 := v.Args[0]
  5511  		if v_0.Op != OpAMD64BTRLconst {
  5512  			break
  5513  		}
  5514  		if v_0.AuxInt != c {
  5515  			break
  5516  		}
  5517  		x := v_0.Args[0]
  5518  		v.reset(OpAMD64BTSLconst)
  5519  		v.AuxInt = c
  5520  		v.AddArg(x)
  5521  		return true
  5522  	}
  5523  	// match: (BTSLconst [c] (BTCLconst [c] x))
  5524  	// cond:
  5525  	// result: (BTSLconst [c] x)
  5526  	for {
  5527  		c := v.AuxInt
  5528  		v_0 := v.Args[0]
  5529  		if v_0.Op != OpAMD64BTCLconst {
  5530  			break
  5531  		}
  5532  		if v_0.AuxInt != c {
  5533  			break
  5534  		}
  5535  		x := v_0.Args[0]
  5536  		v.reset(OpAMD64BTSLconst)
  5537  		v.AuxInt = c
  5538  		v.AddArg(x)
  5539  		return true
  5540  	}
  5541  	// match: (BTSLconst [c] (ORLconst [d] x))
  5542  	// cond:
  5543  	// result: (ORLconst [d | 1<<uint32(c)] x)
  5544  	for {
  5545  		c := v.AuxInt
  5546  		v_0 := v.Args[0]
  5547  		if v_0.Op != OpAMD64ORLconst {
  5548  			break
  5549  		}
  5550  		d := v_0.AuxInt
  5551  		x := v_0.Args[0]
  5552  		v.reset(OpAMD64ORLconst)
  5553  		v.AuxInt = d | 1<<uint32(c)
  5554  		v.AddArg(x)
  5555  		return true
  5556  	}
  5557  	// match: (BTSLconst [c] (BTSLconst [d] x))
  5558  	// cond:
  5559  	// result: (ORLconst [1<<uint32(d) | 1<<uint32(c)] x)
  5560  	for {
  5561  		c := v.AuxInt
  5562  		v_0 := v.Args[0]
  5563  		if v_0.Op != OpAMD64BTSLconst {
  5564  			break
  5565  		}
  5566  		d := v_0.AuxInt
  5567  		x := v_0.Args[0]
  5568  		v.reset(OpAMD64ORLconst)
  5569  		v.AuxInt = 1<<uint32(d) | 1<<uint32(c)
  5570  		v.AddArg(x)
  5571  		return true
  5572  	}
  5573  	// match: (BTSLconst [c] (MOVLconst [d]))
  5574  	// cond:
  5575  	// result: (MOVLconst [d|(1<<uint32(c))])
  5576  	for {
  5577  		c := v.AuxInt
  5578  		v_0 := v.Args[0]
  5579  		if v_0.Op != OpAMD64MOVLconst {
  5580  			break
  5581  		}
  5582  		d := v_0.AuxInt
  5583  		v.reset(OpAMD64MOVLconst)
  5584  		v.AuxInt = d | (1 << uint32(c))
  5585  		return true
  5586  	}
  5587  	return false
  5588  }
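// Worked example for the constant-folding rules above (not itself a rule):
// with c=3 and d=1, (BTSLconst [3] (ORLconst [1] x)) becomes (ORLconst [9] x)
// because 1 | 1<<uint32(3) == 9, and (BTSLconst [2] (BTSLconst [5] x)) becomes
// (ORLconst [36] x) because 1<<uint32(5) | 1<<uint32(2) == 36.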
  5589  func rewriteValueAMD64_OpAMD64BTSLconstmodify_0(v *Value) bool {
  5590  	// match: (BTSLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
  5591  	// cond: ValAndOff(valoff1).canAdd(off2)
  5592  	// result: (BTSLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
  5593  	for {
  5594  		valoff1 := v.AuxInt
  5595  		sym := v.Aux
  5596  		_ = v.Args[1]
  5597  		v_0 := v.Args[0]
  5598  		if v_0.Op != OpAMD64ADDQconst {
  5599  			break
  5600  		}
  5601  		off2 := v_0.AuxInt
  5602  		base := v_0.Args[0]
  5603  		mem := v.Args[1]
  5604  		if !(ValAndOff(valoff1).canAdd(off2)) {
  5605  			break
  5606  		}
  5607  		v.reset(OpAMD64BTSLconstmodify)
  5608  		v.AuxInt = ValAndOff(valoff1).add(off2)
  5609  		v.Aux = sym
  5610  		v.AddArg(base)
  5611  		v.AddArg(mem)
  5612  		return true
  5613  	}
  5614  	// match: (BTSLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
  5615  	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
  5616  	// result: (BTSLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
  5617  	for {
  5618  		valoff1 := v.AuxInt
  5619  		sym1 := v.Aux
  5620  		_ = v.Args[1]
  5621  		v_0 := v.Args[0]
  5622  		if v_0.Op != OpAMD64LEAQ {
  5623  			break
  5624  		}
  5625  		off2 := v_0.AuxInt
  5626  		sym2 := v_0.Aux
  5627  		base := v_0.Args[0]
  5628  		mem := v.Args[1]
  5629  		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
  5630  			break
  5631  		}
  5632  		v.reset(OpAMD64BTSLconstmodify)
  5633  		v.AuxInt = ValAndOff(valoff1).add(off2)
  5634  		v.Aux = mergeSym(sym1, sym2)
  5635  		v.AddArg(base)
  5636  		v.AddArg(mem)
  5637  		return true
  5638  	}
  5639  	return false
  5640  }
  5641  func rewriteValueAMD64_OpAMD64BTSLmodify_0(v *Value) bool {
  5642  	// match: (BTSLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
  5643  	// cond: is32Bit(off1+off2)
  5644  	// result: (BTSLmodify [off1+off2] {sym} base val mem)
  5645  	for {
  5646  		off1 := v.AuxInt
  5647  		sym := v.Aux
  5648  		_ = v.Args[2]
  5649  		v_0 := v.Args[0]
  5650  		if v_0.Op != OpAMD64ADDQconst {
  5651  			break
  5652  		}
  5653  		off2 := v_0.AuxInt
  5654  		base := v_0.Args[0]
  5655  		val := v.Args[1]
  5656  		mem := v.Args[2]
  5657  		if !(is32Bit(off1 + off2)) {
  5658  			break
  5659  		}
  5660  		v.reset(OpAMD64BTSLmodify)
  5661  		v.AuxInt = off1 + off2
  5662  		v.Aux = sym
  5663  		v.AddArg(base)
  5664  		v.AddArg(val)
  5665  		v.AddArg(mem)
  5666  		return true
  5667  	}
  5668  	// match: (BTSLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  5669  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  5670  	// result: (BTSLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  5671  	for {
  5672  		off1 := v.AuxInt
  5673  		sym1 := v.Aux
  5674  		_ = v.Args[2]
  5675  		v_0 := v.Args[0]
  5676  		if v_0.Op != OpAMD64LEAQ {
  5677  			break
  5678  		}
  5679  		off2 := v_0.AuxInt
  5680  		sym2 := v_0.Aux
  5681  		base := v_0.Args[0]
  5682  		val := v.Args[1]
  5683  		mem := v.Args[2]
  5684  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  5685  			break
  5686  		}
  5687  		v.reset(OpAMD64BTSLmodify)
  5688  		v.AuxInt = off1 + off2
  5689  		v.Aux = mergeSym(sym1, sym2)
  5690  		v.AddArg(base)
  5691  		v.AddArg(val)
  5692  		v.AddArg(mem)
  5693  		return true
  5694  	}
  5695  	return false
  5696  }
  5697  func rewriteValueAMD64_OpAMD64BTSQconst_0(v *Value) bool {
  5698  	// match: (BTSQconst [c] (BTRQconst [c] x))
  5699  	// cond:
  5700  	// result: (BTSQconst [c] x)
  5701  	for {
  5702  		c := v.AuxInt
  5703  		v_0 := v.Args[0]
  5704  		if v_0.Op != OpAMD64BTRQconst {
  5705  			break
  5706  		}
  5707  		if v_0.AuxInt != c {
  5708  			break
  5709  		}
  5710  		x := v_0.Args[0]
  5711  		v.reset(OpAMD64BTSQconst)
  5712  		v.AuxInt = c
  5713  		v.AddArg(x)
  5714  		return true
  5715  	}
  5716  	// match: (BTSQconst [c] (BTCQconst [c] x))
  5717  	// cond:
  5718  	// result: (BTSQconst [c] x)
  5719  	for {
  5720  		c := v.AuxInt
  5721  		v_0 := v.Args[0]
  5722  		if v_0.Op != OpAMD64BTCQconst {
  5723  			break
  5724  		}
  5725  		if v_0.AuxInt != c {
  5726  			break
  5727  		}
  5728  		x := v_0.Args[0]
  5729  		v.reset(OpAMD64BTSQconst)
  5730  		v.AuxInt = c
  5731  		v.AddArg(x)
  5732  		return true
  5733  	}
  5734  	// match: (BTSQconst [c] (ORQconst [d] x))
  5735  	// cond:
  5736  	// result: (ORQconst [d | 1<<uint32(c)] x)
  5737  	for {
  5738  		c := v.AuxInt
  5739  		v_0 := v.Args[0]
  5740  		if v_0.Op != OpAMD64ORQconst {
  5741  			break
  5742  		}
  5743  		d := v_0.AuxInt
  5744  		x := v_0.Args[0]
  5745  		v.reset(OpAMD64ORQconst)
  5746  		v.AuxInt = d | 1<<uint32(c)
  5747  		v.AddArg(x)
  5748  		return true
  5749  	}
  5750  	// match: (BTSQconst [c] (BTSQconst [d] x))
  5751  	// cond:
  5752  	// result: (ORQconst [1<<uint32(d) | 1<<uint32(c)] x)
  5753  	for {
  5754  		c := v.AuxInt
  5755  		v_0 := v.Args[0]
  5756  		if v_0.Op != OpAMD64BTSQconst {
  5757  			break
  5758  		}
  5759  		d := v_0.AuxInt
  5760  		x := v_0.Args[0]
  5761  		v.reset(OpAMD64ORQconst)
  5762  		v.AuxInt = 1<<uint32(d) | 1<<uint32(c)
  5763  		v.AddArg(x)
  5764  		return true
  5765  	}
  5766  	// match: (BTSQconst [c] (MOVQconst [d]))
  5767  	// cond:
  5768  	// result: (MOVQconst [d|(1<<uint32(c))])
  5769  	for {
  5770  		c := v.AuxInt
  5771  		v_0 := v.Args[0]
  5772  		if v_0.Op != OpAMD64MOVQconst {
  5773  			break
  5774  		}
  5775  		d := v_0.AuxInt
  5776  		v.reset(OpAMD64MOVQconst)
  5777  		v.AuxInt = d | (1 << uint32(c))
  5778  		return true
  5779  	}
  5780  	return false
  5781  }
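// Worked example for the rules above (not itself a rule): setting a bit in a
// value that is already a known constant needs no BTS instruction at all, e.g.
// (BTSQconst [40] (MOVQconst [0])) becomes (MOVQconst [1099511627776]),
// i.e. 0 | (1 << uint32(40)).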
  5782  func rewriteValueAMD64_OpAMD64BTSQconstmodify_0(v *Value) bool {
  5783  	// match: (BTSQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
  5784  	// cond: ValAndOff(valoff1).canAdd(off2)
  5785  	// result: (BTSQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
  5786  	for {
  5787  		valoff1 := v.AuxInt
  5788  		sym := v.Aux
  5789  		_ = v.Args[1]
  5790  		v_0 := v.Args[0]
  5791  		if v_0.Op != OpAMD64ADDQconst {
  5792  			break
  5793  		}
  5794  		off2 := v_0.AuxInt
  5795  		base := v_0.Args[0]
  5796  		mem := v.Args[1]
  5797  		if !(ValAndOff(valoff1).canAdd(off2)) {
  5798  			break
  5799  		}
  5800  		v.reset(OpAMD64BTSQconstmodify)
  5801  		v.AuxInt = ValAndOff(valoff1).add(off2)
  5802  		v.Aux = sym
  5803  		v.AddArg(base)
  5804  		v.AddArg(mem)
  5805  		return true
  5806  	}
  5807  	// match: (BTSQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
  5808  	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
  5809  	// result: (BTSQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
  5810  	for {
  5811  		valoff1 := v.AuxInt
  5812  		sym1 := v.Aux
  5813  		_ = v.Args[1]
  5814  		v_0 := v.Args[0]
  5815  		if v_0.Op != OpAMD64LEAQ {
  5816  			break
  5817  		}
  5818  		off2 := v_0.AuxInt
  5819  		sym2 := v_0.Aux
  5820  		base := v_0.Args[0]
  5821  		mem := v.Args[1]
  5822  		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
  5823  			break
  5824  		}
  5825  		v.reset(OpAMD64BTSQconstmodify)
  5826  		v.AuxInt = ValAndOff(valoff1).add(off2)
  5827  		v.Aux = mergeSym(sym1, sym2)
  5828  		v.AddArg(base)
  5829  		v.AddArg(mem)
  5830  		return true
  5831  	}
  5832  	return false
  5833  }
  5834  func rewriteValueAMD64_OpAMD64BTSQmodify_0(v *Value) bool {
  5835  	// match: (BTSQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
  5836  	// cond: is32Bit(off1+off2)
  5837  	// result: (BTSQmodify [off1+off2] {sym} base val mem)
  5838  	for {
  5839  		off1 := v.AuxInt
  5840  		sym := v.Aux
  5841  		_ = v.Args[2]
  5842  		v_0 := v.Args[0]
  5843  		if v_0.Op != OpAMD64ADDQconst {
  5844  			break
  5845  		}
  5846  		off2 := v_0.AuxInt
  5847  		base := v_0.Args[0]
  5848  		val := v.Args[1]
  5849  		mem := v.Args[2]
  5850  		if !(is32Bit(off1 + off2)) {
  5851  			break
  5852  		}
  5853  		v.reset(OpAMD64BTSQmodify)
  5854  		v.AuxInt = off1 + off2
  5855  		v.Aux = sym
  5856  		v.AddArg(base)
  5857  		v.AddArg(val)
  5858  		v.AddArg(mem)
  5859  		return true
  5860  	}
  5861  	// match: (BTSQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  5862  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  5863  	// result: (BTSQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  5864  	for {
  5865  		off1 := v.AuxInt
  5866  		sym1 := v.Aux
  5867  		_ = v.Args[2]
  5868  		v_0 := v.Args[0]
  5869  		if v_0.Op != OpAMD64LEAQ {
  5870  			break
  5871  		}
  5872  		off2 := v_0.AuxInt
  5873  		sym2 := v_0.Aux
  5874  		base := v_0.Args[0]
  5875  		val := v.Args[1]
  5876  		mem := v.Args[2]
  5877  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  5878  			break
  5879  		}
  5880  		v.reset(OpAMD64BTSQmodify)
  5881  		v.AuxInt = off1 + off2
  5882  		v.Aux = mergeSym(sym1, sym2)
  5883  		v.AddArg(base)
  5884  		v.AddArg(val)
  5885  		v.AddArg(mem)
  5886  		return true
  5887  	}
  5888  	return false
  5889  }
  5890  func rewriteValueAMD64_OpAMD64CMOVLCC_0(v *Value) bool {
  5891  	// match: (CMOVLCC x y (InvertFlags cond))
  5892  	// cond:
  5893  	// result: (CMOVLLS x y cond)
  5894  	for {
  5895  		_ = v.Args[2]
  5896  		x := v.Args[0]
  5897  		y := v.Args[1]
  5898  		v_2 := v.Args[2]
  5899  		if v_2.Op != OpAMD64InvertFlags {
  5900  			break
  5901  		}
  5902  		cond := v_2.Args[0]
  5903  		v.reset(OpAMD64CMOVLLS)
  5904  		v.AddArg(x)
  5905  		v.AddArg(y)
  5906  		v.AddArg(cond)
  5907  		return true
  5908  	}
  5909  	// match: (CMOVLCC _ x (FlagEQ))
  5910  	// cond:
  5911  	// result: x
  5912  	for {
  5913  		_ = v.Args[2]
  5914  		x := v.Args[1]
  5915  		v_2 := v.Args[2]
  5916  		if v_2.Op != OpAMD64FlagEQ {
  5917  			break
  5918  		}
  5919  		v.reset(OpCopy)
  5920  		v.Type = x.Type
  5921  		v.AddArg(x)
  5922  		return true
  5923  	}
  5924  	// match: (CMOVLCC _ x (FlagGT_UGT))
  5925  	// cond:
  5926  	// result: x
  5927  	for {
  5928  		_ = v.Args[2]
  5929  		x := v.Args[1]
  5930  		v_2 := v.Args[2]
  5931  		if v_2.Op != OpAMD64FlagGT_UGT {
  5932  			break
  5933  		}
  5934  		v.reset(OpCopy)
  5935  		v.Type = x.Type
  5936  		v.AddArg(x)
  5937  		return true
  5938  	}
  5939  	// match: (CMOVLCC y _ (FlagGT_ULT))
  5940  	// cond:
  5941  	// result: y
  5942  	for {
  5943  		_ = v.Args[2]
  5944  		y := v.Args[0]
  5945  		v_2 := v.Args[2]
  5946  		if v_2.Op != OpAMD64FlagGT_ULT {
  5947  			break
  5948  		}
  5949  		v.reset(OpCopy)
  5950  		v.Type = y.Type
  5951  		v.AddArg(y)
  5952  		return true
  5953  	}
  5954  	// match: (CMOVLCC y _ (FlagLT_ULT))
  5955  	// cond:
  5956  	// result: y
  5957  	for {
  5958  		_ = v.Args[2]
  5959  		y := v.Args[0]
  5960  		v_2 := v.Args[2]
  5961  		if v_2.Op != OpAMD64FlagLT_ULT {
  5962  			break
  5963  		}
  5964  		v.reset(OpCopy)
  5965  		v.Type = y.Type
  5966  		v.AddArg(y)
  5967  		return true
  5968  	}
  5969  	// match: (CMOVLCC _ x (FlagLT_UGT))
  5970  	// cond:
  5971  	// result: x
  5972  	for {
  5973  		_ = v.Args[2]
  5974  		x := v.Args[1]
  5975  		v_2 := v.Args[2]
  5976  		if v_2.Op != OpAMD64FlagLT_UGT {
  5977  			break
  5978  		}
  5979  		v.reset(OpCopy)
  5980  		v.Type = x.Type
  5981  		v.AddArg(x)
  5982  		return true
  5983  	}
  5984  	return false
  5985  }
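// Reading off the CMOV rules above and below: in (CMOVxxx a b flags), the
// second operand is kept when the condition holds and the first when it does
// not, so a flags value that is already known (FlagEQ, FlagGT_UGT, ...)
// collapses the CMOV into a plain copy of one operand. The InvertFlags rules
// handle a comparison whose operands were swapped: CC (unsigned >=) becomes LS
// (unsigned <=), CS becomes HI, GE becomes LE, GT becomes LT, while EQ and NE
// map to themselves.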
  5986  func rewriteValueAMD64_OpAMD64CMOVLCS_0(v *Value) bool {
  5987  	// match: (CMOVLCS x y (InvertFlags cond))
  5988  	// cond:
  5989  	// result: (CMOVLHI x y cond)
  5990  	for {
  5991  		_ = v.Args[2]
  5992  		x := v.Args[0]
  5993  		y := v.Args[1]
  5994  		v_2 := v.Args[2]
  5995  		if v_2.Op != OpAMD64InvertFlags {
  5996  			break
  5997  		}
  5998  		cond := v_2.Args[0]
  5999  		v.reset(OpAMD64CMOVLHI)
  6000  		v.AddArg(x)
  6001  		v.AddArg(y)
  6002  		v.AddArg(cond)
  6003  		return true
  6004  	}
  6005  	// match: (CMOVLCS y _ (FlagEQ))
  6006  	// cond:
  6007  	// result: y
  6008  	for {
  6009  		_ = v.Args[2]
  6010  		y := v.Args[0]
  6011  		v_2 := v.Args[2]
  6012  		if v_2.Op != OpAMD64FlagEQ {
  6013  			break
  6014  		}
  6015  		v.reset(OpCopy)
  6016  		v.Type = y.Type
  6017  		v.AddArg(y)
  6018  		return true
  6019  	}
  6020  	// match: (CMOVLCS y _ (FlagGT_UGT))
  6021  	// cond:
  6022  	// result: y
  6023  	for {
  6024  		_ = v.Args[2]
  6025  		y := v.Args[0]
  6026  		v_2 := v.Args[2]
  6027  		if v_2.Op != OpAMD64FlagGT_UGT {
  6028  			break
  6029  		}
  6030  		v.reset(OpCopy)
  6031  		v.Type = y.Type
  6032  		v.AddArg(y)
  6033  		return true
  6034  	}
  6035  	// match: (CMOVLCS _ x (FlagGT_ULT))
  6036  	// cond:
  6037  	// result: x
  6038  	for {
  6039  		_ = v.Args[2]
  6040  		x := v.Args[1]
  6041  		v_2 := v.Args[2]
  6042  		if v_2.Op != OpAMD64FlagGT_ULT {
  6043  			break
  6044  		}
  6045  		v.reset(OpCopy)
  6046  		v.Type = x.Type
  6047  		v.AddArg(x)
  6048  		return true
  6049  	}
  6050  	// match: (CMOVLCS _ x (FlagLT_ULT))
  6051  	// cond:
  6052  	// result: x
  6053  	for {
  6054  		_ = v.Args[2]
  6055  		x := v.Args[1]
  6056  		v_2 := v.Args[2]
  6057  		if v_2.Op != OpAMD64FlagLT_ULT {
  6058  			break
  6059  		}
  6060  		v.reset(OpCopy)
  6061  		v.Type = x.Type
  6062  		v.AddArg(x)
  6063  		return true
  6064  	}
  6065  	// match: (CMOVLCS y _ (FlagLT_UGT))
  6066  	// cond:
  6067  	// result: y
  6068  	for {
  6069  		_ = v.Args[2]
  6070  		y := v.Args[0]
  6071  		v_2 := v.Args[2]
  6072  		if v_2.Op != OpAMD64FlagLT_UGT {
  6073  			break
  6074  		}
  6075  		v.reset(OpCopy)
  6076  		v.Type = y.Type
  6077  		v.AddArg(y)
  6078  		return true
  6079  	}
  6080  	return false
  6081  }
  6082  func rewriteValueAMD64_OpAMD64CMOVLEQ_0(v *Value) bool {
  6083  	// match: (CMOVLEQ x y (InvertFlags cond))
  6084  	// cond:
  6085  	// result: (CMOVLEQ x y cond)
  6086  	for {
  6087  		_ = v.Args[2]
  6088  		x := v.Args[0]
  6089  		y := v.Args[1]
  6090  		v_2 := v.Args[2]
  6091  		if v_2.Op != OpAMD64InvertFlags {
  6092  			break
  6093  		}
  6094  		cond := v_2.Args[0]
  6095  		v.reset(OpAMD64CMOVLEQ)
  6096  		v.AddArg(x)
  6097  		v.AddArg(y)
  6098  		v.AddArg(cond)
  6099  		return true
  6100  	}
  6101  	// match: (CMOVLEQ _ x (FlagEQ))
  6102  	// cond:
  6103  	// result: x
  6104  	for {
  6105  		_ = v.Args[2]
  6106  		x := v.Args[1]
  6107  		v_2 := v.Args[2]
  6108  		if v_2.Op != OpAMD64FlagEQ {
  6109  			break
  6110  		}
  6111  		v.reset(OpCopy)
  6112  		v.Type = x.Type
  6113  		v.AddArg(x)
  6114  		return true
  6115  	}
  6116  	// match: (CMOVLEQ y _ (FlagGT_UGT))
  6117  	// cond:
  6118  	// result: y
  6119  	for {
  6120  		_ = v.Args[2]
  6121  		y := v.Args[0]
  6122  		v_2 := v.Args[2]
  6123  		if v_2.Op != OpAMD64FlagGT_UGT {
  6124  			break
  6125  		}
  6126  		v.reset(OpCopy)
  6127  		v.Type = y.Type
  6128  		v.AddArg(y)
  6129  		return true
  6130  	}
  6131  	// match: (CMOVLEQ y _ (FlagGT_ULT))
  6132  	// cond:
  6133  	// result: y
  6134  	for {
  6135  		_ = v.Args[2]
  6136  		y := v.Args[0]
  6137  		v_2 := v.Args[2]
  6138  		if v_2.Op != OpAMD64FlagGT_ULT {
  6139  			break
  6140  		}
  6141  		v.reset(OpCopy)
  6142  		v.Type = y.Type
  6143  		v.AddArg(y)
  6144  		return true
  6145  	}
  6146  	// match: (CMOVLEQ y _ (FlagLT_ULT))
  6147  	// cond:
  6148  	// result: y
  6149  	for {
  6150  		_ = v.Args[2]
  6151  		y := v.Args[0]
  6152  		v_2 := v.Args[2]
  6153  		if v_2.Op != OpAMD64FlagLT_ULT {
  6154  			break
  6155  		}
  6156  		v.reset(OpCopy)
  6157  		v.Type = y.Type
  6158  		v.AddArg(y)
  6159  		return true
  6160  	}
  6161  	// match: (CMOVLEQ y _ (FlagLT_UGT))
  6162  	// cond:
  6163  	// result: y
  6164  	for {
  6165  		_ = v.Args[2]
  6166  		y := v.Args[0]
  6167  		v_2 := v.Args[2]
  6168  		if v_2.Op != OpAMD64FlagLT_UGT {
  6169  			break
  6170  		}
  6171  		v.reset(OpCopy)
  6172  		v.Type = y.Type
  6173  		v.AddArg(y)
  6174  		return true
  6175  	}
  6176  	return false
  6177  }
  6178  func rewriteValueAMD64_OpAMD64CMOVLGE_0(v *Value) bool {
  6179  	// match: (CMOVLGE x y (InvertFlags cond))
  6180  	// cond:
  6181  	// result: (CMOVLLE x y cond)
  6182  	for {
  6183  		_ = v.Args[2]
  6184  		x := v.Args[0]
  6185  		y := v.Args[1]
  6186  		v_2 := v.Args[2]
  6187  		if v_2.Op != OpAMD64InvertFlags {
  6188  			break
  6189  		}
  6190  		cond := v_2.Args[0]
  6191  		v.reset(OpAMD64CMOVLLE)
  6192  		v.AddArg(x)
  6193  		v.AddArg(y)
  6194  		v.AddArg(cond)
  6195  		return true
  6196  	}
  6197  	// match: (CMOVLGE _ x (FlagEQ))
  6198  	// cond:
  6199  	// result: x
  6200  	for {
  6201  		_ = v.Args[2]
  6202  		x := v.Args[1]
  6203  		v_2 := v.Args[2]
  6204  		if v_2.Op != OpAMD64FlagEQ {
  6205  			break
  6206  		}
  6207  		v.reset(OpCopy)
  6208  		v.Type = x.Type
  6209  		v.AddArg(x)
  6210  		return true
  6211  	}
  6212  	// match: (CMOVLGE _ x (FlagGT_UGT))
  6213  	// cond:
  6214  	// result: x
  6215  	for {
  6216  		_ = v.Args[2]
  6217  		x := v.Args[1]
  6218  		v_2 := v.Args[2]
  6219  		if v_2.Op != OpAMD64FlagGT_UGT {
  6220  			break
  6221  		}
  6222  		v.reset(OpCopy)
  6223  		v.Type = x.Type
  6224  		v.AddArg(x)
  6225  		return true
  6226  	}
  6227  	// match: (CMOVLGE _ x (FlagGT_ULT))
  6228  	// cond:
  6229  	// result: x
  6230  	for {
  6231  		_ = v.Args[2]
  6232  		x := v.Args[1]
  6233  		v_2 := v.Args[2]
  6234  		if v_2.Op != OpAMD64FlagGT_ULT {
  6235  			break
  6236  		}
  6237  		v.reset(OpCopy)
  6238  		v.Type = x.Type
  6239  		v.AddArg(x)
  6240  		return true
  6241  	}
  6242  	// match: (CMOVLGE y _ (FlagLT_ULT))
  6243  	// cond:
  6244  	// result: y
  6245  	for {
  6246  		_ = v.Args[2]
  6247  		y := v.Args[0]
  6248  		v_2 := v.Args[2]
  6249  		if v_2.Op != OpAMD64FlagLT_ULT {
  6250  			break
  6251  		}
  6252  		v.reset(OpCopy)
  6253  		v.Type = y.Type
  6254  		v.AddArg(y)
  6255  		return true
  6256  	}
  6257  	// match: (CMOVLGE y _ (FlagLT_UGT))
  6258  	// cond:
  6259  	// result: y
  6260  	for {
  6261  		_ = v.Args[2]
  6262  		y := v.Args[0]
  6263  		v_2 := v.Args[2]
  6264  		if v_2.Op != OpAMD64FlagLT_UGT {
  6265  			break
  6266  		}
  6267  		v.reset(OpCopy)
  6268  		v.Type = y.Type
  6269  		v.AddArg(y)
  6270  		return true
  6271  	}
  6272  	return false
  6273  }
  6274  func rewriteValueAMD64_OpAMD64CMOVLGT_0(v *Value) bool {
  6275  	// match: (CMOVLGT x y (InvertFlags cond))
  6276  	// cond:
  6277  	// result: (CMOVLLT x y cond)
  6278  	for {
  6279  		_ = v.Args[2]
  6280  		x := v.Args[0]
  6281  		y := v.Args[1]
  6282  		v_2 := v.Args[2]
  6283  		if v_2.Op != OpAMD64InvertFlags {
  6284  			break
  6285  		}
  6286  		cond := v_2.Args[0]
  6287  		v.reset(OpAMD64CMOVLLT)
  6288  		v.AddArg(x)
  6289  		v.AddArg(y)
  6290  		v.AddArg(cond)
  6291  		return true
  6292  	}
  6293  	// match: (CMOVLGT y _ (FlagEQ))
  6294  	// cond:
  6295  	// result: y
  6296  	for {
  6297  		_ = v.Args[2]
  6298  		y := v.Args[0]
  6299  		v_2 := v.Args[2]
  6300  		if v_2.Op != OpAMD64FlagEQ {
  6301  			break
  6302  		}
  6303  		v.reset(OpCopy)
  6304  		v.Type = y.Type
  6305  		v.AddArg(y)
  6306  		return true
  6307  	}
  6308  	// match: (CMOVLGT _ x (FlagGT_UGT))
  6309  	// cond:
  6310  	// result: x
  6311  	for {
  6312  		_ = v.Args[2]
  6313  		x := v.Args[1]
  6314  		v_2 := v.Args[2]
  6315  		if v_2.Op != OpAMD64FlagGT_UGT {
  6316  			break
  6317  		}
  6318  		v.reset(OpCopy)
  6319  		v.Type = x.Type
  6320  		v.AddArg(x)
  6321  		return true
  6322  	}
  6323  	// match: (CMOVLGT _ x (FlagGT_ULT))
  6324  	// cond:
  6325  	// result: x
  6326  	for {
  6327  		_ = v.Args[2]
  6328  		x := v.Args[1]
  6329  		v_2 := v.Args[2]
  6330  		if v_2.Op != OpAMD64FlagGT_ULT {
  6331  			break
  6332  		}
  6333  		v.reset(OpCopy)
  6334  		v.Type = x.Type
  6335  		v.AddArg(x)
  6336  		return true
  6337  	}
  6338  	// match: (CMOVLGT y _ (FlagLT_ULT))
  6339  	// cond:
  6340  	// result: y
  6341  	for {
  6342  		_ = v.Args[2]
  6343  		y := v.Args[0]
  6344  		v_2 := v.Args[2]
  6345  		if v_2.Op != OpAMD64FlagLT_ULT {
  6346  			break
  6347  		}
  6348  		v.reset(OpCopy)
  6349  		v.Type = y.Type
  6350  		v.AddArg(y)
  6351  		return true
  6352  	}
  6353  	// match: (CMOVLGT y _ (FlagLT_UGT))
  6354  	// cond:
  6355  	// result: y
  6356  	for {
  6357  		_ = v.Args[2]
  6358  		y := v.Args[0]
  6359  		v_2 := v.Args[2]
  6360  		if v_2.Op != OpAMD64FlagLT_UGT {
  6361  			break
  6362  		}
  6363  		v.reset(OpCopy)
  6364  		v.Type = y.Type
  6365  		v.AddArg(y)
  6366  		return true
  6367  	}
  6368  	return false
  6369  }
  6370  func rewriteValueAMD64_OpAMD64CMOVLHI_0(v *Value) bool {
  6371  	// match: (CMOVLHI x y (InvertFlags cond))
  6372  	// cond:
  6373  	// result: (CMOVLCS x y cond)
  6374  	for {
  6375  		_ = v.Args[2]
  6376  		x := v.Args[0]
  6377  		y := v.Args[1]
  6378  		v_2 := v.Args[2]
  6379  		if v_2.Op != OpAMD64InvertFlags {
  6380  			break
  6381  		}
  6382  		cond := v_2.Args[0]
  6383  		v.reset(OpAMD64CMOVLCS)
  6384  		v.AddArg(x)
  6385  		v.AddArg(y)
  6386  		v.AddArg(cond)
  6387  		return true
  6388  	}
  6389  	// match: (CMOVLHI y _ (FlagEQ))
  6390  	// cond:
  6391  	// result: y
  6392  	for {
  6393  		_ = v.Args[2]
  6394  		y := v.Args[0]
  6395  		v_2 := v.Args[2]
  6396  		if v_2.Op != OpAMD64FlagEQ {
  6397  			break
  6398  		}
  6399  		v.reset(OpCopy)
  6400  		v.Type = y.Type
  6401  		v.AddArg(y)
  6402  		return true
  6403  	}
  6404  	// match: (CMOVLHI _ x (FlagGT_UGT))
  6405  	// cond:
  6406  	// result: x
  6407  	for {
  6408  		_ = v.Args[2]
  6409  		x := v.Args[1]
  6410  		v_2 := v.Args[2]
  6411  		if v_2.Op != OpAMD64FlagGT_UGT {
  6412  			break
  6413  		}
  6414  		v.reset(OpCopy)
  6415  		v.Type = x.Type
  6416  		v.AddArg(x)
  6417  		return true
  6418  	}
  6419  	// match: (CMOVLHI y _ (FlagGT_ULT))
  6420  	// cond:
  6421  	// result: y
  6422  	for {
  6423  		_ = v.Args[2]
  6424  		y := v.Args[0]
  6425  		v_2 := v.Args[2]
  6426  		if v_2.Op != OpAMD64FlagGT_ULT {
  6427  			break
  6428  		}
  6429  		v.reset(OpCopy)
  6430  		v.Type = y.Type
  6431  		v.AddArg(y)
  6432  		return true
  6433  	}
  6434  	// match: (CMOVLHI y _ (FlagLT_ULT))
  6435  	// cond:
  6436  	// result: y
  6437  	for {
  6438  		_ = v.Args[2]
  6439  		y := v.Args[0]
  6440  		v_2 := v.Args[2]
  6441  		if v_2.Op != OpAMD64FlagLT_ULT {
  6442  			break
  6443  		}
  6444  		v.reset(OpCopy)
  6445  		v.Type = y.Type
  6446  		v.AddArg(y)
  6447  		return true
  6448  	}
  6449  	// match: (CMOVLHI _ x (FlagLT_UGT))
  6450  	// cond:
  6451  	// result: x
  6452  	for {
  6453  		_ = v.Args[2]
  6454  		x := v.Args[1]
  6455  		v_2 := v.Args[2]
  6456  		if v_2.Op != OpAMD64FlagLT_UGT {
  6457  			break
  6458  		}
  6459  		v.reset(OpCopy)
  6460  		v.Type = x.Type
  6461  		v.AddArg(x)
  6462  		return true
  6463  	}
  6464  	return false
  6465  }
  6466  func rewriteValueAMD64_OpAMD64CMOVLLE_0(v *Value) bool {
  6467  	// match: (CMOVLLE x y (InvertFlags cond))
  6468  	// cond:
  6469  	// result: (CMOVLGE x y cond)
  6470  	for {
  6471  		_ = v.Args[2]
  6472  		x := v.Args[0]
  6473  		y := v.Args[1]
  6474  		v_2 := v.Args[2]
  6475  		if v_2.Op != OpAMD64InvertFlags {
  6476  			break
  6477  		}
  6478  		cond := v_2.Args[0]
  6479  		v.reset(OpAMD64CMOVLGE)
  6480  		v.AddArg(x)
  6481  		v.AddArg(y)
  6482  		v.AddArg(cond)
  6483  		return true
  6484  	}
  6485  	// match: (CMOVLLE _ x (FlagEQ))
  6486  	// cond:
  6487  	// result: x
  6488  	for {
  6489  		_ = v.Args[2]
  6490  		x := v.Args[1]
  6491  		v_2 := v.Args[2]
  6492  		if v_2.Op != OpAMD64FlagEQ {
  6493  			break
  6494  		}
  6495  		v.reset(OpCopy)
  6496  		v.Type = x.Type
  6497  		v.AddArg(x)
  6498  		return true
  6499  	}
  6500  	// match: (CMOVLLE y _ (FlagGT_UGT))
  6501  	// cond:
  6502  	// result: y
  6503  	for {
  6504  		_ = v.Args[2]
  6505  		y := v.Args[0]
  6506  		v_2 := v.Args[2]
  6507  		if v_2.Op != OpAMD64FlagGT_UGT {
  6508  			break
  6509  		}
  6510  		v.reset(OpCopy)
  6511  		v.Type = y.Type
  6512  		v.AddArg(y)
  6513  		return true
  6514  	}
  6515  	// match: (CMOVLLE y _ (FlagGT_ULT))
  6516  	// cond:
  6517  	// result: y
  6518  	for {
  6519  		_ = v.Args[2]
  6520  		y := v.Args[0]
  6521  		v_2 := v.Args[2]
  6522  		if v_2.Op != OpAMD64FlagGT_ULT {
  6523  			break
  6524  		}
  6525  		v.reset(OpCopy)
  6526  		v.Type = y.Type
  6527  		v.AddArg(y)
  6528  		return true
  6529  	}
  6530  	// match: (CMOVLLE _ x (FlagLT_ULT))
  6531  	// cond:
  6532  	// result: x
  6533  	for {
  6534  		_ = v.Args[2]
  6535  		x := v.Args[1]
  6536  		v_2 := v.Args[2]
  6537  		if v_2.Op != OpAMD64FlagLT_ULT {
  6538  			break
  6539  		}
  6540  		v.reset(OpCopy)
  6541  		v.Type = x.Type
  6542  		v.AddArg(x)
  6543  		return true
  6544  	}
  6545  	// match: (CMOVLLE _ x (FlagLT_UGT))
  6546  	// cond:
  6547  	// result: x
  6548  	for {
  6549  		_ = v.Args[2]
  6550  		x := v.Args[1]
  6551  		v_2 := v.Args[2]
  6552  		if v_2.Op != OpAMD64FlagLT_UGT {
  6553  			break
  6554  		}
  6555  		v.reset(OpCopy)
  6556  		v.Type = x.Type
  6557  		v.AddArg(x)
  6558  		return true
  6559  	}
  6560  	return false
  6561  }
  6562  func rewriteValueAMD64_OpAMD64CMOVLLS_0(v *Value) bool {
  6563  	// match: (CMOVLLS x y (InvertFlags cond))
  6564  	// cond:
  6565  	// result: (CMOVLCC x y cond)
  6566  	for {
  6567  		_ = v.Args[2]
  6568  		x := v.Args[0]
  6569  		y := v.Args[1]
  6570  		v_2 := v.Args[2]
  6571  		if v_2.Op != OpAMD64InvertFlags {
  6572  			break
  6573  		}
  6574  		cond := v_2.Args[0]
  6575  		v.reset(OpAMD64CMOVLCC)
  6576  		v.AddArg(x)
  6577  		v.AddArg(y)
  6578  		v.AddArg(cond)
  6579  		return true
  6580  	}
  6581  	// match: (CMOVLLS _ x (FlagEQ))
  6582  	// cond:
  6583  	// result: x
  6584  	for {
  6585  		_ = v.Args[2]
  6586  		x := v.Args[1]
  6587  		v_2 := v.Args[2]
  6588  		if v_2.Op != OpAMD64FlagEQ {
  6589  			break
  6590  		}
  6591  		v.reset(OpCopy)
  6592  		v.Type = x.Type
  6593  		v.AddArg(x)
  6594  		return true
  6595  	}
  6596  	// match: (CMOVLLS y _ (FlagGT_UGT))
  6597  	// cond:
  6598  	// result: y
  6599  	for {
  6600  		_ = v.Args[2]
  6601  		y := v.Args[0]
  6602  		v_2 := v.Args[2]
  6603  		if v_2.Op != OpAMD64FlagGT_UGT {
  6604  			break
  6605  		}
  6606  		v.reset(OpCopy)
  6607  		v.Type = y.Type
  6608  		v.AddArg(y)
  6609  		return true
  6610  	}
  6611  	// match: (CMOVLLS _ x (FlagGT_ULT))
  6612  	// cond:
  6613  	// result: x
  6614  	for {
  6615  		_ = v.Args[2]
  6616  		x := v.Args[1]
  6617  		v_2 := v.Args[2]
  6618  		if v_2.Op != OpAMD64FlagGT_ULT {
  6619  			break
  6620  		}
  6621  		v.reset(OpCopy)
  6622  		v.Type = x.Type
  6623  		v.AddArg(x)
  6624  		return true
  6625  	}
  6626  	// match: (CMOVLLS _ x (FlagLT_ULT))
  6627  	// cond:
  6628  	// result: x
  6629  	for {
  6630  		_ = v.Args[2]
  6631  		x := v.Args[1]
  6632  		v_2 := v.Args[2]
  6633  		if v_2.Op != OpAMD64FlagLT_ULT {
  6634  			break
  6635  		}
  6636  		v.reset(OpCopy)
  6637  		v.Type = x.Type
  6638  		v.AddArg(x)
  6639  		return true
  6640  	}
  6641  	// match: (CMOVLLS y _ (FlagLT_UGT))
  6642  	// cond:
  6643  	// result: y
  6644  	for {
  6645  		_ = v.Args[2]
  6646  		y := v.Args[0]
  6647  		v_2 := v.Args[2]
  6648  		if v_2.Op != OpAMD64FlagLT_UGT {
  6649  			break
  6650  		}
  6651  		v.reset(OpCopy)
  6652  		v.Type = y.Type
  6653  		v.AddArg(y)
  6654  		return true
  6655  	}
  6656  	return false
  6657  }
  6658  func rewriteValueAMD64_OpAMD64CMOVLLT_0(v *Value) bool {
  6659  	// match: (CMOVLLT x y (InvertFlags cond))
  6660  	// cond:
  6661  	// result: (CMOVLGT x y cond)
  6662  	for {
  6663  		_ = v.Args[2]
  6664  		x := v.Args[0]
  6665  		y := v.Args[1]
  6666  		v_2 := v.Args[2]
  6667  		if v_2.Op != OpAMD64InvertFlags {
  6668  			break
  6669  		}
  6670  		cond := v_2.Args[0]
  6671  		v.reset(OpAMD64CMOVLGT)
  6672  		v.AddArg(x)
  6673  		v.AddArg(y)
  6674  		v.AddArg(cond)
  6675  		return true
  6676  	}
  6677  	// match: (CMOVLLT y _ (FlagEQ))
  6678  	// cond:
  6679  	// result: y
  6680  	for {
  6681  		_ = v.Args[2]
  6682  		y := v.Args[0]
  6683  		v_2 := v.Args[2]
  6684  		if v_2.Op != OpAMD64FlagEQ {
  6685  			break
  6686  		}
  6687  		v.reset(OpCopy)
  6688  		v.Type = y.Type
  6689  		v.AddArg(y)
  6690  		return true
  6691  	}
  6692  	// match: (CMOVLLT y _ (FlagGT_UGT))
  6693  	// cond:
  6694  	// result: y
  6695  	for {
  6696  		_ = v.Args[2]
  6697  		y := v.Args[0]
  6698  		v_2 := v.Args[2]
  6699  		if v_2.Op != OpAMD64FlagGT_UGT {
  6700  			break
  6701  		}
  6702  		v.reset(OpCopy)
  6703  		v.Type = y.Type
  6704  		v.AddArg(y)
  6705  		return true
  6706  	}
  6707  	// match: (CMOVLLT y _ (FlagGT_ULT))
  6708  	// cond:
  6709  	// result: y
  6710  	for {
  6711  		_ = v.Args[2]
  6712  		y := v.Args[0]
  6713  		v_2 := v.Args[2]
  6714  		if v_2.Op != OpAMD64FlagGT_ULT {
  6715  			break
  6716  		}
  6717  		v.reset(OpCopy)
  6718  		v.Type = y.Type
  6719  		v.AddArg(y)
  6720  		return true
  6721  	}
  6722  	// match: (CMOVLLT _ x (FlagLT_ULT))
  6723  	// cond:
  6724  	// result: x
  6725  	for {
  6726  		_ = v.Args[2]
  6727  		x := v.Args[1]
  6728  		v_2 := v.Args[2]
  6729  		if v_2.Op != OpAMD64FlagLT_ULT {
  6730  			break
  6731  		}
  6732  		v.reset(OpCopy)
  6733  		v.Type = x.Type
  6734  		v.AddArg(x)
  6735  		return true
  6736  	}
  6737  	// match: (CMOVLLT _ x (FlagLT_UGT))
  6738  	// cond:
  6739  	// result: x
  6740  	for {
  6741  		_ = v.Args[2]
  6742  		x := v.Args[1]
  6743  		v_2 := v.Args[2]
  6744  		if v_2.Op != OpAMD64FlagLT_UGT {
  6745  			break
  6746  		}
  6747  		v.reset(OpCopy)
  6748  		v.Type = x.Type
  6749  		v.AddArg(x)
  6750  		return true
  6751  	}
  6752  	return false
  6753  }
  6754  func rewriteValueAMD64_OpAMD64CMOVLNE_0(v *Value) bool {
  6755  	// match: (CMOVLNE x y (InvertFlags cond))
  6756  	// cond:
  6757  	// result: (CMOVLNE x y cond)
  6758  	for {
  6759  		_ = v.Args[2]
  6760  		x := v.Args[0]
  6761  		y := v.Args[1]
  6762  		v_2 := v.Args[2]
  6763  		if v_2.Op != OpAMD64InvertFlags {
  6764  			break
  6765  		}
  6766  		cond := v_2.Args[0]
  6767  		v.reset(OpAMD64CMOVLNE)
  6768  		v.AddArg(x)
  6769  		v.AddArg(y)
  6770  		v.AddArg(cond)
  6771  		return true
  6772  	}
  6773  	// match: (CMOVLNE y _ (FlagEQ))
  6774  	// cond:
  6775  	// result: y
  6776  	for {
  6777  		_ = v.Args[2]
  6778  		y := v.Args[0]
  6779  		v_2 := v.Args[2]
  6780  		if v_2.Op != OpAMD64FlagEQ {
  6781  			break
  6782  		}
  6783  		v.reset(OpCopy)
  6784  		v.Type = y.Type
  6785  		v.AddArg(y)
  6786  		return true
  6787  	}
  6788  	// match: (CMOVLNE _ x (FlagGT_UGT))
  6789  	// cond:
  6790  	// result: x
  6791  	for {
  6792  		_ = v.Args[2]
  6793  		x := v.Args[1]
  6794  		v_2 := v.Args[2]
  6795  		if v_2.Op != OpAMD64FlagGT_UGT {
  6796  			break
  6797  		}
  6798  		v.reset(OpCopy)
  6799  		v.Type = x.Type
  6800  		v.AddArg(x)
  6801  		return true
  6802  	}
  6803  	// match: (CMOVLNE _ x (FlagGT_ULT))
  6804  	// cond:
  6805  	// result: x
  6806  	for {
  6807  		_ = v.Args[2]
  6808  		x := v.Args[1]
  6809  		v_2 := v.Args[2]
  6810  		if v_2.Op != OpAMD64FlagGT_ULT {
  6811  			break
  6812  		}
  6813  		v.reset(OpCopy)
  6814  		v.Type = x.Type
  6815  		v.AddArg(x)
  6816  		return true
  6817  	}
  6818  	// match: (CMOVLNE _ x (FlagLT_ULT))
  6819  	// cond:
  6820  	// result: x
  6821  	for {
  6822  		_ = v.Args[2]
  6823  		x := v.Args[1]
  6824  		v_2 := v.Args[2]
  6825  		if v_2.Op != OpAMD64FlagLT_ULT {
  6826  			break
  6827  		}
  6828  		v.reset(OpCopy)
  6829  		v.Type = x.Type
  6830  		v.AddArg(x)
  6831  		return true
  6832  	}
  6833  	// match: (CMOVLNE _ x (FlagLT_UGT))
  6834  	// cond:
  6835  	// result: x
  6836  	for {
  6837  		_ = v.Args[2]
  6838  		x := v.Args[1]
  6839  		v_2 := v.Args[2]
  6840  		if v_2.Op != OpAMD64FlagLT_UGT {
  6841  			break
  6842  		}
  6843  		v.reset(OpCopy)
  6844  		v.Type = x.Type
  6845  		v.AddArg(x)
  6846  		return true
  6847  	}
  6848  	return false
  6849  }
  6850  func rewriteValueAMD64_OpAMD64CMOVQCC_0(v *Value) bool {
  6851  	// match: (CMOVQCC x y (InvertFlags cond))
  6852  	// cond:
  6853  	// result: (CMOVQLS x y cond)
  6854  	for {
  6855  		_ = v.Args[2]
  6856  		x := v.Args[0]
  6857  		y := v.Args[1]
  6858  		v_2 := v.Args[2]
  6859  		if v_2.Op != OpAMD64InvertFlags {
  6860  			break
  6861  		}
  6862  		cond := v_2.Args[0]
  6863  		v.reset(OpAMD64CMOVQLS)
  6864  		v.AddArg(x)
  6865  		v.AddArg(y)
  6866  		v.AddArg(cond)
  6867  		return true
  6868  	}
  6869  	// match: (CMOVQCC _ x (FlagEQ))
  6870  	// cond:
  6871  	// result: x
  6872  	for {
  6873  		_ = v.Args[2]
  6874  		x := v.Args[1]
  6875  		v_2 := v.Args[2]
  6876  		if v_2.Op != OpAMD64FlagEQ {
  6877  			break
  6878  		}
  6879  		v.reset(OpCopy)
  6880  		v.Type = x.Type
  6881  		v.AddArg(x)
  6882  		return true
  6883  	}
  6884  	// match: (CMOVQCC _ x (FlagGT_UGT))
  6885  	// cond:
  6886  	// result: x
  6887  	for {
  6888  		_ = v.Args[2]
  6889  		x := v.Args[1]
  6890  		v_2 := v.Args[2]
  6891  		if v_2.Op != OpAMD64FlagGT_UGT {
  6892  			break
  6893  		}
  6894  		v.reset(OpCopy)
  6895  		v.Type = x.Type
  6896  		v.AddArg(x)
  6897  		return true
  6898  	}
  6899  	// match: (CMOVQCC y _ (FlagGT_ULT))
  6900  	// cond:
  6901  	// result: y
  6902  	for {
  6903  		_ = v.Args[2]
  6904  		y := v.Args[0]
  6905  		v_2 := v.Args[2]
  6906  		if v_2.Op != OpAMD64FlagGT_ULT {
  6907  			break
  6908  		}
  6909  		v.reset(OpCopy)
  6910  		v.Type = y.Type
  6911  		v.AddArg(y)
  6912  		return true
  6913  	}
  6914  	// match: (CMOVQCC y _ (FlagLT_ULT))
  6915  	// cond:
  6916  	// result: y
  6917  	for {
  6918  		_ = v.Args[2]
  6919  		y := v.Args[0]
  6920  		v_2 := v.Args[2]
  6921  		if v_2.Op != OpAMD64FlagLT_ULT {
  6922  			break
  6923  		}
  6924  		v.reset(OpCopy)
  6925  		v.Type = y.Type
  6926  		v.AddArg(y)
  6927  		return true
  6928  	}
  6929  	// match: (CMOVQCC _ x (FlagLT_UGT))
  6930  	// cond:
  6931  	// result: x
  6932  	for {
  6933  		_ = v.Args[2]
  6934  		x := v.Args[1]
  6935  		v_2 := v.Args[2]
  6936  		if v_2.Op != OpAMD64FlagLT_UGT {
  6937  			break
  6938  		}
  6939  		v.reset(OpCopy)
  6940  		v.Type = x.Type
  6941  		v.AddArg(x)
  6942  		return true
  6943  	}
  6944  	return false
  6945  }
  6946  func rewriteValueAMD64_OpAMD64CMOVQCS_0(v *Value) bool {
  6947  	// match: (CMOVQCS x y (InvertFlags cond))
  6948  	// cond:
  6949  	// result: (CMOVQHI x y cond)
  6950  	for {
  6951  		_ = v.Args[2]
  6952  		x := v.Args[0]
  6953  		y := v.Args[1]
  6954  		v_2 := v.Args[2]
  6955  		if v_2.Op != OpAMD64InvertFlags {
  6956  			break
  6957  		}
  6958  		cond := v_2.Args[0]
  6959  		v.reset(OpAMD64CMOVQHI)
  6960  		v.AddArg(x)
  6961  		v.AddArg(y)
  6962  		v.AddArg(cond)
  6963  		return true
  6964  	}
  6965  	// match: (CMOVQCS y _ (FlagEQ))
  6966  	// cond:
  6967  	// result: y
  6968  	for {
  6969  		_ = v.Args[2]
  6970  		y := v.Args[0]
  6971  		v_2 := v.Args[2]
  6972  		if v_2.Op != OpAMD64FlagEQ {
  6973  			break
  6974  		}
  6975  		v.reset(OpCopy)
  6976  		v.Type = y.Type
  6977  		v.AddArg(y)
  6978  		return true
  6979  	}
  6980  	// match: (CMOVQCS y _ (FlagGT_UGT))
  6981  	// cond:
  6982  	// result: y
  6983  	for {
  6984  		_ = v.Args[2]
  6985  		y := v.Args[0]
  6986  		v_2 := v.Args[2]
  6987  		if v_2.Op != OpAMD64FlagGT_UGT {
  6988  			break
  6989  		}
  6990  		v.reset(OpCopy)
  6991  		v.Type = y.Type
  6992  		v.AddArg(y)
  6993  		return true
  6994  	}
  6995  	// match: (CMOVQCS _ x (FlagGT_ULT))
  6996  	// cond:
  6997  	// result: x
  6998  	for {
  6999  		_ = v.Args[2]
  7000  		x := v.Args[1]
  7001  		v_2 := v.Args[2]
  7002  		if v_2.Op != OpAMD64FlagGT_ULT {
  7003  			break
  7004  		}
  7005  		v.reset(OpCopy)
  7006  		v.Type = x.Type
  7007  		v.AddArg(x)
  7008  		return true
  7009  	}
  7010  	// match: (CMOVQCS _ x (FlagLT_ULT))
  7011  	// cond:
  7012  	// result: x
  7013  	for {
  7014  		_ = v.Args[2]
  7015  		x := v.Args[1]
  7016  		v_2 := v.Args[2]
  7017  		if v_2.Op != OpAMD64FlagLT_ULT {
  7018  			break
  7019  		}
  7020  		v.reset(OpCopy)
  7021  		v.Type = x.Type
  7022  		v.AddArg(x)
  7023  		return true
  7024  	}
  7025  	// match: (CMOVQCS y _ (FlagLT_UGT))
  7026  	// cond:
  7027  	// result: y
  7028  	for {
  7029  		_ = v.Args[2]
  7030  		y := v.Args[0]
  7031  		v_2 := v.Args[2]
  7032  		if v_2.Op != OpAMD64FlagLT_UGT {
  7033  			break
  7034  		}
  7035  		v.reset(OpCopy)
  7036  		v.Type = y.Type
  7037  		v.AddArg(y)
  7038  		return true
  7039  	}
  7040  	return false
  7041  }
  7042  func rewriteValueAMD64_OpAMD64CMOVQEQ_0(v *Value) bool {
  7043  	// match: (CMOVQEQ x y (InvertFlags cond))
  7044  	// cond:
  7045  	// result: (CMOVQEQ x y cond)
  7046  	for {
  7047  		_ = v.Args[2]
  7048  		x := v.Args[0]
  7049  		y := v.Args[1]
  7050  		v_2 := v.Args[2]
  7051  		if v_2.Op != OpAMD64InvertFlags {
  7052  			break
  7053  		}
  7054  		cond := v_2.Args[0]
  7055  		v.reset(OpAMD64CMOVQEQ)
  7056  		v.AddArg(x)
  7057  		v.AddArg(y)
  7058  		v.AddArg(cond)
  7059  		return true
  7060  	}
  7061  	// match: (CMOVQEQ _ x (FlagEQ))
  7062  	// cond:
  7063  	// result: x
  7064  	for {
  7065  		_ = v.Args[2]
  7066  		x := v.Args[1]
  7067  		v_2 := v.Args[2]
  7068  		if v_2.Op != OpAMD64FlagEQ {
  7069  			break
  7070  		}
  7071  		v.reset(OpCopy)
  7072  		v.Type = x.Type
  7073  		v.AddArg(x)
  7074  		return true
  7075  	}
  7076  	// match: (CMOVQEQ y _ (FlagGT_UGT))
  7077  	// cond:
  7078  	// result: y
  7079  	for {
  7080  		_ = v.Args[2]
  7081  		y := v.Args[0]
  7082  		v_2 := v.Args[2]
  7083  		if v_2.Op != OpAMD64FlagGT_UGT {
  7084  			break
  7085  		}
  7086  		v.reset(OpCopy)
  7087  		v.Type = y.Type
  7088  		v.AddArg(y)
  7089  		return true
  7090  	}
  7091  	// match: (CMOVQEQ y _ (FlagGT_ULT))
  7092  	// cond:
  7093  	// result: y
  7094  	for {
  7095  		_ = v.Args[2]
  7096  		y := v.Args[0]
  7097  		v_2 := v.Args[2]
  7098  		if v_2.Op != OpAMD64FlagGT_ULT {
  7099  			break
  7100  		}
  7101  		v.reset(OpCopy)
  7102  		v.Type = y.Type
  7103  		v.AddArg(y)
  7104  		return true
  7105  	}
  7106  	// match: (CMOVQEQ y _ (FlagLT_ULT))
  7107  	// cond:
  7108  	// result: y
  7109  	for {
  7110  		_ = v.Args[2]
  7111  		y := v.Args[0]
  7112  		v_2 := v.Args[2]
  7113  		if v_2.Op != OpAMD64FlagLT_ULT {
  7114  			break
  7115  		}
  7116  		v.reset(OpCopy)
  7117  		v.Type = y.Type
  7118  		v.AddArg(y)
  7119  		return true
  7120  	}
  7121  	// match: (CMOVQEQ y _ (FlagLT_UGT))
  7122  	// cond:
  7123  	// result: y
  7124  	for {
  7125  		_ = v.Args[2]
  7126  		y := v.Args[0]
  7127  		v_2 := v.Args[2]
  7128  		if v_2.Op != OpAMD64FlagLT_UGT {
  7129  			break
  7130  		}
  7131  		v.reset(OpCopy)
  7132  		v.Type = y.Type
  7133  		v.AddArg(y)
  7134  		return true
  7135  	}
  7136  	// match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
  7137  	// cond: c != 0
  7138  	// result: x
  7139  	for {
  7140  		_ = v.Args[2]
  7141  		x := v.Args[0]
  7142  		v_2 := v.Args[2]
  7143  		if v_2.Op != OpSelect1 {
  7144  			break
  7145  		}
  7146  		v_2_0 := v_2.Args[0]
  7147  		if v_2_0.Op != OpAMD64BSFQ {
  7148  			break
  7149  		}
  7150  		v_2_0_0 := v_2_0.Args[0]
  7151  		if v_2_0_0.Op != OpAMD64ORQconst {
  7152  			break
  7153  		}
  7154  		c := v_2_0_0.AuxInt
  7155  		if !(c != 0) {
  7156  			break
  7157  		}
  7158  		v.reset(OpCopy)
  7159  		v.Type = x.Type
  7160  		v.AddArg(x)
  7161  		return true
  7162  	}
  7163  	return false
  7164  }
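// Note on the final CMOVQEQ rule above: BSFQ sets the zero flag only when its
// input is zero, and (ORQconst [c] _) with c != 0 is provably nonzero, so the
// EQ condition can never hold and the CMOVQEQ always keeps its first operand.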
  7165  func rewriteValueAMD64_OpAMD64CMOVQGE_0(v *Value) bool {
  7166  	// match: (CMOVQGE x y (InvertFlags cond))
  7167  	// cond:
  7168  	// result: (CMOVQLE x y cond)
  7169  	for {
  7170  		_ = v.Args[2]
  7171  		x := v.Args[0]
  7172  		y := v.Args[1]
  7173  		v_2 := v.Args[2]
  7174  		if v_2.Op != OpAMD64InvertFlags {
  7175  			break
  7176  		}
  7177  		cond := v_2.Args[0]
  7178  		v.reset(OpAMD64CMOVQLE)
  7179  		v.AddArg(x)
  7180  		v.AddArg(y)
  7181  		v.AddArg(cond)
  7182  		return true
  7183  	}
  7184  	// match: (CMOVQGE _ x (FlagEQ))
  7185  	// cond:
  7186  	// result: x
  7187  	for {
  7188  		_ = v.Args[2]
  7189  		x := v.Args[1]
  7190  		v_2 := v.Args[2]
  7191  		if v_2.Op != OpAMD64FlagEQ {
  7192  			break
  7193  		}
  7194  		v.reset(OpCopy)
  7195  		v.Type = x.Type
  7196  		v.AddArg(x)
  7197  		return true
  7198  	}
  7199  	// match: (CMOVQGE _ x (FlagGT_UGT))
  7200  	// cond:
  7201  	// result: x
  7202  	for {
  7203  		_ = v.Args[2]
  7204  		x := v.Args[1]
  7205  		v_2 := v.Args[2]
  7206  		if v_2.Op != OpAMD64FlagGT_UGT {
  7207  			break
  7208  		}
  7209  		v.reset(OpCopy)
  7210  		v.Type = x.Type
  7211  		v.AddArg(x)
  7212  		return true
  7213  	}
  7214  	// match: (CMOVQGE _ x (FlagGT_ULT))
  7215  	// cond:
  7216  	// result: x
  7217  	for {
  7218  		_ = v.Args[2]
  7219  		x := v.Args[1]
  7220  		v_2 := v.Args[2]
  7221  		if v_2.Op != OpAMD64FlagGT_ULT {
  7222  			break
  7223  		}
  7224  		v.reset(OpCopy)
  7225  		v.Type = x.Type
  7226  		v.AddArg(x)
  7227  		return true
  7228  	}
  7229  	// match: (CMOVQGE y _ (FlagLT_ULT))
  7230  	// cond:
  7231  	// result: y
  7232  	for {
  7233  		_ = v.Args[2]
  7234  		y := v.Args[0]
  7235  		v_2 := v.Args[2]
  7236  		if v_2.Op != OpAMD64FlagLT_ULT {
  7237  			break
  7238  		}
  7239  		v.reset(OpCopy)
  7240  		v.Type = y.Type
  7241  		v.AddArg(y)
  7242  		return true
  7243  	}
  7244  	// match: (CMOVQGE y _ (FlagLT_UGT))
  7245  	// cond:
  7246  	// result: y
  7247  	for {
  7248  		_ = v.Args[2]
  7249  		y := v.Args[0]
  7250  		v_2 := v.Args[2]
  7251  		if v_2.Op != OpAMD64FlagLT_UGT {
  7252  			break
  7253  		}
  7254  		v.reset(OpCopy)
  7255  		v.Type = y.Type
  7256  		v.AddArg(y)
  7257  		return true
  7258  	}
  7259  	return false
  7260  }
  7261  func rewriteValueAMD64_OpAMD64CMOVQGT_0(v *Value) bool {
  7262  	// match: (CMOVQGT x y (InvertFlags cond))
  7263  	// cond:
  7264  	// result: (CMOVQLT x y cond)
  7265  	for {
  7266  		_ = v.Args[2]
  7267  		x := v.Args[0]
  7268  		y := v.Args[1]
  7269  		v_2 := v.Args[2]
  7270  		if v_2.Op != OpAMD64InvertFlags {
  7271  			break
  7272  		}
  7273  		cond := v_2.Args[0]
  7274  		v.reset(OpAMD64CMOVQLT)
  7275  		v.AddArg(x)
  7276  		v.AddArg(y)
  7277  		v.AddArg(cond)
  7278  		return true
  7279  	}
  7280  	// match: (CMOVQGT y _ (FlagEQ))
  7281  	// cond:
  7282  	// result: y
  7283  	for {
  7284  		_ = v.Args[2]
  7285  		y := v.Args[0]
  7286  		v_2 := v.Args[2]
  7287  		if v_2.Op != OpAMD64FlagEQ {
  7288  			break
  7289  		}
  7290  		v.reset(OpCopy)
  7291  		v.Type = y.Type
  7292  		v.AddArg(y)
  7293  		return true
  7294  	}
  7295  	// match: (CMOVQGT _ x (FlagGT_UGT))
  7296  	// cond:
  7297  	// result: x
  7298  	for {
  7299  		_ = v.Args[2]
  7300  		x := v.Args[1]
  7301  		v_2 := v.Args[2]
  7302  		if v_2.Op != OpAMD64FlagGT_UGT {
  7303  			break
  7304  		}
  7305  		v.reset(OpCopy)
  7306  		v.Type = x.Type
  7307  		v.AddArg(x)
  7308  		return true
  7309  	}
  7310  	// match: (CMOVQGT _ x (FlagGT_ULT))
  7311  	// cond:
  7312  	// result: x
  7313  	for {
  7314  		_ = v.Args[2]
  7315  		x := v.Args[1]
  7316  		v_2 := v.Args[2]
  7317  		if v_2.Op != OpAMD64FlagGT_ULT {
  7318  			break
  7319  		}
  7320  		v.reset(OpCopy)
  7321  		v.Type = x.Type
  7322  		v.AddArg(x)
  7323  		return true
  7324  	}
  7325  	// match: (CMOVQGT y _ (FlagLT_ULT))
  7326  	// cond:
  7327  	// result: y
  7328  	for {
  7329  		_ = v.Args[2]
  7330  		y := v.Args[0]
  7331  		v_2 := v.Args[2]
  7332  		if v_2.Op != OpAMD64FlagLT_ULT {
  7333  			break
  7334  		}
  7335  		v.reset(OpCopy)
  7336  		v.Type = y.Type
  7337  		v.AddArg(y)
  7338  		return true
  7339  	}
  7340  	// match: (CMOVQGT y _ (FlagLT_UGT))
  7341  	// cond:
  7342  	// result: y
  7343  	for {
  7344  		_ = v.Args[2]
  7345  		y := v.Args[0]
  7346  		v_2 := v.Args[2]
  7347  		if v_2.Op != OpAMD64FlagLT_UGT {
  7348  			break
  7349  		}
  7350  		v.reset(OpCopy)
  7351  		v.Type = y.Type
  7352  		v.AddArg(y)
  7353  		return true
  7354  	}
  7355  	return false
  7356  }
  7357  func rewriteValueAMD64_OpAMD64CMOVQHI_0(v *Value) bool {
  7358  	// match: (CMOVQHI x y (InvertFlags cond))
  7359  	// cond:
  7360  	// result: (CMOVQCS x y cond)
  7361  	for {
  7362  		_ = v.Args[2]
  7363  		x := v.Args[0]
  7364  		y := v.Args[1]
  7365  		v_2 := v.Args[2]
  7366  		if v_2.Op != OpAMD64InvertFlags {
  7367  			break
  7368  		}
  7369  		cond := v_2.Args[0]
  7370  		v.reset(OpAMD64CMOVQCS)
  7371  		v.AddArg(x)
  7372  		v.AddArg(y)
  7373  		v.AddArg(cond)
  7374  		return true
  7375  	}
  7376  	// match: (CMOVQHI y _ (FlagEQ))
  7377  	// cond:
  7378  	// result: y
  7379  	for {
  7380  		_ = v.Args[2]
  7381  		y := v.Args[0]
  7382  		v_2 := v.Args[2]
  7383  		if v_2.Op != OpAMD64FlagEQ {
  7384  			break
  7385  		}
  7386  		v.reset(OpCopy)
  7387  		v.Type = y.Type
  7388  		v.AddArg(y)
  7389  		return true
  7390  	}
  7391  	// match: (CMOVQHI _ x (FlagGT_UGT))
  7392  	// cond:
  7393  	// result: x
  7394  	for {
  7395  		_ = v.Args[2]
  7396  		x := v.Args[1]
  7397  		v_2 := v.Args[2]
  7398  		if v_2.Op != OpAMD64FlagGT_UGT {
  7399  			break
  7400  		}
  7401  		v.reset(OpCopy)
  7402  		v.Type = x.Type
  7403  		v.AddArg(x)
  7404  		return true
  7405  	}
  7406  	// match: (CMOVQHI y _ (FlagGT_ULT))
  7407  	// cond:
  7408  	// result: y
  7409  	for {
  7410  		_ = v.Args[2]
  7411  		y := v.Args[0]
  7412  		v_2 := v.Args[2]
  7413  		if v_2.Op != OpAMD64FlagGT_ULT {
  7414  			break
  7415  		}
  7416  		v.reset(OpCopy)
  7417  		v.Type = y.Type
  7418  		v.AddArg(y)
  7419  		return true
  7420  	}
  7421  	// match: (CMOVQHI y _ (FlagLT_ULT))
  7422  	// cond:
  7423  	// result: y
  7424  	for {
  7425  		_ = v.Args[2]
  7426  		y := v.Args[0]
  7427  		v_2 := v.Args[2]
  7428  		if v_2.Op != OpAMD64FlagLT_ULT {
  7429  			break
  7430  		}
  7431  		v.reset(OpCopy)
  7432  		v.Type = y.Type
  7433  		v.AddArg(y)
  7434  		return true
  7435  	}
  7436  	// match: (CMOVQHI _ x (FlagLT_UGT))
  7437  	// cond:
  7438  	// result: x
  7439  	for {
  7440  		_ = v.Args[2]
  7441  		x := v.Args[1]
  7442  		v_2 := v.Args[2]
  7443  		if v_2.Op != OpAMD64FlagLT_UGT {
  7444  			break
  7445  		}
  7446  		v.reset(OpCopy)
  7447  		v.Type = x.Type
  7448  		v.AddArg(x)
  7449  		return true
  7450  	}
  7451  	return false
  7452  }
  7453  func rewriteValueAMD64_OpAMD64CMOVQLE_0(v *Value) bool {
  7454  	// match: (CMOVQLE x y (InvertFlags cond))
  7455  	// cond:
  7456  	// result: (CMOVQGE x y cond)
  7457  	for {
  7458  		_ = v.Args[2]
  7459  		x := v.Args[0]
  7460  		y := v.Args[1]
  7461  		v_2 := v.Args[2]
  7462  		if v_2.Op != OpAMD64InvertFlags {
  7463  			break
  7464  		}
  7465  		cond := v_2.Args[0]
  7466  		v.reset(OpAMD64CMOVQGE)
  7467  		v.AddArg(x)
  7468  		v.AddArg(y)
  7469  		v.AddArg(cond)
  7470  		return true
  7471  	}
  7472  	// match: (CMOVQLE _ x (FlagEQ))
  7473  	// cond:
  7474  	// result: x
  7475  	for {
  7476  		_ = v.Args[2]
  7477  		x := v.Args[1]
  7478  		v_2 := v.Args[2]
  7479  		if v_2.Op != OpAMD64FlagEQ {
  7480  			break
  7481  		}
  7482  		v.reset(OpCopy)
  7483  		v.Type = x.Type
  7484  		v.AddArg(x)
  7485  		return true
  7486  	}
  7487  	// match: (CMOVQLE y _ (FlagGT_UGT))
  7488  	// cond:
  7489  	// result: y
  7490  	for {
  7491  		_ = v.Args[2]
  7492  		y := v.Args[0]
  7493  		v_2 := v.Args[2]
  7494  		if v_2.Op != OpAMD64FlagGT_UGT {
  7495  			break
  7496  		}
  7497  		v.reset(OpCopy)
  7498  		v.Type = y.Type
  7499  		v.AddArg(y)
  7500  		return true
  7501  	}
  7502  	// match: (CMOVQLE y _ (FlagGT_ULT))
  7503  	// cond:
  7504  	// result: y
  7505  	for {
  7506  		_ = v.Args[2]
  7507  		y := v.Args[0]
  7508  		v_2 := v.Args[2]
  7509  		if v_2.Op != OpAMD64FlagGT_ULT {
  7510  			break
  7511  		}
  7512  		v.reset(OpCopy)
  7513  		v.Type = y.Type
  7514  		v.AddArg(y)
  7515  		return true
  7516  	}
  7517  	// match: (CMOVQLE _ x (FlagLT_ULT))
  7518  	// cond:
  7519  	// result: x
  7520  	for {
  7521  		_ = v.Args[2]
  7522  		x := v.Args[1]
  7523  		v_2 := v.Args[2]
  7524  		if v_2.Op != OpAMD64FlagLT_ULT {
  7525  			break
  7526  		}
  7527  		v.reset(OpCopy)
  7528  		v.Type = x.Type
  7529  		v.AddArg(x)
  7530  		return true
  7531  	}
  7532  	// match: (CMOVQLE _ x (FlagLT_UGT))
  7533  	// cond:
  7534  	// result: x
  7535  	for {
  7536  		_ = v.Args[2]
  7537  		x := v.Args[1]
  7538  		v_2 := v.Args[2]
  7539  		if v_2.Op != OpAMD64FlagLT_UGT {
  7540  			break
  7541  		}
  7542  		v.reset(OpCopy)
  7543  		v.Type = x.Type
  7544  		v.AddArg(x)
  7545  		return true
  7546  	}
  7547  	return false
  7548  }
  7549  func rewriteValueAMD64_OpAMD64CMOVQLS_0(v *Value) bool {
  7550  	// match: (CMOVQLS x y (InvertFlags cond))
  7551  	// cond:
  7552  	// result: (CMOVQCC x y cond)
  7553  	for {
  7554  		_ = v.Args[2]
  7555  		x := v.Args[0]
  7556  		y := v.Args[1]
  7557  		v_2 := v.Args[2]
  7558  		if v_2.Op != OpAMD64InvertFlags {
  7559  			break
  7560  		}
  7561  		cond := v_2.Args[0]
  7562  		v.reset(OpAMD64CMOVQCC)
  7563  		v.AddArg(x)
  7564  		v.AddArg(y)
  7565  		v.AddArg(cond)
  7566  		return true
  7567  	}
  7568  	// match: (CMOVQLS _ x (FlagEQ))
  7569  	// cond:
  7570  	// result: x
  7571  	for {
  7572  		_ = v.Args[2]
  7573  		x := v.Args[1]
  7574  		v_2 := v.Args[2]
  7575  		if v_2.Op != OpAMD64FlagEQ {
  7576  			break
  7577  		}
  7578  		v.reset(OpCopy)
  7579  		v.Type = x.Type
  7580  		v.AddArg(x)
  7581  		return true
  7582  	}
  7583  	// match: (CMOVQLS y _ (FlagGT_UGT))
  7584  	// cond:
  7585  	// result: y
  7586  	for {
  7587  		_ = v.Args[2]
  7588  		y := v.Args[0]
  7589  		v_2 := v.Args[2]
  7590  		if v_2.Op != OpAMD64FlagGT_UGT {
  7591  			break
  7592  		}
  7593  		v.reset(OpCopy)
  7594  		v.Type = y.Type
  7595  		v.AddArg(y)
  7596  		return true
  7597  	}
  7598  	// match: (CMOVQLS _ x (FlagGT_ULT))
  7599  	// cond:
  7600  	// result: x
  7601  	for {
  7602  		_ = v.Args[2]
  7603  		x := v.Args[1]
  7604  		v_2 := v.Args[2]
  7605  		if v_2.Op != OpAMD64FlagGT_ULT {
  7606  			break
  7607  		}
  7608  		v.reset(OpCopy)
  7609  		v.Type = x.Type
  7610  		v.AddArg(x)
  7611  		return true
  7612  	}
  7613  	// match: (CMOVQLS _ x (FlagLT_ULT))
  7614  	// cond:
  7615  	// result: x
  7616  	for {
  7617  		_ = v.Args[2]
  7618  		x := v.Args[1]
  7619  		v_2 := v.Args[2]
  7620  		if v_2.Op != OpAMD64FlagLT_ULT {
  7621  			break
  7622  		}
  7623  		v.reset(OpCopy)
  7624  		v.Type = x.Type
  7625  		v.AddArg(x)
  7626  		return true
  7627  	}
  7628  	// match: (CMOVQLS y _ (FlagLT_UGT))
  7629  	// cond:
  7630  	// result: y
  7631  	for {
  7632  		_ = v.Args[2]
  7633  		y := v.Args[0]
  7634  		v_2 := v.Args[2]
  7635  		if v_2.Op != OpAMD64FlagLT_UGT {
  7636  			break
  7637  		}
  7638  		v.reset(OpCopy)
  7639  		v.Type = y.Type
  7640  		v.AddArg(y)
  7641  		return true
  7642  	}
  7643  	return false
  7644  }
  7645  func rewriteValueAMD64_OpAMD64CMOVQLT_0(v *Value) bool {
  7646  	// match: (CMOVQLT x y (InvertFlags cond))
  7647  	// cond:
  7648  	// result: (CMOVQGT x y cond)
  7649  	for {
  7650  		_ = v.Args[2]
  7651  		x := v.Args[0]
  7652  		y := v.Args[1]
  7653  		v_2 := v.Args[2]
  7654  		if v_2.Op != OpAMD64InvertFlags {
  7655  			break
  7656  		}
  7657  		cond := v_2.Args[0]
  7658  		v.reset(OpAMD64CMOVQGT)
  7659  		v.AddArg(x)
  7660  		v.AddArg(y)
  7661  		v.AddArg(cond)
  7662  		return true
  7663  	}
  7664  	// match: (CMOVQLT y _ (FlagEQ))
  7665  	// cond:
  7666  	// result: y
  7667  	for {
  7668  		_ = v.Args[2]
  7669  		y := v.Args[0]
  7670  		v_2 := v.Args[2]
  7671  		if v_2.Op != OpAMD64FlagEQ {
  7672  			break
  7673  		}
  7674  		v.reset(OpCopy)
  7675  		v.Type = y.Type
  7676  		v.AddArg(y)
  7677  		return true
  7678  	}
  7679  	// match: (CMOVQLT y _ (FlagGT_UGT))
  7680  	// cond:
  7681  	// result: y
  7682  	for {
  7683  		_ = v.Args[2]
  7684  		y := v.Args[0]
  7685  		v_2 := v.Args[2]
  7686  		if v_2.Op != OpAMD64FlagGT_UGT {
  7687  			break
  7688  		}
  7689  		v.reset(OpCopy)
  7690  		v.Type = y.Type
  7691  		v.AddArg(y)
  7692  		return true
  7693  	}
  7694  	// match: (CMOVQLT y _ (FlagGT_ULT))
  7695  	// cond:
  7696  	// result: y
  7697  	for {
  7698  		_ = v.Args[2]
  7699  		y := v.Args[0]
  7700  		v_2 := v.Args[2]
  7701  		if v_2.Op != OpAMD64FlagGT_ULT {
  7702  			break
  7703  		}
  7704  		v.reset(OpCopy)
  7705  		v.Type = y.Type
  7706  		v.AddArg(y)
  7707  		return true
  7708  	}
  7709  	// match: (CMOVQLT _ x (FlagLT_ULT))
  7710  	// cond:
  7711  	// result: x
  7712  	for {
  7713  		_ = v.Args[2]
  7714  		x := v.Args[1]
  7715  		v_2 := v.Args[2]
  7716  		if v_2.Op != OpAMD64FlagLT_ULT {
  7717  			break
  7718  		}
  7719  		v.reset(OpCopy)
  7720  		v.Type = x.Type
  7721  		v.AddArg(x)
  7722  		return true
  7723  	}
  7724  	// match: (CMOVQLT _ x (FlagLT_UGT))
  7725  	// cond:
  7726  	// result: x
  7727  	for {
  7728  		_ = v.Args[2]
  7729  		x := v.Args[1]
  7730  		v_2 := v.Args[2]
  7731  		if v_2.Op != OpAMD64FlagLT_UGT {
  7732  			break
  7733  		}
  7734  		v.reset(OpCopy)
  7735  		v.Type = x.Type
  7736  		v.AddArg(x)
  7737  		return true
  7738  	}
  7739  	return false
  7740  }
  7741  func rewriteValueAMD64_OpAMD64CMOVQNE_0(v *Value) bool {
  7742  	// match: (CMOVQNE x y (InvertFlags cond))
  7743  	// cond:
  7744  	// result: (CMOVQNE x y cond)
  7745  	for {
  7746  		_ = v.Args[2]
  7747  		x := v.Args[0]
  7748  		y := v.Args[1]
  7749  		v_2 := v.Args[2]
  7750  		if v_2.Op != OpAMD64InvertFlags {
  7751  			break
  7752  		}
  7753  		cond := v_2.Args[0]
  7754  		v.reset(OpAMD64CMOVQNE)
  7755  		v.AddArg(x)
  7756  		v.AddArg(y)
  7757  		v.AddArg(cond)
  7758  		return true
  7759  	}
  7760  	// match: (CMOVQNE y _ (FlagEQ))
  7761  	// cond:
  7762  	// result: y
  7763  	for {
  7764  		_ = v.Args[2]
  7765  		y := v.Args[0]
  7766  		v_2 := v.Args[2]
  7767  		if v_2.Op != OpAMD64FlagEQ {
  7768  			break
  7769  		}
  7770  		v.reset(OpCopy)
  7771  		v.Type = y.Type
  7772  		v.AddArg(y)
  7773  		return true
  7774  	}
  7775  	// match: (CMOVQNE _ x (FlagGT_UGT))
  7776  	// cond:
  7777  	// result: x
  7778  	for {
  7779  		_ = v.Args[2]
  7780  		x := v.Args[1]
  7781  		v_2 := v.Args[2]
  7782  		if v_2.Op != OpAMD64FlagGT_UGT {
  7783  			break
  7784  		}
  7785  		v.reset(OpCopy)
  7786  		v.Type = x.Type
  7787  		v.AddArg(x)
  7788  		return true
  7789  	}
  7790  	// match: (CMOVQNE _ x (FlagGT_ULT))
  7791  	// cond:
  7792  	// result: x
  7793  	for {
  7794  		_ = v.Args[2]
  7795  		x := v.Args[1]
  7796  		v_2 := v.Args[2]
  7797  		if v_2.Op != OpAMD64FlagGT_ULT {
  7798  			break
  7799  		}
  7800  		v.reset(OpCopy)
  7801  		v.Type = x.Type
  7802  		v.AddArg(x)
  7803  		return true
  7804  	}
  7805  	// match: (CMOVQNE _ x (FlagLT_ULT))
  7806  	// cond:
  7807  	// result: x
  7808  	for {
  7809  		_ = v.Args[2]
  7810  		x := v.Args[1]
  7811  		v_2 := v.Args[2]
  7812  		if v_2.Op != OpAMD64FlagLT_ULT {
  7813  			break
  7814  		}
  7815  		v.reset(OpCopy)
  7816  		v.Type = x.Type
  7817  		v.AddArg(x)
  7818  		return true
  7819  	}
  7820  	// match: (CMOVQNE _ x (FlagLT_UGT))
  7821  	// cond:
  7822  	// result: x
  7823  	for {
  7824  		_ = v.Args[2]
  7825  		x := v.Args[1]
  7826  		v_2 := v.Args[2]
  7827  		if v_2.Op != OpAMD64FlagLT_UGT {
  7828  			break
  7829  		}
  7830  		v.reset(OpCopy)
  7831  		v.Type = x.Type
  7832  		v.AddArg(x)
  7833  		return true
  7834  	}
  7835  	return false
  7836  }
  7837  func rewriteValueAMD64_OpAMD64CMOVWCC_0(v *Value) bool {
  7838  	// match: (CMOVWCC x y (InvertFlags cond))
  7839  	// cond:
  7840  	// result: (CMOVWLS x y cond)
  7841  	for {
  7842  		_ = v.Args[2]
  7843  		x := v.Args[0]
  7844  		y := v.Args[1]
  7845  		v_2 := v.Args[2]
  7846  		if v_2.Op != OpAMD64InvertFlags {
  7847  			break
  7848  		}
  7849  		cond := v_2.Args[0]
  7850  		v.reset(OpAMD64CMOVWLS)
  7851  		v.AddArg(x)
  7852  		v.AddArg(y)
  7853  		v.AddArg(cond)
  7854  		return true
  7855  	}
  7856  	// match: (CMOVWCC _ x (FlagEQ))
  7857  	// cond:
  7858  	// result: x
  7859  	for {
  7860  		_ = v.Args[2]
  7861  		x := v.Args[1]
  7862  		v_2 := v.Args[2]
  7863  		if v_2.Op != OpAMD64FlagEQ {
  7864  			break
  7865  		}
  7866  		v.reset(OpCopy)
  7867  		v.Type = x.Type
  7868  		v.AddArg(x)
  7869  		return true
  7870  	}
  7871  	// match: (CMOVWCC _ x (FlagGT_UGT))
  7872  	// cond:
  7873  	// result: x
  7874  	for {
  7875  		_ = v.Args[2]
  7876  		x := v.Args[1]
  7877  		v_2 := v.Args[2]
  7878  		if v_2.Op != OpAMD64FlagGT_UGT {
  7879  			break
  7880  		}
  7881  		v.reset(OpCopy)
  7882  		v.Type = x.Type
  7883  		v.AddArg(x)
  7884  		return true
  7885  	}
  7886  	// match: (CMOVWCC y _ (FlagGT_ULT))
  7887  	// cond:
  7888  	// result: y
  7889  	for {
  7890  		_ = v.Args[2]
  7891  		y := v.Args[0]
  7892  		v_2 := v.Args[2]
  7893  		if v_2.Op != OpAMD64FlagGT_ULT {
  7894  			break
  7895  		}
  7896  		v.reset(OpCopy)
  7897  		v.Type = y.Type
  7898  		v.AddArg(y)
  7899  		return true
  7900  	}
  7901  	// match: (CMOVWCC y _ (FlagLT_ULT))
  7902  	// cond:
  7903  	// result: y
  7904  	for {
  7905  		_ = v.Args[2]
  7906  		y := v.Args[0]
  7907  		v_2 := v.Args[2]
  7908  		if v_2.Op != OpAMD64FlagLT_ULT {
  7909  			break
  7910  		}
  7911  		v.reset(OpCopy)
  7912  		v.Type = y.Type
  7913  		v.AddArg(y)
  7914  		return true
  7915  	}
  7916  	// match: (CMOVWCC _ x (FlagLT_UGT))
  7917  	// cond:
  7918  	// result: x
  7919  	for {
  7920  		_ = v.Args[2]
  7921  		x := v.Args[1]
  7922  		v_2 := v.Args[2]
  7923  		if v_2.Op != OpAMD64FlagLT_UGT {
  7924  			break
  7925  		}
  7926  		v.reset(OpCopy)
  7927  		v.Type = x.Type
  7928  		v.AddArg(x)
  7929  		return true
  7930  	}
  7931  	return false
  7932  }
  7933  func rewriteValueAMD64_OpAMD64CMOVWCS_0(v *Value) bool {
  7934  	// match: (CMOVWCS x y (InvertFlags cond))
  7935  	// cond:
  7936  	// result: (CMOVWHI x y cond)
  7937  	for {
  7938  		_ = v.Args[2]
  7939  		x := v.Args[0]
  7940  		y := v.Args[1]
  7941  		v_2 := v.Args[2]
  7942  		if v_2.Op != OpAMD64InvertFlags {
  7943  			break
  7944  		}
  7945  		cond := v_2.Args[0]
  7946  		v.reset(OpAMD64CMOVWHI)
  7947  		v.AddArg(x)
  7948  		v.AddArg(y)
  7949  		v.AddArg(cond)
  7950  		return true
  7951  	}
  7952  	// match: (CMOVWCS y _ (FlagEQ))
  7953  	// cond:
  7954  	// result: y
  7955  	for {
  7956  		_ = v.Args[2]
  7957  		y := v.Args[0]
  7958  		v_2 := v.Args[2]
  7959  		if v_2.Op != OpAMD64FlagEQ {
  7960  			break
  7961  		}
  7962  		v.reset(OpCopy)
  7963  		v.Type = y.Type
  7964  		v.AddArg(y)
  7965  		return true
  7966  	}
  7967  	// match: (CMOVWCS y _ (FlagGT_UGT))
  7968  	// cond:
  7969  	// result: y
  7970  	for {
  7971  		_ = v.Args[2]
  7972  		y := v.Args[0]
  7973  		v_2 := v.Args[2]
  7974  		if v_2.Op != OpAMD64FlagGT_UGT {
  7975  			break
  7976  		}
  7977  		v.reset(OpCopy)
  7978  		v.Type = y.Type
  7979  		v.AddArg(y)
  7980  		return true
  7981  	}
  7982  	// match: (CMOVWCS _ x (FlagGT_ULT))
  7983  	// cond:
  7984  	// result: x
  7985  	for {
  7986  		_ = v.Args[2]
  7987  		x := v.Args[1]
  7988  		v_2 := v.Args[2]
  7989  		if v_2.Op != OpAMD64FlagGT_ULT {
  7990  			break
  7991  		}
  7992  		v.reset(OpCopy)
  7993  		v.Type = x.Type
  7994  		v.AddArg(x)
  7995  		return true
  7996  	}
  7997  	// match: (CMOVWCS _ x (FlagLT_ULT))
  7998  	// cond:
  7999  	// result: x
  8000  	for {
  8001  		_ = v.Args[2]
  8002  		x := v.Args[1]
  8003  		v_2 := v.Args[2]
  8004  		if v_2.Op != OpAMD64FlagLT_ULT {
  8005  			break
  8006  		}
  8007  		v.reset(OpCopy)
  8008  		v.Type = x.Type
  8009  		v.AddArg(x)
  8010  		return true
  8011  	}
  8012  	// match: (CMOVWCS y _ (FlagLT_UGT))
  8013  	// cond:
  8014  	// result: y
  8015  	for {
  8016  		_ = v.Args[2]
  8017  		y := v.Args[0]
  8018  		v_2 := v.Args[2]
  8019  		if v_2.Op != OpAMD64FlagLT_UGT {
  8020  			break
  8021  		}
  8022  		v.reset(OpCopy)
  8023  		v.Type = y.Type
  8024  		v.AddArg(y)
  8025  		return true
  8026  	}
  8027  	return false
  8028  }
  8029  func rewriteValueAMD64_OpAMD64CMOVWEQ_0(v *Value) bool {
  8030  	// match: (CMOVWEQ x y (InvertFlags cond))
  8031  	// cond:
  8032  	// result: (CMOVWEQ x y cond)
  8033  	for {
  8034  		_ = v.Args[2]
  8035  		x := v.Args[0]
  8036  		y := v.Args[1]
  8037  		v_2 := v.Args[2]
  8038  		if v_2.Op != OpAMD64InvertFlags {
  8039  			break
  8040  		}
  8041  		cond := v_2.Args[0]
  8042  		v.reset(OpAMD64CMOVWEQ)
  8043  		v.AddArg(x)
  8044  		v.AddArg(y)
  8045  		v.AddArg(cond)
  8046  		return true
  8047  	}
  8048  	// match: (CMOVWEQ _ x (FlagEQ))
  8049  	// cond:
  8050  	// result: x
  8051  	for {
  8052  		_ = v.Args[2]
  8053  		x := v.Args[1]
  8054  		v_2 := v.Args[2]
  8055  		if v_2.Op != OpAMD64FlagEQ {
  8056  			break
  8057  		}
  8058  		v.reset(OpCopy)
  8059  		v.Type = x.Type
  8060  		v.AddArg(x)
  8061  		return true
  8062  	}
  8063  	// match: (CMOVWEQ y _ (FlagGT_UGT))
  8064  	// cond:
  8065  	// result: y
  8066  	for {
  8067  		_ = v.Args[2]
  8068  		y := v.Args[0]
  8069  		v_2 := v.Args[2]
  8070  		if v_2.Op != OpAMD64FlagGT_UGT {
  8071  			break
  8072  		}
  8073  		v.reset(OpCopy)
  8074  		v.Type = y.Type
  8075  		v.AddArg(y)
  8076  		return true
  8077  	}
  8078  	// match: (CMOVWEQ y _ (FlagGT_ULT))
  8079  	// cond:
  8080  	// result: y
  8081  	for {
  8082  		_ = v.Args[2]
  8083  		y := v.Args[0]
  8084  		v_2 := v.Args[2]
  8085  		if v_2.Op != OpAMD64FlagGT_ULT {
  8086  			break
  8087  		}
  8088  		v.reset(OpCopy)
  8089  		v.Type = y.Type
  8090  		v.AddArg(y)
  8091  		return true
  8092  	}
  8093  	// match: (CMOVWEQ y _ (FlagLT_ULT))
  8094  	// cond:
  8095  	// result: y
  8096  	for {
  8097  		_ = v.Args[2]
  8098  		y := v.Args[0]
  8099  		v_2 := v.Args[2]
  8100  		if v_2.Op != OpAMD64FlagLT_ULT {
  8101  			break
  8102  		}
  8103  		v.reset(OpCopy)
  8104  		v.Type = y.Type
  8105  		v.AddArg(y)
  8106  		return true
  8107  	}
  8108  	// match: (CMOVWEQ y _ (FlagLT_UGT))
  8109  	// cond:
  8110  	// result: y
  8111  	for {
  8112  		_ = v.Args[2]
  8113  		y := v.Args[0]
  8114  		v_2 := v.Args[2]
  8115  		if v_2.Op != OpAMD64FlagLT_UGT {
  8116  			break
  8117  		}
  8118  		v.reset(OpCopy)
  8119  		v.Type = y.Type
  8120  		v.AddArg(y)
  8121  		return true
  8122  	}
  8123  	return false
  8124  }
  8125  func rewriteValueAMD64_OpAMD64CMOVWGE_0(v *Value) bool {
  8126  	// match: (CMOVWGE x y (InvertFlags cond))
  8127  	// cond:
  8128  	// result: (CMOVWLE x y cond)
  8129  	for {
  8130  		_ = v.Args[2]
  8131  		x := v.Args[0]
  8132  		y := v.Args[1]
  8133  		v_2 := v.Args[2]
  8134  		if v_2.Op != OpAMD64InvertFlags {
  8135  			break
  8136  		}
  8137  		cond := v_2.Args[0]
  8138  		v.reset(OpAMD64CMOVWLE)
  8139  		v.AddArg(x)
  8140  		v.AddArg(y)
  8141  		v.AddArg(cond)
  8142  		return true
  8143  	}
  8144  	// match: (CMOVWGE _ x (FlagEQ))
  8145  	// cond:
  8146  	// result: x
  8147  	for {
  8148  		_ = v.Args[2]
  8149  		x := v.Args[1]
  8150  		v_2 := v.Args[2]
  8151  		if v_2.Op != OpAMD64FlagEQ {
  8152  			break
  8153  		}
  8154  		v.reset(OpCopy)
  8155  		v.Type = x.Type
  8156  		v.AddArg(x)
  8157  		return true
  8158  	}
  8159  	// match: (CMOVWGE _ x (FlagGT_UGT))
  8160  	// cond:
  8161  	// result: x
  8162  	for {
  8163  		_ = v.Args[2]
  8164  		x := v.Args[1]
  8165  		v_2 := v.Args[2]
  8166  		if v_2.Op != OpAMD64FlagGT_UGT {
  8167  			break
  8168  		}
  8169  		v.reset(OpCopy)
  8170  		v.Type = x.Type
  8171  		v.AddArg(x)
  8172  		return true
  8173  	}
  8174  	// match: (CMOVWGE _ x (FlagGT_ULT))
  8175  	// cond:
  8176  	// result: x
  8177  	for {
  8178  		_ = v.Args[2]
  8179  		x := v.Args[1]
  8180  		v_2 := v.Args[2]
  8181  		if v_2.Op != OpAMD64FlagGT_ULT {
  8182  			break
  8183  		}
  8184  		v.reset(OpCopy)
  8185  		v.Type = x.Type
  8186  		v.AddArg(x)
  8187  		return true
  8188  	}
  8189  	// match: (CMOVWGE y _ (FlagLT_ULT))
  8190  	// cond:
  8191  	// result: y
  8192  	for {
  8193  		_ = v.Args[2]
  8194  		y := v.Args[0]
  8195  		v_2 := v.Args[2]
  8196  		if v_2.Op != OpAMD64FlagLT_ULT {
  8197  			break
  8198  		}
  8199  		v.reset(OpCopy)
  8200  		v.Type = y.Type
  8201  		v.AddArg(y)
  8202  		return true
  8203  	}
  8204  	// match: (CMOVWGE y _ (FlagLT_UGT))
  8205  	// cond:
  8206  	// result: y
  8207  	for {
  8208  		_ = v.Args[2]
  8209  		y := v.Args[0]
  8210  		v_2 := v.Args[2]
  8211  		if v_2.Op != OpAMD64FlagLT_UGT {
  8212  			break
  8213  		}
  8214  		v.reset(OpCopy)
  8215  		v.Type = y.Type
  8216  		v.AddArg(y)
  8217  		return true
  8218  	}
  8219  	return false
  8220  }
  8221  func rewriteValueAMD64_OpAMD64CMOVWGT_0(v *Value) bool {
  8222  	// match: (CMOVWGT x y (InvertFlags cond))
  8223  	// cond:
  8224  	// result: (CMOVWLT x y cond)
  8225  	for {
  8226  		_ = v.Args[2]
  8227  		x := v.Args[0]
  8228  		y := v.Args[1]
  8229  		v_2 := v.Args[2]
  8230  		if v_2.Op != OpAMD64InvertFlags {
  8231  			break
  8232  		}
  8233  		cond := v_2.Args[0]
  8234  		v.reset(OpAMD64CMOVWLT)
  8235  		v.AddArg(x)
  8236  		v.AddArg(y)
  8237  		v.AddArg(cond)
  8238  		return true
  8239  	}
  8240  	// match: (CMOVWGT y _ (FlagEQ))
  8241  	// cond:
  8242  	// result: y
  8243  	for {
  8244  		_ = v.Args[2]
  8245  		y := v.Args[0]
  8246  		v_2 := v.Args[2]
  8247  		if v_2.Op != OpAMD64FlagEQ {
  8248  			break
  8249  		}
  8250  		v.reset(OpCopy)
  8251  		v.Type = y.Type
  8252  		v.AddArg(y)
  8253  		return true
  8254  	}
  8255  	// match: (CMOVWGT _ x (FlagGT_UGT))
  8256  	// cond:
  8257  	// result: x
  8258  	for {
  8259  		_ = v.Args[2]
  8260  		x := v.Args[1]
  8261  		v_2 := v.Args[2]
  8262  		if v_2.Op != OpAMD64FlagGT_UGT {
  8263  			break
  8264  		}
  8265  		v.reset(OpCopy)
  8266  		v.Type = x.Type
  8267  		v.AddArg(x)
  8268  		return true
  8269  	}
  8270  	// match: (CMOVWGT _ x (FlagGT_ULT))
  8271  	// cond:
  8272  	// result: x
  8273  	for {
  8274  		_ = v.Args[2]
  8275  		x := v.Args[1]
  8276  		v_2 := v.Args[2]
  8277  		if v_2.Op != OpAMD64FlagGT_ULT {
  8278  			break
  8279  		}
  8280  		v.reset(OpCopy)
  8281  		v.Type = x.Type
  8282  		v.AddArg(x)
  8283  		return true
  8284  	}
  8285  	// match: (CMOVWGT y _ (FlagLT_ULT))
  8286  	// cond:
  8287  	// result: y
  8288  	for {
  8289  		_ = v.Args[2]
  8290  		y := v.Args[0]
  8291  		v_2 := v.Args[2]
  8292  		if v_2.Op != OpAMD64FlagLT_ULT {
  8293  			break
  8294  		}
  8295  		v.reset(OpCopy)
  8296  		v.Type = y.Type
  8297  		v.AddArg(y)
  8298  		return true
  8299  	}
  8300  	// match: (CMOVWGT y _ (FlagLT_UGT))
  8301  	// cond:
  8302  	// result: y
  8303  	for {
  8304  		_ = v.Args[2]
  8305  		y := v.Args[0]
  8306  		v_2 := v.Args[2]
  8307  		if v_2.Op != OpAMD64FlagLT_UGT {
  8308  			break
  8309  		}
  8310  		v.reset(OpCopy)
  8311  		v.Type = y.Type
  8312  		v.AddArg(y)
  8313  		return true
  8314  	}
  8315  	return false
  8316  }
  8317  func rewriteValueAMD64_OpAMD64CMOVWHI_0(v *Value) bool {
  8318  	// match: (CMOVWHI x y (InvertFlags cond))
  8319  	// cond:
  8320  	// result: (CMOVWCS x y cond)
  8321  	for {
  8322  		_ = v.Args[2]
  8323  		x := v.Args[0]
  8324  		y := v.Args[1]
  8325  		v_2 := v.Args[2]
  8326  		if v_2.Op != OpAMD64InvertFlags {
  8327  			break
  8328  		}
  8329  		cond := v_2.Args[0]
  8330  		v.reset(OpAMD64CMOVWCS)
  8331  		v.AddArg(x)
  8332  		v.AddArg(y)
  8333  		v.AddArg(cond)
  8334  		return true
  8335  	}
  8336  	// match: (CMOVWHI y _ (FlagEQ))
  8337  	// cond:
  8338  	// result: y
  8339  	for {
  8340  		_ = v.Args[2]
  8341  		y := v.Args[0]
  8342  		v_2 := v.Args[2]
  8343  		if v_2.Op != OpAMD64FlagEQ {
  8344  			break
  8345  		}
  8346  		v.reset(OpCopy)
  8347  		v.Type = y.Type
  8348  		v.AddArg(y)
  8349  		return true
  8350  	}
  8351  	// match: (CMOVWHI _ x (FlagGT_UGT))
  8352  	// cond:
  8353  	// result: x
  8354  	for {
  8355  		_ = v.Args[2]
  8356  		x := v.Args[1]
  8357  		v_2 := v.Args[2]
  8358  		if v_2.Op != OpAMD64FlagGT_UGT {
  8359  			break
  8360  		}
  8361  		v.reset(OpCopy)
  8362  		v.Type = x.Type
  8363  		v.AddArg(x)
  8364  		return true
  8365  	}
  8366  	// match: (CMOVWHI y _ (FlagGT_ULT))
  8367  	// cond:
  8368  	// result: y
  8369  	for {
  8370  		_ = v.Args[2]
  8371  		y := v.Args[0]
  8372  		v_2 := v.Args[2]
  8373  		if v_2.Op != OpAMD64FlagGT_ULT {
  8374  			break
  8375  		}
  8376  		v.reset(OpCopy)
  8377  		v.Type = y.Type
  8378  		v.AddArg(y)
  8379  		return true
  8380  	}
  8381  	// match: (CMOVWHI y _ (FlagLT_ULT))
  8382  	// cond:
  8383  	// result: y
  8384  	for {
  8385  		_ = v.Args[2]
  8386  		y := v.Args[0]
  8387  		v_2 := v.Args[2]
  8388  		if v_2.Op != OpAMD64FlagLT_ULT {
  8389  			break
  8390  		}
  8391  		v.reset(OpCopy)
  8392  		v.Type = y.Type
  8393  		v.AddArg(y)
  8394  		return true
  8395  	}
  8396  	// match: (CMOVWHI _ x (FlagLT_UGT))
  8397  	// cond:
  8398  	// result: x
  8399  	for {
  8400  		_ = v.Args[2]
  8401  		x := v.Args[1]
  8402  		v_2 := v.Args[2]
  8403  		if v_2.Op != OpAMD64FlagLT_UGT {
  8404  			break
  8405  		}
  8406  		v.reset(OpCopy)
  8407  		v.Type = x.Type
  8408  		v.AddArg(x)
  8409  		return true
  8410  	}
  8411  	return false
  8412  }
  8413  func rewriteValueAMD64_OpAMD64CMOVWLE_0(v *Value) bool {
  8414  	// match: (CMOVWLE x y (InvertFlags cond))
  8415  	// cond:
  8416  	// result: (CMOVWGE x y cond)
  8417  	for {
  8418  		_ = v.Args[2]
  8419  		x := v.Args[0]
  8420  		y := v.Args[1]
  8421  		v_2 := v.Args[2]
  8422  		if v_2.Op != OpAMD64InvertFlags {
  8423  			break
  8424  		}
  8425  		cond := v_2.Args[0]
  8426  		v.reset(OpAMD64CMOVWGE)
  8427  		v.AddArg(x)
  8428  		v.AddArg(y)
  8429  		v.AddArg(cond)
  8430  		return true
  8431  	}
  8432  	// match: (CMOVWLE _ x (FlagEQ))
  8433  	// cond:
  8434  	// result: x
  8435  	for {
  8436  		_ = v.Args[2]
  8437  		x := v.Args[1]
  8438  		v_2 := v.Args[2]
  8439  		if v_2.Op != OpAMD64FlagEQ {
  8440  			break
  8441  		}
  8442  		v.reset(OpCopy)
  8443  		v.Type = x.Type
  8444  		v.AddArg(x)
  8445  		return true
  8446  	}
  8447  	// match: (CMOVWLE y _ (FlagGT_UGT))
  8448  	// cond:
  8449  	// result: y
  8450  	for {
  8451  		_ = v.Args[2]
  8452  		y := v.Args[0]
  8453  		v_2 := v.Args[2]
  8454  		if v_2.Op != OpAMD64FlagGT_UGT {
  8455  			break
  8456  		}
  8457  		v.reset(OpCopy)
  8458  		v.Type = y.Type
  8459  		v.AddArg(y)
  8460  		return true
  8461  	}
  8462  	// match: (CMOVWLE y _ (FlagGT_ULT))
  8463  	// cond:
  8464  	// result: y
  8465  	for {
  8466  		_ = v.Args[2]
  8467  		y := v.Args[0]
  8468  		v_2 := v.Args[2]
  8469  		if v_2.Op != OpAMD64FlagGT_ULT {
  8470  			break
  8471  		}
  8472  		v.reset(OpCopy)
  8473  		v.Type = y.Type
  8474  		v.AddArg(y)
  8475  		return true
  8476  	}
  8477  	// match: (CMOVWLE _ x (FlagLT_ULT))
  8478  	// cond:
  8479  	// result: x
  8480  	for {
  8481  		_ = v.Args[2]
  8482  		x := v.Args[1]
  8483  		v_2 := v.Args[2]
  8484  		if v_2.Op != OpAMD64FlagLT_ULT {
  8485  			break
  8486  		}
  8487  		v.reset(OpCopy)
  8488  		v.Type = x.Type
  8489  		v.AddArg(x)
  8490  		return true
  8491  	}
  8492  	// match: (CMOVWLE _ x (FlagLT_UGT))
  8493  	// cond:
  8494  	// result: x
  8495  	for {
  8496  		_ = v.Args[2]
  8497  		x := v.Args[1]
  8498  		v_2 := v.Args[2]
  8499  		if v_2.Op != OpAMD64FlagLT_UGT {
  8500  			break
  8501  		}
  8502  		v.reset(OpCopy)
  8503  		v.Type = x.Type
  8504  		v.AddArg(x)
  8505  		return true
  8506  	}
  8507  	return false
  8508  }
  8509  func rewriteValueAMD64_OpAMD64CMOVWLS_0(v *Value) bool {
  8510  	// match: (CMOVWLS x y (InvertFlags cond))
  8511  	// cond:
  8512  	// result: (CMOVWCC x y cond)
  8513  	for {
  8514  		_ = v.Args[2]
  8515  		x := v.Args[0]
  8516  		y := v.Args[1]
  8517  		v_2 := v.Args[2]
  8518  		if v_2.Op != OpAMD64InvertFlags {
  8519  			break
  8520  		}
  8521  		cond := v_2.Args[0]
  8522  		v.reset(OpAMD64CMOVWCC)
  8523  		v.AddArg(x)
  8524  		v.AddArg(y)
  8525  		v.AddArg(cond)
  8526  		return true
  8527  	}
  8528  	// match: (CMOVWLS _ x (FlagEQ))
  8529  	// cond:
  8530  	// result: x
  8531  	for {
  8532  		_ = v.Args[2]
  8533  		x := v.Args[1]
  8534  		v_2 := v.Args[2]
  8535  		if v_2.Op != OpAMD64FlagEQ {
  8536  			break
  8537  		}
  8538  		v.reset(OpCopy)
  8539  		v.Type = x.Type
  8540  		v.AddArg(x)
  8541  		return true
  8542  	}
  8543  	// match: (CMOVWLS y _ (FlagGT_UGT))
  8544  	// cond:
  8545  	// result: y
  8546  	for {
  8547  		_ = v.Args[2]
  8548  		y := v.Args[0]
  8549  		v_2 := v.Args[2]
  8550  		if v_2.Op != OpAMD64FlagGT_UGT {
  8551  			break
  8552  		}
  8553  		v.reset(OpCopy)
  8554  		v.Type = y.Type
  8555  		v.AddArg(y)
  8556  		return true
  8557  	}
  8558  	// match: (CMOVWLS _ x (FlagGT_ULT))
  8559  	// cond:
  8560  	// result: x
  8561  	for {
  8562  		_ = v.Args[2]
  8563  		x := v.Args[1]
  8564  		v_2 := v.Args[2]
  8565  		if v_2.Op != OpAMD64FlagGT_ULT {
  8566  			break
  8567  		}
  8568  		v.reset(OpCopy)
  8569  		v.Type = x.Type
  8570  		v.AddArg(x)
  8571  		return true
  8572  	}
  8573  	// match: (CMOVWLS _ x (FlagLT_ULT))
  8574  	// cond:
  8575  	// result: x
  8576  	for {
  8577  		_ = v.Args[2]
  8578  		x := v.Args[1]
  8579  		v_2 := v.Args[2]
  8580  		if v_2.Op != OpAMD64FlagLT_ULT {
  8581  			break
  8582  		}
  8583  		v.reset(OpCopy)
  8584  		v.Type = x.Type
  8585  		v.AddArg(x)
  8586  		return true
  8587  	}
  8588  	// match: (CMOVWLS y _ (FlagLT_UGT))
  8589  	// cond:
  8590  	// result: y
  8591  	for {
  8592  		_ = v.Args[2]
  8593  		y := v.Args[0]
  8594  		v_2 := v.Args[2]
  8595  		if v_2.Op != OpAMD64FlagLT_UGT {
  8596  			break
  8597  		}
  8598  		v.reset(OpCopy)
  8599  		v.Type = y.Type
  8600  		v.AddArg(y)
  8601  		return true
  8602  	}
  8603  	return false
  8604  }
  8605  func rewriteValueAMD64_OpAMD64CMOVWLT_0(v *Value) bool {
  8606  	// match: (CMOVWLT x y (InvertFlags cond))
  8607  	// cond:
  8608  	// result: (CMOVWGT x y cond)
  8609  	for {
  8610  		_ = v.Args[2]
  8611  		x := v.Args[0]
  8612  		y := v.Args[1]
  8613  		v_2 := v.Args[2]
  8614  		if v_2.Op != OpAMD64InvertFlags {
  8615  			break
  8616  		}
  8617  		cond := v_2.Args[0]
  8618  		v.reset(OpAMD64CMOVWGT)
  8619  		v.AddArg(x)
  8620  		v.AddArg(y)
  8621  		v.AddArg(cond)
  8622  		return true
  8623  	}
  8624  	// match: (CMOVWLT y _ (FlagEQ))
  8625  	// cond:
  8626  	// result: y
  8627  	for {
  8628  		_ = v.Args[2]
  8629  		y := v.Args[0]
  8630  		v_2 := v.Args[2]
  8631  		if v_2.Op != OpAMD64FlagEQ {
  8632  			break
  8633  		}
  8634  		v.reset(OpCopy)
  8635  		v.Type = y.Type
  8636  		v.AddArg(y)
  8637  		return true
  8638  	}
  8639  	// match: (CMOVWLT y _ (FlagGT_UGT))
  8640  	// cond:
  8641  	// result: y
  8642  	for {
  8643  		_ = v.Args[2]
  8644  		y := v.Args[0]
  8645  		v_2 := v.Args[2]
  8646  		if v_2.Op != OpAMD64FlagGT_UGT {
  8647  			break
  8648  		}
  8649  		v.reset(OpCopy)
  8650  		v.Type = y.Type
  8651  		v.AddArg(y)
  8652  		return true
  8653  	}
  8654  	// match: (CMOVWLT y _ (FlagGT_ULT))
  8655  	// cond:
  8656  	// result: y
  8657  	for {
  8658  		_ = v.Args[2]
  8659  		y := v.Args[0]
  8660  		v_2 := v.Args[2]
  8661  		if v_2.Op != OpAMD64FlagGT_ULT {
  8662  			break
  8663  		}
  8664  		v.reset(OpCopy)
  8665  		v.Type = y.Type
  8666  		v.AddArg(y)
  8667  		return true
  8668  	}
  8669  	// match: (CMOVWLT _ x (FlagLT_ULT))
  8670  	// cond:
  8671  	// result: x
  8672  	for {
  8673  		_ = v.Args[2]
  8674  		x := v.Args[1]
  8675  		v_2 := v.Args[2]
  8676  		if v_2.Op != OpAMD64FlagLT_ULT {
  8677  			break
  8678  		}
  8679  		v.reset(OpCopy)
  8680  		v.Type = x.Type
  8681  		v.AddArg(x)
  8682  		return true
  8683  	}
  8684  	// match: (CMOVWLT _ x (FlagLT_UGT))
  8685  	// cond:
  8686  	// result: x
  8687  	for {
  8688  		_ = v.Args[2]
  8689  		x := v.Args[1]
  8690  		v_2 := v.Args[2]
  8691  		if v_2.Op != OpAMD64FlagLT_UGT {
  8692  			break
  8693  		}
  8694  		v.reset(OpCopy)
  8695  		v.Type = x.Type
  8696  		v.AddArg(x)
  8697  		return true
  8698  	}
  8699  	return false
  8700  }
  8701  func rewriteValueAMD64_OpAMD64CMOVWNE_0(v *Value) bool {
  8702  	// match: (CMOVWNE x y (InvertFlags cond))
  8703  	// cond:
  8704  	// result: (CMOVWNE x y cond)
  8705  	for {
  8706  		_ = v.Args[2]
  8707  		x := v.Args[0]
  8708  		y := v.Args[1]
  8709  		v_2 := v.Args[2]
  8710  		if v_2.Op != OpAMD64InvertFlags {
  8711  			break
  8712  		}
  8713  		cond := v_2.Args[0]
  8714  		v.reset(OpAMD64CMOVWNE)
  8715  		v.AddArg(x)
  8716  		v.AddArg(y)
  8717  		v.AddArg(cond)
  8718  		return true
  8719  	}
  8720  	// match: (CMOVWNE y _ (FlagEQ))
  8721  	// cond:
  8722  	// result: y
  8723  	for {
  8724  		_ = v.Args[2]
  8725  		y := v.Args[0]
  8726  		v_2 := v.Args[2]
  8727  		if v_2.Op != OpAMD64FlagEQ {
  8728  			break
  8729  		}
  8730  		v.reset(OpCopy)
  8731  		v.Type = y.Type
  8732  		v.AddArg(y)
  8733  		return true
  8734  	}
  8735  	// match: (CMOVWNE _ x (FlagGT_UGT))
  8736  	// cond:
  8737  	// result: x
  8738  	for {
  8739  		_ = v.Args[2]
  8740  		x := v.Args[1]
  8741  		v_2 := v.Args[2]
  8742  		if v_2.Op != OpAMD64FlagGT_UGT {
  8743  			break
  8744  		}
  8745  		v.reset(OpCopy)
  8746  		v.Type = x.Type
  8747  		v.AddArg(x)
  8748  		return true
  8749  	}
  8750  	// match: (CMOVWNE _ x (FlagGT_ULT))
  8751  	// cond:
  8752  	// result: x
  8753  	for {
  8754  		_ = v.Args[2]
  8755  		x := v.Args[1]
  8756  		v_2 := v.Args[2]
  8757  		if v_2.Op != OpAMD64FlagGT_ULT {
  8758  			break
  8759  		}
  8760  		v.reset(OpCopy)
  8761  		v.Type = x.Type
  8762  		v.AddArg(x)
  8763  		return true
  8764  	}
  8765  	// match: (CMOVWNE _ x (FlagLT_ULT))
  8766  	// cond:
  8767  	// result: x
  8768  	for {
  8769  		_ = v.Args[2]
  8770  		x := v.Args[1]
  8771  		v_2 := v.Args[2]
  8772  		if v_2.Op != OpAMD64FlagLT_ULT {
  8773  			break
  8774  		}
  8775  		v.reset(OpCopy)
  8776  		v.Type = x.Type
  8777  		v.AddArg(x)
  8778  		return true
  8779  	}
  8780  	// match: (CMOVWNE _ x (FlagLT_UGT))
  8781  	// cond:
  8782  	// result: x
  8783  	for {
  8784  		_ = v.Args[2]
  8785  		x := v.Args[1]
  8786  		v_2 := v.Args[2]
  8787  		if v_2.Op != OpAMD64FlagLT_UGT {
  8788  			break
  8789  		}
  8790  		v.reset(OpCopy)
  8791  		v.Type = x.Type
  8792  		v.AddArg(x)
  8793  		return true
  8794  	}
  8795  	return false
  8796  }
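// Note (editorial summary, derived from the rules below): CMPB is canonicalized in
// two ways. A constant operand becomes a CMPBconst, wrapped in InvertFlags when the
// constant was the left operand, since swapping a compare's operands inverts the
// meaning of the flags. A MOVBload operand that satisfies canMergeLoad/clobber is
// folded into a CMPBload that compares against memory directly, again via
// InvertFlags when the load was the right operand, because CMPBload always compares
// the loaded value against the register operand.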
  8797  func rewriteValueAMD64_OpAMD64CMPB_0(v *Value) bool {
  8798  	b := v.Block
  8799  	_ = b
  8800  	// match: (CMPB x (MOVLconst [c]))
  8801  	// cond:
  8802  	// result: (CMPBconst x [int64(int8(c))])
  8803  	for {
  8804  		_ = v.Args[1]
  8805  		x := v.Args[0]
  8806  		v_1 := v.Args[1]
  8807  		if v_1.Op != OpAMD64MOVLconst {
  8808  			break
  8809  		}
  8810  		c := v_1.AuxInt
  8811  		v.reset(OpAMD64CMPBconst)
  8812  		v.AuxInt = int64(int8(c))
  8813  		v.AddArg(x)
  8814  		return true
  8815  	}
  8816  	// match: (CMPB (MOVLconst [c]) x)
  8817  	// cond:
  8818  	// result: (InvertFlags (CMPBconst x [int64(int8(c))]))
  8819  	for {
  8820  		_ = v.Args[1]
  8821  		v_0 := v.Args[0]
  8822  		if v_0.Op != OpAMD64MOVLconst {
  8823  			break
  8824  		}
  8825  		c := v_0.AuxInt
  8826  		x := v.Args[1]
  8827  		v.reset(OpAMD64InvertFlags)
  8828  		v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
  8829  		v0.AuxInt = int64(int8(c))
  8830  		v0.AddArg(x)
  8831  		v.AddArg(v0)
  8832  		return true
  8833  	}
  8834  	// match: (CMPB l:(MOVBload {sym} [off] ptr mem) x)
  8835  	// cond: canMergeLoad(v, l) && clobber(l)
  8836  	// result: (CMPBload {sym} [off] ptr x mem)
  8837  	for {
  8838  		_ = v.Args[1]
  8839  		l := v.Args[0]
  8840  		if l.Op != OpAMD64MOVBload {
  8841  			break
  8842  		}
  8843  		off := l.AuxInt
  8844  		sym := l.Aux
  8845  		_ = l.Args[1]
  8846  		ptr := l.Args[0]
  8847  		mem := l.Args[1]
  8848  		x := v.Args[1]
  8849  		if !(canMergeLoad(v, l) && clobber(l)) {
  8850  			break
  8851  		}
  8852  		v.reset(OpAMD64CMPBload)
  8853  		v.AuxInt = off
  8854  		v.Aux = sym
  8855  		v.AddArg(ptr)
  8856  		v.AddArg(x)
  8857  		v.AddArg(mem)
  8858  		return true
  8859  	}
  8860  	// match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
  8861  	// cond: canMergeLoad(v, l) && clobber(l)
  8862  	// result: (InvertFlags (CMPBload {sym} [off] ptr x mem))
  8863  	for {
  8864  		_ = v.Args[1]
  8865  		x := v.Args[0]
  8866  		l := v.Args[1]
  8867  		if l.Op != OpAMD64MOVBload {
  8868  			break
  8869  		}
  8870  		off := l.AuxInt
  8871  		sym := l.Aux
  8872  		_ = l.Args[1]
  8873  		ptr := l.Args[0]
  8874  		mem := l.Args[1]
  8875  		if !(canMergeLoad(v, l) && clobber(l)) {
  8876  			break
  8877  		}
  8878  		v.reset(OpAMD64InvertFlags)
  8879  		v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags)
  8880  		v0.AuxInt = off
  8881  		v0.Aux = sym
  8882  		v0.AddArg(ptr)
  8883  		v0.AddArg(x)
  8884  		v0.AddArg(mem)
  8885  		v.AddArg(v0)
  8886  		return true
  8887  	}
  8888  	return false
  8889  }
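// Note (editorial summary, derived from the rules below): CMPBconst disappears
// entirely when its operand is itself a constant (yielding one of the Flag*
// pseudo-values), comparisons against 0 become TESTB/TESTBconst forms, and a
// mergeable MOVBload operand is replaced by a CMPBconstload. That last rewrite uses
// the "@l.Block" form: the new value is created in the load's block (b = l.Block)
// and v is reset to a Copy of it.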
  8890  func rewriteValueAMD64_OpAMD64CMPBconst_0(v *Value) bool {
  8891  	b := v.Block
  8892  	_ = b
  8893  	// match: (CMPBconst (MOVLconst [x]) [y])
  8894  	// cond: int8(x)==int8(y)
  8895  	// result: (FlagEQ)
  8896  	for {
  8897  		y := v.AuxInt
  8898  		v_0 := v.Args[0]
  8899  		if v_0.Op != OpAMD64MOVLconst {
  8900  			break
  8901  		}
  8902  		x := v_0.AuxInt
  8903  		if !(int8(x) == int8(y)) {
  8904  			break
  8905  		}
  8906  		v.reset(OpAMD64FlagEQ)
  8907  		return true
  8908  	}
  8909  	// match: (CMPBconst (MOVLconst [x]) [y])
  8910  	// cond: int8(x)<int8(y) && uint8(x)<uint8(y)
  8911  	// result: (FlagLT_ULT)
  8912  	for {
  8913  		y := v.AuxInt
  8914  		v_0 := v.Args[0]
  8915  		if v_0.Op != OpAMD64MOVLconst {
  8916  			break
  8917  		}
  8918  		x := v_0.AuxInt
  8919  		if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
  8920  			break
  8921  		}
  8922  		v.reset(OpAMD64FlagLT_ULT)
  8923  		return true
  8924  	}
  8925  	// match: (CMPBconst (MOVLconst [x]) [y])
  8926  	// cond: int8(x)<int8(y) && uint8(x)>uint8(y)
  8927  	// result: (FlagLT_UGT)
  8928  	for {
  8929  		y := v.AuxInt
  8930  		v_0 := v.Args[0]
  8931  		if v_0.Op != OpAMD64MOVLconst {
  8932  			break
  8933  		}
  8934  		x := v_0.AuxInt
  8935  		if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
  8936  			break
  8937  		}
  8938  		v.reset(OpAMD64FlagLT_UGT)
  8939  		return true
  8940  	}
  8941  	// match: (CMPBconst (MOVLconst [x]) [y])
  8942  	// cond: int8(x)>int8(y) && uint8(x)<uint8(y)
  8943  	// result: (FlagGT_ULT)
  8944  	for {
  8945  		y := v.AuxInt
  8946  		v_0 := v.Args[0]
  8947  		if v_0.Op != OpAMD64MOVLconst {
  8948  			break
  8949  		}
  8950  		x := v_0.AuxInt
  8951  		if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
  8952  			break
  8953  		}
  8954  		v.reset(OpAMD64FlagGT_ULT)
  8955  		return true
  8956  	}
  8957  	// match: (CMPBconst (MOVLconst [x]) [y])
  8958  	// cond: int8(x)>int8(y) && uint8(x)>uint8(y)
  8959  	// result: (FlagGT_UGT)
  8960  	for {
  8961  		y := v.AuxInt
  8962  		v_0 := v.Args[0]
  8963  		if v_0.Op != OpAMD64MOVLconst {
  8964  			break
  8965  		}
  8966  		x := v_0.AuxInt
  8967  		if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
  8968  			break
  8969  		}
  8970  		v.reset(OpAMD64FlagGT_UGT)
  8971  		return true
  8972  	}
  8973  	// match: (CMPBconst (ANDLconst _ [m]) [n])
  8974  	// cond: 0 <= int8(m) && int8(m) < int8(n)
  8975  	// result: (FlagLT_ULT)
  8976  	for {
  8977  		n := v.AuxInt
  8978  		v_0 := v.Args[0]
  8979  		if v_0.Op != OpAMD64ANDLconst {
  8980  			break
  8981  		}
  8982  		m := v_0.AuxInt
  8983  		if !(0 <= int8(m) && int8(m) < int8(n)) {
  8984  			break
  8985  		}
  8986  		v.reset(OpAMD64FlagLT_ULT)
  8987  		return true
  8988  	}
  8989  	// match: (CMPBconst (ANDL x y) [0])
  8990  	// cond:
  8991  	// result: (TESTB x y)
  8992  	for {
  8993  		if v.AuxInt != 0 {
  8994  			break
  8995  		}
  8996  		v_0 := v.Args[0]
  8997  		if v_0.Op != OpAMD64ANDL {
  8998  			break
  8999  		}
  9000  		_ = v_0.Args[1]
  9001  		x := v_0.Args[0]
  9002  		y := v_0.Args[1]
  9003  		v.reset(OpAMD64TESTB)
  9004  		v.AddArg(x)
  9005  		v.AddArg(y)
  9006  		return true
  9007  	}
  9008  	// match: (CMPBconst (ANDLconst [c] x) [0])
  9009  	// cond:
  9010  	// result: (TESTBconst [int64(int8(c))] x)
  9011  	for {
  9012  		if v.AuxInt != 0 {
  9013  			break
  9014  		}
  9015  		v_0 := v.Args[0]
  9016  		if v_0.Op != OpAMD64ANDLconst {
  9017  			break
  9018  		}
  9019  		c := v_0.AuxInt
  9020  		x := v_0.Args[0]
  9021  		v.reset(OpAMD64TESTBconst)
  9022  		v.AuxInt = int64(int8(c))
  9023  		v.AddArg(x)
  9024  		return true
  9025  	}
  9026  	// match: (CMPBconst x [0])
  9027  	// cond:
  9028  	// result: (TESTB x x)
  9029  	for {
  9030  		if v.AuxInt != 0 {
  9031  			break
  9032  		}
  9033  		x := v.Args[0]
  9034  		v.reset(OpAMD64TESTB)
  9035  		v.AddArg(x)
  9036  		v.AddArg(x)
  9037  		return true
  9038  	}
  9039  	// match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
  9040  	// cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l)
  9041  	// result: @l.Block (CMPBconstload {sym} [makeValAndOff(c,off)] ptr mem)
  9042  	for {
  9043  		c := v.AuxInt
  9044  		l := v.Args[0]
  9045  		if l.Op != OpAMD64MOVBload {
  9046  			break
  9047  		}
  9048  		off := l.AuxInt
  9049  		sym := l.Aux
  9050  		_ = l.Args[1]
  9051  		ptr := l.Args[0]
  9052  		mem := l.Args[1]
  9053  		if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) {
  9054  			break
  9055  		}
  9056  		b = l.Block
  9057  		v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
  9058  		v.reset(OpCopy)
  9059  		v.AddArg(v0)
  9060  		v0.AuxInt = makeValAndOff(c, off)
  9061  		v0.Aux = sym
  9062  		v0.AddArg(ptr)
  9063  		v0.AddArg(mem)
  9064  		return true
  9065  	}
  9066  	return false
  9067  }
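// Note (editorial, background assumption about ValAndOff): the *constload compare
// ops pack both the comparison constant and the memory offset into a single AuxInt
// via makeValAndOff. Folding an ADDQconst or LEAQ into the address therefore goes
// through ValAndOff(...).canAdd/add so the combined offset still fits the packed
// encoding, instead of simply summing offsets as the plain load rules do.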
  9068  func rewriteValueAMD64_OpAMD64CMPBconstload_0(v *Value) bool {
  9069  	// match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
  9070  	// cond: ValAndOff(valoff1).canAdd(off2)
  9071  	// result: (CMPBconstload [ValAndOff(valoff1).add(off2)] {sym} base mem)
  9072  	for {
  9073  		valoff1 := v.AuxInt
  9074  		sym := v.Aux
  9075  		_ = v.Args[1]
  9076  		v_0 := v.Args[0]
  9077  		if v_0.Op != OpAMD64ADDQconst {
  9078  			break
  9079  		}
  9080  		off2 := v_0.AuxInt
  9081  		base := v_0.Args[0]
  9082  		mem := v.Args[1]
  9083  		if !(ValAndOff(valoff1).canAdd(off2)) {
  9084  			break
  9085  		}
  9086  		v.reset(OpAMD64CMPBconstload)
  9087  		v.AuxInt = ValAndOff(valoff1).add(off2)
  9088  		v.Aux = sym
  9089  		v.AddArg(base)
  9090  		v.AddArg(mem)
  9091  		return true
  9092  	}
  9093  	// match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
  9094  	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
  9095  	// result: (CMPBconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
  9096  	for {
  9097  		valoff1 := v.AuxInt
  9098  		sym1 := v.Aux
  9099  		_ = v.Args[1]
  9100  		v_0 := v.Args[0]
  9101  		if v_0.Op != OpAMD64LEAQ {
  9102  			break
  9103  		}
  9104  		off2 := v_0.AuxInt
  9105  		sym2 := v_0.Aux
  9106  		base := v_0.Args[0]
  9107  		mem := v.Args[1]
  9108  		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
  9109  			break
  9110  		}
  9111  		v.reset(OpAMD64CMPBconstload)
  9112  		v.AuxInt = ValAndOff(valoff1).add(off2)
  9113  		v.Aux = mergeSym(sym1, sym2)
  9114  		v.AddArg(base)
  9115  		v.AddArg(mem)
  9116  		return true
  9117  	}
  9118  	return false
  9119  }
  9120  func rewriteValueAMD64_OpAMD64CMPBload_0(v *Value) bool {
  9121  	// match: (CMPBload [off1] {sym} (ADDQconst [off2] base) val mem)
  9122  	// cond: is32Bit(off1+off2)
  9123  	// result: (CMPBload [off1+off2] {sym} base val mem)
  9124  	for {
  9125  		off1 := v.AuxInt
  9126  		sym := v.Aux
  9127  		_ = v.Args[2]
  9128  		v_0 := v.Args[0]
  9129  		if v_0.Op != OpAMD64ADDQconst {
  9130  			break
  9131  		}
  9132  		off2 := v_0.AuxInt
  9133  		base := v_0.Args[0]
  9134  		val := v.Args[1]
  9135  		mem := v.Args[2]
  9136  		if !(is32Bit(off1 + off2)) {
  9137  			break
  9138  		}
  9139  		v.reset(OpAMD64CMPBload)
  9140  		v.AuxInt = off1 + off2
  9141  		v.Aux = sym
  9142  		v.AddArg(base)
  9143  		v.AddArg(val)
  9144  		v.AddArg(mem)
  9145  		return true
  9146  	}
  9147  	// match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  9148  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  9149  	// result: (CMPBload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  9150  	for {
  9151  		off1 := v.AuxInt
  9152  		sym1 := v.Aux
  9153  		_ = v.Args[2]
  9154  		v_0 := v.Args[0]
  9155  		if v_0.Op != OpAMD64LEAQ {
  9156  			break
  9157  		}
  9158  		off2 := v_0.AuxInt
  9159  		sym2 := v_0.Aux
  9160  		base := v_0.Args[0]
  9161  		val := v.Args[1]
  9162  		mem := v.Args[2]
  9163  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  9164  			break
  9165  		}
  9166  		v.reset(OpAMD64CMPBload)
  9167  		v.AuxInt = off1 + off2
  9168  		v.Aux = mergeSym(sym1, sym2)
  9169  		v.AddArg(base)
  9170  		v.AddArg(val)
  9171  		v.AddArg(mem)
  9172  		return true
  9173  	}
  9174  	// match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem)
  9175  	// cond: validValAndOff(int64(int8(c)),off)
  9176  	// result: (CMPBconstload {sym} [makeValAndOff(int64(int8(c)),off)] ptr mem)
  9177  	for {
  9178  		off := v.AuxInt
  9179  		sym := v.Aux
  9180  		_ = v.Args[2]
  9181  		ptr := v.Args[0]
  9182  		v_1 := v.Args[1]
  9183  		if v_1.Op != OpAMD64MOVLconst {
  9184  			break
  9185  		}
  9186  		c := v_1.AuxInt
  9187  		mem := v.Args[2]
  9188  		if !(validValAndOff(int64(int8(c)), off)) {
  9189  			break
  9190  		}
  9191  		v.reset(OpAMD64CMPBconstload)
  9192  		v.AuxInt = makeValAndOff(int64(int8(c)), off)
  9193  		v.Aux = sym
  9194  		v.AddArg(ptr)
  9195  		v.AddArg(mem)
  9196  		return true
  9197  	}
  9198  	return false
  9199  }
  9200  func rewriteValueAMD64_OpAMD64CMPL_0(v *Value) bool {
  9201  	b := v.Block
  9202  	_ = b
  9203  	// match: (CMPL x (MOVLconst [c]))
  9204  	// cond:
  9205  	// result: (CMPLconst x [c])
  9206  	for {
  9207  		_ = v.Args[1]
  9208  		x := v.Args[0]
  9209  		v_1 := v.Args[1]
  9210  		if v_1.Op != OpAMD64MOVLconst {
  9211  			break
  9212  		}
  9213  		c := v_1.AuxInt
  9214  		v.reset(OpAMD64CMPLconst)
  9215  		v.AuxInt = c
  9216  		v.AddArg(x)
  9217  		return true
  9218  	}
  9219  	// match: (CMPL (MOVLconst [c]) x)
  9220  	// cond:
  9221  	// result: (InvertFlags (CMPLconst x [c]))
  9222  	for {
  9223  		_ = v.Args[1]
  9224  		v_0 := v.Args[0]
  9225  		if v_0.Op != OpAMD64MOVLconst {
  9226  			break
  9227  		}
  9228  		c := v_0.AuxInt
  9229  		x := v.Args[1]
  9230  		v.reset(OpAMD64InvertFlags)
  9231  		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
  9232  		v0.AuxInt = c
  9233  		v0.AddArg(x)
  9234  		v.AddArg(v0)
  9235  		return true
  9236  	}
  9237  	// match: (CMPL l:(MOVLload {sym} [off] ptr mem) x)
  9238  	// cond: canMergeLoad(v, l) && clobber(l)
  9239  	// result: (CMPLload {sym} [off] ptr x mem)
  9240  	for {
  9241  		_ = v.Args[1]
  9242  		l := v.Args[0]
  9243  		if l.Op != OpAMD64MOVLload {
  9244  			break
  9245  		}
  9246  		off := l.AuxInt
  9247  		sym := l.Aux
  9248  		_ = l.Args[1]
  9249  		ptr := l.Args[0]
  9250  		mem := l.Args[1]
  9251  		x := v.Args[1]
  9252  		if !(canMergeLoad(v, l) && clobber(l)) {
  9253  			break
  9254  		}
  9255  		v.reset(OpAMD64CMPLload)
  9256  		v.AuxInt = off
  9257  		v.Aux = sym
  9258  		v.AddArg(ptr)
  9259  		v.AddArg(x)
  9260  		v.AddArg(mem)
  9261  		return true
  9262  	}
  9263  	// match: (CMPL x l:(MOVLload {sym} [off] ptr mem))
  9264  	// cond: canMergeLoad(v, l) && clobber(l)
  9265  	// result: (InvertFlags (CMPLload {sym} [off] ptr x mem))
  9266  	for {
  9267  		_ = v.Args[1]
  9268  		x := v.Args[0]
  9269  		l := v.Args[1]
  9270  		if l.Op != OpAMD64MOVLload {
  9271  			break
  9272  		}
  9273  		off := l.AuxInt
  9274  		sym := l.Aux
  9275  		_ = l.Args[1]
  9276  		ptr := l.Args[0]
  9277  		mem := l.Args[1]
  9278  		if !(canMergeLoad(v, l) && clobber(l)) {
  9279  			break
  9280  		}
  9281  		v.reset(OpAMD64InvertFlags)
  9282  		v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags)
  9283  		v0.AuxInt = off
  9284  		v0.Aux = sym
  9285  		v0.AddArg(ptr)
  9286  		v0.AddArg(x)
  9287  		v0.AddArg(mem)
  9288  		v.AddArg(v0)
  9289  		return true
  9290  	}
  9291  	return false
  9292  }
  9293  func rewriteValueAMD64_OpAMD64CMPLconst_0(v *Value) bool {
  9294  	// match: (CMPLconst (MOVLconst [x]) [y])
  9295  	// cond: int32(x)==int32(y)
  9296  	// result: (FlagEQ)
  9297  	for {
  9298  		y := v.AuxInt
  9299  		v_0 := v.Args[0]
  9300  		if v_0.Op != OpAMD64MOVLconst {
  9301  			break
  9302  		}
  9303  		x := v_0.AuxInt
  9304  		if !(int32(x) == int32(y)) {
  9305  			break
  9306  		}
  9307  		v.reset(OpAMD64FlagEQ)
  9308  		return true
  9309  	}
  9310  	// match: (CMPLconst (MOVLconst [x]) [y])
  9311  	// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
  9312  	// result: (FlagLT_ULT)
  9313  	for {
  9314  		y := v.AuxInt
  9315  		v_0 := v.Args[0]
  9316  		if v_0.Op != OpAMD64MOVLconst {
  9317  			break
  9318  		}
  9319  		x := v_0.AuxInt
  9320  		if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
  9321  			break
  9322  		}
  9323  		v.reset(OpAMD64FlagLT_ULT)
  9324  		return true
  9325  	}
  9326  	// match: (CMPLconst (MOVLconst [x]) [y])
  9327  	// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
  9328  	// result: (FlagLT_UGT)
  9329  	for {
  9330  		y := v.AuxInt
  9331  		v_0 := v.Args[0]
  9332  		if v_0.Op != OpAMD64MOVLconst {
  9333  			break
  9334  		}
  9335  		x := v_0.AuxInt
  9336  		if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
  9337  			break
  9338  		}
  9339  		v.reset(OpAMD64FlagLT_UGT)
  9340  		return true
  9341  	}
  9342  	// match: (CMPLconst (MOVLconst [x]) [y])
  9343  	// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
  9344  	// result: (FlagGT_ULT)
  9345  	for {
  9346  		y := v.AuxInt
  9347  		v_0 := v.Args[0]
  9348  		if v_0.Op != OpAMD64MOVLconst {
  9349  			break
  9350  		}
  9351  		x := v_0.AuxInt
  9352  		if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
  9353  			break
  9354  		}
  9355  		v.reset(OpAMD64FlagGT_ULT)
  9356  		return true
  9357  	}
  9358  	// match: (CMPLconst (MOVLconst [x]) [y])
  9359  	// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
  9360  	// result: (FlagGT_UGT)
  9361  	for {
  9362  		y := v.AuxInt
  9363  		v_0 := v.Args[0]
  9364  		if v_0.Op != OpAMD64MOVLconst {
  9365  			break
  9366  		}
  9367  		x := v_0.AuxInt
  9368  		if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
  9369  			break
  9370  		}
  9371  		v.reset(OpAMD64FlagGT_UGT)
  9372  		return true
  9373  	}
  9374  	// match: (CMPLconst (SHRLconst _ [c]) [n])
  9375  	// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
  9376  	// result: (FlagLT_ULT)
  9377  	for {
  9378  		n := v.AuxInt
  9379  		v_0 := v.Args[0]
  9380  		if v_0.Op != OpAMD64SHRLconst {
  9381  			break
  9382  		}
  9383  		c := v_0.AuxInt
  9384  		if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
  9385  			break
  9386  		}
  9387  		v.reset(OpAMD64FlagLT_ULT)
  9388  		return true
  9389  	}
  9390  	// match: (CMPLconst (ANDLconst _ [m]) [n])
  9391  	// cond: 0 <= int32(m) && int32(m) < int32(n)
  9392  	// result: (FlagLT_ULT)
  9393  	for {
  9394  		n := v.AuxInt
  9395  		v_0 := v.Args[0]
  9396  		if v_0.Op != OpAMD64ANDLconst {
  9397  			break
  9398  		}
  9399  		m := v_0.AuxInt
  9400  		if !(0 <= int32(m) && int32(m) < int32(n)) {
  9401  			break
  9402  		}
  9403  		v.reset(OpAMD64FlagLT_ULT)
  9404  		return true
  9405  	}
  9406  	// match: (CMPLconst (ANDL x y) [0])
  9407  	// cond:
  9408  	// result: (TESTL x y)
  9409  	for {
  9410  		if v.AuxInt != 0 {
  9411  			break
  9412  		}
  9413  		v_0 := v.Args[0]
  9414  		if v_0.Op != OpAMD64ANDL {
  9415  			break
  9416  		}
  9417  		_ = v_0.Args[1]
  9418  		x := v_0.Args[0]
  9419  		y := v_0.Args[1]
  9420  		v.reset(OpAMD64TESTL)
  9421  		v.AddArg(x)
  9422  		v.AddArg(y)
  9423  		return true
  9424  	}
  9425  	// match: (CMPLconst (ANDLconst [c] x) [0])
  9426  	// cond:
  9427  	// result: (TESTLconst [c] x)
  9428  	for {
  9429  		if v.AuxInt != 0 {
  9430  			break
  9431  		}
  9432  		v_0 := v.Args[0]
  9433  		if v_0.Op != OpAMD64ANDLconst {
  9434  			break
  9435  		}
  9436  		c := v_0.AuxInt
  9437  		x := v_0.Args[0]
  9438  		v.reset(OpAMD64TESTLconst)
  9439  		v.AuxInt = c
  9440  		v.AddArg(x)
  9441  		return true
  9442  	}
  9443  	// match: (CMPLconst x [0])
  9444  	// cond:
  9445  	// result: (TESTL x x)
  9446  	for {
  9447  		if v.AuxInt != 0 {
  9448  			break
  9449  		}
  9450  		x := v.Args[0]
  9451  		v.reset(OpAMD64TESTL)
  9452  		v.AddArg(x)
  9453  		v.AddArg(x)
  9454  		return true
  9455  	}
  9456  	return false
  9457  }
  9458  func rewriteValueAMD64_OpAMD64CMPLconst_10(v *Value) bool {
  9459  	b := v.Block
  9460  	_ = b
  9461  	// match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c])
  9462  	// cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l)
  9463  	// result: @l.Block (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
  9464  	for {
  9465  		c := v.AuxInt
  9466  		l := v.Args[0]
  9467  		if l.Op != OpAMD64MOVLload {
  9468  			break
  9469  		}
  9470  		off := l.AuxInt
  9471  		sym := l.Aux
  9472  		_ = l.Args[1]
  9473  		ptr := l.Args[0]
  9474  		mem := l.Args[1]
  9475  		if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) {
  9476  			break
  9477  		}
  9478  		b = l.Block
  9479  		v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
  9480  		v.reset(OpCopy)
  9481  		v.AddArg(v0)
  9482  		v0.AuxInt = makeValAndOff(c, off)
  9483  		v0.Aux = sym
  9484  		v0.AddArg(ptr)
  9485  		v0.AddArg(mem)
  9486  		return true
  9487  	}
  9488  	return false
  9489  }
  9490  func rewriteValueAMD64_OpAMD64CMPLconstload_0(v *Value) bool {
  9491  	// match: (CMPLconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
  9492  	// cond: ValAndOff(valoff1).canAdd(off2)
  9493  	// result: (CMPLconstload [ValAndOff(valoff1).add(off2)] {sym} base mem)
  9494  	for {
  9495  		valoff1 := v.AuxInt
  9496  		sym := v.Aux
  9497  		_ = v.Args[1]
  9498  		v_0 := v.Args[0]
  9499  		if v_0.Op != OpAMD64ADDQconst {
  9500  			break
  9501  		}
  9502  		off2 := v_0.AuxInt
  9503  		base := v_0.Args[0]
  9504  		mem := v.Args[1]
  9505  		if !(ValAndOff(valoff1).canAdd(off2)) {
  9506  			break
  9507  		}
  9508  		v.reset(OpAMD64CMPLconstload)
  9509  		v.AuxInt = ValAndOff(valoff1).add(off2)
  9510  		v.Aux = sym
  9511  		v.AddArg(base)
  9512  		v.AddArg(mem)
  9513  		return true
  9514  	}
  9515  	// match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
  9516  	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
  9517  	// result: (CMPLconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
  9518  	for {
  9519  		valoff1 := v.AuxInt
  9520  		sym1 := v.Aux
  9521  		_ = v.Args[1]
  9522  		v_0 := v.Args[0]
  9523  		if v_0.Op != OpAMD64LEAQ {
  9524  			break
  9525  		}
  9526  		off2 := v_0.AuxInt
  9527  		sym2 := v_0.Aux
  9528  		base := v_0.Args[0]
  9529  		mem := v.Args[1]
  9530  		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
  9531  			break
  9532  		}
  9533  		v.reset(OpAMD64CMPLconstload)
  9534  		v.AuxInt = ValAndOff(valoff1).add(off2)
  9535  		v.Aux = mergeSym(sym1, sym2)
  9536  		v.AddArg(base)
  9537  		v.AddArg(mem)
  9538  		return true
  9539  	}
  9540  	return false
  9541  }
  9542  func rewriteValueAMD64_OpAMD64CMPLload_0(v *Value) bool {
  9543  	// match: (CMPLload [off1] {sym} (ADDQconst [off2] base) val mem)
  9544  	// cond: is32Bit(off1+off2)
  9545  	// result: (CMPLload [off1+off2] {sym} base val mem)
  9546  	for {
  9547  		off1 := v.AuxInt
  9548  		sym := v.Aux
  9549  		_ = v.Args[2]
  9550  		v_0 := v.Args[0]
  9551  		if v_0.Op != OpAMD64ADDQconst {
  9552  			break
  9553  		}
  9554  		off2 := v_0.AuxInt
  9555  		base := v_0.Args[0]
  9556  		val := v.Args[1]
  9557  		mem := v.Args[2]
  9558  		if !(is32Bit(off1 + off2)) {
  9559  			break
  9560  		}
  9561  		v.reset(OpAMD64CMPLload)
  9562  		v.AuxInt = off1 + off2
  9563  		v.Aux = sym
  9564  		v.AddArg(base)
  9565  		v.AddArg(val)
  9566  		v.AddArg(mem)
  9567  		return true
  9568  	}
  9569  	// match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  9570  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
  9571  	// result: (CMPLload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  9572  	for {
  9573  		off1 := v.AuxInt
  9574  		sym1 := v.Aux
  9575  		_ = v.Args[2]
  9576  		v_0 := v.Args[0]
  9577  		if v_0.Op != OpAMD64LEAQ {
  9578  			break
  9579  		}
  9580  		off2 := v_0.AuxInt
  9581  		sym2 := v_0.Aux
  9582  		base := v_0.Args[0]
  9583  		val := v.Args[1]
  9584  		mem := v.Args[2]
  9585  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
  9586  			break
  9587  		}
  9588  		v.reset(OpAMD64CMPLload)
  9589  		v.AuxInt = off1 + off2
  9590  		v.Aux = mergeSym(sym1, sym2)
  9591  		v.AddArg(base)
  9592  		v.AddArg(val)
  9593  		v.AddArg(mem)
  9594  		return true
  9595  	}
  9596  	// match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem)
  9597  	// cond: validValAndOff(c,off)
  9598  	// result: (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
  9599  	for {
  9600  		off := v.AuxInt
  9601  		sym := v.Aux
  9602  		_ = v.Args[2]
  9603  		ptr := v.Args[0]
  9604  		v_1 := v.Args[1]
  9605  		if v_1.Op != OpAMD64MOVLconst {
  9606  			break
  9607  		}
  9608  		c := v_1.AuxInt
  9609  		mem := v.Args[2]
  9610  		if !(validValAndOff(c, off)) {
  9611  			break
  9612  		}
  9613  		v.reset(OpAMD64CMPLconstload)
  9614  		v.AuxInt = makeValAndOff(c, off)
  9615  		v.Aux = sym
  9616  		v.AddArg(ptr)
  9617  		v.AddArg(mem)
  9618  		return true
  9619  	}
  9620  	return false
  9621  }
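// Note (editorial summary, derived from the rules below): CMPQ follows the same
// shape as CMPB and CMPL above, with one extra guard. A MOVQconst operand is folded
// into CMPQconst only when is32Bit(c) holds, because the AMD64 compare-immediate
// encodings sign-extend a 32-bit immediate and cannot express an arbitrary 64-bit
// constant.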
  9622  func rewriteValueAMD64_OpAMD64CMPQ_0(v *Value) bool {
  9623  	b := v.Block
  9624  	_ = b
  9625  	// match: (CMPQ x (MOVQconst [c]))
  9626  	// cond: is32Bit(c)
  9627  	// result: (CMPQconst x [c])
  9628  	for {
  9629  		_ = v.Args[1]
  9630  		x := v.Args[0]
  9631  		v_1 := v.Args[1]
  9632  		if v_1.Op != OpAMD64MOVQconst {
  9633  			break
  9634  		}
  9635  		c := v_1.AuxInt
  9636  		if !(is32Bit(c)) {
  9637  			break
  9638  		}
  9639  		v.reset(OpAMD64CMPQconst)
  9640  		v.AuxInt = c
  9641  		v.AddArg(x)
  9642  		return true
  9643  	}
  9644  	// match: (CMPQ (MOVQconst [c]) x)
  9645  	// cond: is32Bit(c)
  9646  	// result: (InvertFlags (CMPQconst x [c]))
  9647  	for {
  9648  		_ = v.Args[1]
  9649  		v_0 := v.Args[0]
  9650  		if v_0.Op != OpAMD64MOVQconst {
  9651  			break
  9652  		}
  9653  		c := v_0.AuxInt
  9654  		x := v.Args[1]
  9655  		if !(is32Bit(c)) {
  9656  			break
  9657  		}
  9658  		v.reset(OpAMD64InvertFlags)
  9659  		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
  9660  		v0.AuxInt = c
  9661  		v0.AddArg(x)
  9662  		v.AddArg(v0)
  9663  		return true
  9664  	}
  9665  	// match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x)
  9666  	// cond: canMergeLoad(v, l) && clobber(l)
  9667  	// result: (CMPQload {sym} [off] ptr x mem)
  9668  	for {
  9669  		_ = v.Args[1]
  9670  		l := v.Args[0]
  9671  		if l.Op != OpAMD64MOVQload {
  9672  			break
  9673  		}
  9674  		off := l.AuxInt
  9675  		sym := l.Aux
  9676  		_ = l.Args[1]
  9677  		ptr := l.Args[0]
  9678  		mem := l.Args[1]
  9679  		x := v.Args[1]
  9680  		if !(canMergeLoad(v, l) && clobber(l)) {
  9681  			break
  9682  		}
  9683  		v.reset(OpAMD64CMPQload)
  9684  		v.AuxInt = off
  9685  		v.Aux = sym
  9686  		v.AddArg(ptr)
  9687  		v.AddArg(x)
  9688  		v.AddArg(mem)
  9689  		return true
  9690  	}
  9691  	// match: (CMPQ x l:(MOVQload {sym} [off] ptr mem))
  9692  	// cond: canMergeLoad(v, l) && clobber(l)
  9693  	// result: (InvertFlags (CMPQload {sym} [off] ptr x mem))
  9694  	for {
  9695  		_ = v.Args[1]
  9696  		x := v.Args[0]
  9697  		l := v.Args[1]
  9698  		if l.Op != OpAMD64MOVQload {
  9699  			break
  9700  		}
  9701  		off := l.AuxInt
  9702  		sym := l.Aux
  9703  		_ = l.Args[1]
  9704  		ptr := l.Args[0]
  9705  		mem := l.Args[1]
  9706  		if !(canMergeLoad(v, l) && clobber(l)) {
  9707  			break
  9708  		}
  9709  		v.reset(OpAMD64InvertFlags)
  9710  		v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags)
  9711  		v0.AuxInt = off
  9712  		v0.Aux = sym
  9713  		v0.AddArg(ptr)
  9714  		v0.AddArg(x)
  9715  		v0.AddArg(mem)
  9716  		v.AddArg(v0)
  9717  		return true
  9718  	}
  9719  	return false
  9720  }
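// Note (editorial, reasoning behind the first two rules below): a value of the form
// NEGQ(ADDQconst [-16] (ANDQconst [15] _)) is 16-(x&15), which always lies in
// 1..16, and the analogous -8/7 form is 8-(x&7), which lies in 1..8. Either way the
// value is unsigned-less-than 32, so comparing it against 32 can be replaced
// outright by FlagLT_ULT.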
  9721  func rewriteValueAMD64_OpAMD64CMPQconst_0(v *Value) bool {
  9722  	// match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32])
  9723  	// cond:
  9724  	// result: (FlagLT_ULT)
  9725  	for {
  9726  		if v.AuxInt != 32 {
  9727  			break
  9728  		}
  9729  		v_0 := v.Args[0]
  9730  		if v_0.Op != OpAMD64NEGQ {
  9731  			break
  9732  		}
  9733  		v_0_0 := v_0.Args[0]
  9734  		if v_0_0.Op != OpAMD64ADDQconst {
  9735  			break
  9736  		}
  9737  		if v_0_0.AuxInt != -16 {
  9738  			break
  9739  		}
  9740  		v_0_0_0 := v_0_0.Args[0]
  9741  		if v_0_0_0.Op != OpAMD64ANDQconst {
  9742  			break
  9743  		}
  9744  		if v_0_0_0.AuxInt != 15 {
  9745  			break
  9746  		}
  9747  		v.reset(OpAMD64FlagLT_ULT)
  9748  		return true
  9749  	}
  9750  	// match: (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32])
  9751  	// cond:
  9752  	// result: (FlagLT_ULT)
  9753  	for {
  9754  		if v.AuxInt != 32 {
  9755  			break
  9756  		}
  9757  		v_0 := v.Args[0]
  9758  		if v_0.Op != OpAMD64NEGQ {
  9759  			break
  9760  		}
  9761  		v_0_0 := v_0.Args[0]
  9762  		if v_0_0.Op != OpAMD64ADDQconst {
  9763  			break
  9764  		}
  9765  		if v_0_0.AuxInt != -8 {
  9766  			break
  9767  		}
  9768  		v_0_0_0 := v_0_0.Args[0]
  9769  		if v_0_0_0.Op != OpAMD64ANDQconst {
  9770  			break
  9771  		}
  9772  		if v_0_0_0.AuxInt != 7 {
  9773  			break
  9774  		}
  9775  		v.reset(OpAMD64FlagLT_ULT)
  9776  		return true
  9777  	}
  9778  	// match: (CMPQconst (MOVQconst [x]) [y])
  9779  	// cond: x==y
  9780  	// result: (FlagEQ)
  9781  	for {
  9782  		y := v.AuxInt
  9783  		v_0 := v.Args[0]
  9784  		if v_0.Op != OpAMD64MOVQconst {
  9785  			break
  9786  		}
  9787  		x := v_0.AuxInt
  9788  		if !(x == y) {
  9789  			break
  9790  		}
  9791  		v.reset(OpAMD64FlagEQ)
  9792  		return true
  9793  	}
  9794  	// match: (CMPQconst (MOVQconst [x]) [y])
  9795  	// cond: x<y && uint64(x)<uint64(y)
  9796  	// result: (FlagLT_ULT)
  9797  	for {
  9798  		y := v.AuxInt
  9799  		v_0 := v.Args[0]
  9800  		if v_0.Op != OpAMD64MOVQconst {
  9801  			break
  9802  		}
  9803  		x := v_0.AuxInt
  9804  		if !(x < y && uint64(x) < uint64(y)) {
  9805  			break
  9806  		}
  9807  		v.reset(OpAMD64FlagLT_ULT)
  9808  		return true
  9809  	}
  9810  	// match: (CMPQconst (MOVQconst [x]) [y])
  9811  	// cond: x<y && uint64(x)>uint64(y)
  9812  	// result: (FlagLT_UGT)
  9813  	for {
  9814  		y := v.AuxInt
  9815  		v_0 := v.Args[0]
  9816  		if v_0.Op != OpAMD64MOVQconst {
  9817  			break
  9818  		}
  9819  		x := v_0.AuxInt
  9820  		if !(x < y && uint64(x) > uint64(y)) {
  9821  			break
  9822  		}
  9823  		v.reset(OpAMD64FlagLT_UGT)
  9824  		return true
  9825  	}
  9826  	// match: (CMPQconst (MOVQconst [x]) [y])
  9827  	// cond: x>y && uint64(x)<uint64(y)
  9828  	// result: (FlagGT_ULT)
  9829  	for {
  9830  		y := v.AuxInt
  9831  		v_0 := v.Args[0]
  9832  		if v_0.Op != OpAMD64MOVQconst {
  9833  			break
  9834  		}
  9835  		x := v_0.AuxInt
  9836  		if !(x > y && uint64(x) < uint64(y)) {
  9837  			break
  9838  		}
  9839  		v.reset(OpAMD64FlagGT_ULT)
  9840  		return true
  9841  	}
  9842  	// match: (CMPQconst (MOVQconst [x]) [y])
  9843  	// cond: x>y && uint64(x)>uint64(y)
  9844  	// result: (FlagGT_UGT)
  9845  	for {
  9846  		y := v.AuxInt
  9847  		v_0 := v.Args[0]
  9848  		if v_0.Op != OpAMD64MOVQconst {
  9849  			break
  9850  		}
  9851  		x := v_0.AuxInt
  9852  		if !(x > y && uint64(x) > uint64(y)) {
  9853  			break
  9854  		}
  9855  		v.reset(OpAMD64FlagGT_UGT)
  9856  		return true
  9857  	}
  9858  	// match: (CMPQconst (MOVBQZX _) [c])
  9859  	// cond: 0xFF < c
  9860  	// result: (FlagLT_ULT)
  9861  	for {
  9862  		c := v.AuxInt
  9863  		v_0 := v.Args[0]
  9864  		if v_0.Op != OpAMD64MOVBQZX {
  9865  			break
  9866  		}
  9867  		if !(0xFF < c) {
  9868  			break
  9869  		}
  9870  		v.reset(OpAMD64FlagLT_ULT)
  9871  		return true
  9872  	}
  9873  	// match: (CMPQconst (MOVWQZX _) [c])
  9874  	// cond: 0xFFFF < c
  9875  	// result: (FlagLT_ULT)
  9876  	for {
  9877  		c := v.AuxInt
  9878  		v_0 := v.Args[0]
  9879  		if v_0.Op != OpAMD64MOVWQZX {
  9880  			break
  9881  		}
  9882  		if !(0xFFFF < c) {
  9883  			break
  9884  		}
  9885  		v.reset(OpAMD64FlagLT_ULT)
  9886  		return true
  9887  	}
  9888  	// match: (CMPQconst (MOVLQZX _) [c])
  9889  	// cond: 0xFFFFFFFF < c
  9890  	// result: (FlagLT_ULT)
  9891  	for {
  9892  		c := v.AuxInt
  9893  		v_0 := v.Args[0]
  9894  		if v_0.Op != OpAMD64MOVLQZX {
  9895  			break
  9896  		}
  9897  		if !(0xFFFFFFFF < c) {
  9898  			break
  9899  		}
  9900  		v.reset(OpAMD64FlagLT_ULT)
  9901  		return true
  9902  	}
  9903  	return false
  9904  }
  9905  func rewriteValueAMD64_OpAMD64CMPQconst_10(v *Value) bool {
  9906  	b := v.Block
  9907  	_ = b
  9908  	// match: (CMPQconst (SHRQconst _ [c]) [n])
  9909  	// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
  9910  	// result: (FlagLT_ULT)
  9911  	for {
  9912  		n := v.AuxInt
  9913  		v_0 := v.Args[0]
  9914  		if v_0.Op != OpAMD64SHRQconst {
  9915  			break
  9916  		}
  9917  		c := v_0.AuxInt
  9918  		if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
  9919  			break
  9920  		}
  9921  		v.reset(OpAMD64FlagLT_ULT)
  9922  		return true
  9923  	}
  9924  	// match: (CMPQconst (ANDQconst _ [m]) [n])
  9925  	// cond: 0 <= m && m < n
  9926  	// result: (FlagLT_ULT)
  9927  	for {
  9928  		n := v.AuxInt
  9929  		v_0 := v.Args[0]
  9930  		if v_0.Op != OpAMD64ANDQconst {
  9931  			break
  9932  		}
  9933  		m := v_0.AuxInt
  9934  		if !(0 <= m && m < n) {
  9935  			break
  9936  		}
  9937  		v.reset(OpAMD64FlagLT_ULT)
  9938  		return true
  9939  	}
  9940  	// match: (CMPQconst (ANDLconst _ [m]) [n])
  9941  	// cond: 0 <= m && m < n
  9942  	// result: (FlagLT_ULT)
  9943  	for {
  9944  		n := v.AuxInt
  9945  		v_0 := v.Args[0]
  9946  		if v_0.Op != OpAMD64ANDLconst {
  9947  			break
  9948  		}
  9949  		m := v_0.AuxInt
  9950  		if !(0 <= m && m < n) {
  9951  			break
  9952  		}
  9953  		v.reset(OpAMD64FlagLT_ULT)
  9954  		return true
  9955  	}
  9956  	// match: (CMPQconst (ANDQ x y) [0])
  9957  	// cond:
  9958  	// result: (TESTQ x y)
  9959  	for {
  9960  		if v.AuxInt != 0 {
  9961  			break
  9962  		}
  9963  		v_0 := v.Args[0]
  9964  		if v_0.Op != OpAMD64ANDQ {
  9965  			break
  9966  		}
  9967  		_ = v_0.Args[1]
  9968  		x := v_0.Args[0]
  9969  		y := v_0.Args[1]
  9970  		v.reset(OpAMD64TESTQ)
  9971  		v.AddArg(x)
  9972  		v.AddArg(y)
  9973  		return true
  9974  	}
  9975  	// match: (CMPQconst (ANDQconst [c] x) [0])
  9976  	// cond:
  9977  	// result: (TESTQconst [c] x)
  9978  	for {
  9979  		if v.AuxInt != 0 {
  9980  			break
  9981  		}
  9982  		v_0 := v.Args[0]
  9983  		if v_0.Op != OpAMD64ANDQconst {
  9984  			break
  9985  		}
  9986  		c := v_0.AuxInt
  9987  		x := v_0.Args[0]
  9988  		v.reset(OpAMD64TESTQconst)
  9989  		v.AuxInt = c
  9990  		v.AddArg(x)
  9991  		return true
  9992  	}
  9993  	// match: (CMPQconst x [0])
  9994  	// cond:
  9995  	// result: (TESTQ x x)
  9996  	for {
  9997  		if v.AuxInt != 0 {
  9998  			break
  9999  		}
 10000  		x := v.Args[0]
 10001  		v.reset(OpAMD64TESTQ)
 10002  		v.AddArg(x)
 10003  		v.AddArg(x)
 10004  		return true
 10005  	}
 10006  	// match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c])
 10007  	// cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l)
 10008  	// result: @l.Block (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem)
 10009  	for {
 10010  		c := v.AuxInt
 10011  		l := v.Args[0]
 10012  		if l.Op != OpAMD64MOVQload {
 10013  			break
 10014  		}
 10015  		off := l.AuxInt
 10016  		sym := l.Aux
 10017  		_ = l.Args[1]
 10018  		ptr := l.Args[0]
 10019  		mem := l.Args[1]
 10020  		if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) {
 10021  			break
 10022  		}
 10023  		b = l.Block
 10024  		v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
 10025  		v.reset(OpCopy)
 10026  		v.AddArg(v0)
 10027  		v0.AuxInt = makeValAndOff(c, off)
 10028  		v0.Aux = sym
 10029  		v0.AddArg(ptr)
 10030  		v0.AddArg(mem)
 10031  		return true
 10032  	}
 10033  	return false
 10034  }
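// rewriteValueAMD64_OpAMD64CMPQconstload_0 folds address arithmetic on the
// base pointer into a CMPQconstload: an ADDQconst is absorbed into the
// instruction's ValAndOff offset when the sum still fits, and a LEAQ
// additionally merges its symbol into Aux when canMergeSym allows it.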
 10035  func rewriteValueAMD64_OpAMD64CMPQconstload_0(v *Value) bool {
 10036  	// match: (CMPQconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
 10037  	// cond: ValAndOff(valoff1).canAdd(off2)
 10038  	// result: (CMPQconstload [ValAndOff(valoff1).add(off2)] {sym} base mem)
 10039  	for {
 10040  		valoff1 := v.AuxInt
 10041  		sym := v.Aux
 10042  		_ = v.Args[1]
 10043  		v_0 := v.Args[0]
 10044  		if v_0.Op != OpAMD64ADDQconst {
 10045  			break
 10046  		}
 10047  		off2 := v_0.AuxInt
 10048  		base := v_0.Args[0]
 10049  		mem := v.Args[1]
 10050  		if !(ValAndOff(valoff1).canAdd(off2)) {
 10051  			break
 10052  		}
 10053  		v.reset(OpAMD64CMPQconstload)
 10054  		v.AuxInt = ValAndOff(valoff1).add(off2)
 10055  		v.Aux = sym
 10056  		v.AddArg(base)
 10057  		v.AddArg(mem)
 10058  		return true
 10059  	}
 10060  	// match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
 10061  	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
 10062  	// result: (CMPQconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
 10063  	for {
 10064  		valoff1 := v.AuxInt
 10065  		sym1 := v.Aux
 10066  		_ = v.Args[1]
 10067  		v_0 := v.Args[0]
 10068  		if v_0.Op != OpAMD64LEAQ {
 10069  			break
 10070  		}
 10071  		off2 := v_0.AuxInt
 10072  		sym2 := v_0.Aux
 10073  		base := v_0.Args[0]
 10074  		mem := v.Args[1]
 10075  		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
 10076  			break
 10077  		}
 10078  		v.reset(OpAMD64CMPQconstload)
 10079  		v.AuxInt = ValAndOff(valoff1).add(off2)
 10080  		v.Aux = mergeSym(sym1, sym2)
 10081  		v.AddArg(base)
 10082  		v.AddArg(mem)
 10083  		return true
 10084  	}
 10085  	return false
 10086  }
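// rewriteValueAMD64_OpAMD64CMPQload_0 folds an ADDQconst or LEAQ on the base
// pointer into the CMPQload offset/symbol, and turns a compare against a
// MOVQconst value into a CMPQconstload when the constant and offset fit in a
// ValAndOff.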
 10087  func rewriteValueAMD64_OpAMD64CMPQload_0(v *Value) bool {
 10088  	// match: (CMPQload [off1] {sym} (ADDQconst [off2] base) val mem)
 10089  	// cond: is32Bit(off1+off2)
 10090  	// result: (CMPQload [off1+off2] {sym} base val mem)
 10091  	for {
 10092  		off1 := v.AuxInt
 10093  		sym := v.Aux
 10094  		_ = v.Args[2]
 10095  		v_0 := v.Args[0]
 10096  		if v_0.Op != OpAMD64ADDQconst {
 10097  			break
 10098  		}
 10099  		off2 := v_0.AuxInt
 10100  		base := v_0.Args[0]
 10101  		val := v.Args[1]
 10102  		mem := v.Args[2]
 10103  		if !(is32Bit(off1 + off2)) {
 10104  			break
 10105  		}
 10106  		v.reset(OpAMD64CMPQload)
 10107  		v.AuxInt = off1 + off2
 10108  		v.Aux = sym
 10109  		v.AddArg(base)
 10110  		v.AddArg(val)
 10111  		v.AddArg(mem)
 10112  		return true
 10113  	}
 10114  	// match: (CMPQload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
 10115  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 10116  	// result: (CMPQload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
 10117  	for {
 10118  		off1 := v.AuxInt
 10119  		sym1 := v.Aux
 10120  		_ = v.Args[2]
 10121  		v_0 := v.Args[0]
 10122  		if v_0.Op != OpAMD64LEAQ {
 10123  			break
 10124  		}
 10125  		off2 := v_0.AuxInt
 10126  		sym2 := v_0.Aux
 10127  		base := v_0.Args[0]
 10128  		val := v.Args[1]
 10129  		mem := v.Args[2]
 10130  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 10131  			break
 10132  		}
 10133  		v.reset(OpAMD64CMPQload)
 10134  		v.AuxInt = off1 + off2
 10135  		v.Aux = mergeSym(sym1, sym2)
 10136  		v.AddArg(base)
 10137  		v.AddArg(val)
 10138  		v.AddArg(mem)
 10139  		return true
 10140  	}
 10141  	// match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem)
 10142  	// cond: validValAndOff(c,off)
 10143  	// result: (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem)
 10144  	for {
 10145  		off := v.AuxInt
 10146  		sym := v.Aux
 10147  		_ = v.Args[2]
 10148  		ptr := v.Args[0]
 10149  		v_1 := v.Args[1]
 10150  		if v_1.Op != OpAMD64MOVQconst {
 10151  			break
 10152  		}
 10153  		c := v_1.AuxInt
 10154  		mem := v.Args[2]
 10155  		if !(validValAndOff(c, off)) {
 10156  			break
 10157  		}
 10158  		v.reset(OpAMD64CMPQconstload)
 10159  		v.AuxInt = makeValAndOff(c, off)
 10160  		v.Aux = sym
 10161  		v.AddArg(ptr)
 10162  		v.AddArg(mem)
 10163  		return true
 10164  	}
 10165  	return false
 10166  }
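// rewriteValueAMD64_OpAMD64CMPW_0 canonicalizes 16-bit compares: a MOVLconst
// operand becomes a CMPWconst of the truncated constant (wrapped in
// InvertFlags when the constant is the first operand), and a mergeable
// MOVWload operand is folded into a CMPWload (via InvertFlags when the load
// is the second operand, since CMPWload keeps the loaded value on the left).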
 10167  func rewriteValueAMD64_OpAMD64CMPW_0(v *Value) bool {
 10168  	b := v.Block
 10169  	_ = b
 10170  	// match: (CMPW x (MOVLconst [c]))
 10171  	// cond:
 10172  	// result: (CMPWconst x [int64(int16(c))])
 10173  	for {
 10174  		_ = v.Args[1]
 10175  		x := v.Args[0]
 10176  		v_1 := v.Args[1]
 10177  		if v_1.Op != OpAMD64MOVLconst {
 10178  			break
 10179  		}
 10180  		c := v_1.AuxInt
 10181  		v.reset(OpAMD64CMPWconst)
 10182  		v.AuxInt = int64(int16(c))
 10183  		v.AddArg(x)
 10184  		return true
 10185  	}
 10186  	// match: (CMPW (MOVLconst [c]) x)
 10187  	// cond:
 10188  	// result: (InvertFlags (CMPWconst x [int64(int16(c))]))
 10189  	for {
 10190  		_ = v.Args[1]
 10191  		v_0 := v.Args[0]
 10192  		if v_0.Op != OpAMD64MOVLconst {
 10193  			break
 10194  		}
 10195  		c := v_0.AuxInt
 10196  		x := v.Args[1]
 10197  		v.reset(OpAMD64InvertFlags)
 10198  		v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
 10199  		v0.AuxInt = int64(int16(c))
 10200  		v0.AddArg(x)
 10201  		v.AddArg(v0)
 10202  		return true
 10203  	}
 10204  	// match: (CMPW l:(MOVWload {sym} [off] ptr mem) x)
 10205  	// cond: canMergeLoad(v, l) && clobber(l)
 10206  	// result: (CMPWload {sym} [off] ptr x mem)
 10207  	for {
 10208  		_ = v.Args[1]
 10209  		l := v.Args[0]
 10210  		if l.Op != OpAMD64MOVWload {
 10211  			break
 10212  		}
 10213  		off := l.AuxInt
 10214  		sym := l.Aux
 10215  		_ = l.Args[1]
 10216  		ptr := l.Args[0]
 10217  		mem := l.Args[1]
 10218  		x := v.Args[1]
 10219  		if !(canMergeLoad(v, l) && clobber(l)) {
 10220  			break
 10221  		}
 10222  		v.reset(OpAMD64CMPWload)
 10223  		v.AuxInt = off
 10224  		v.Aux = sym
 10225  		v.AddArg(ptr)
 10226  		v.AddArg(x)
 10227  		v.AddArg(mem)
 10228  		return true
 10229  	}
 10230  	// match: (CMPW x l:(MOVWload {sym} [off] ptr mem))
 10231  	// cond: canMergeLoad(v, l) && clobber(l)
 10232  	// result: (InvertFlags (CMPWload {sym} [off] ptr x mem))
 10233  	for {
 10234  		_ = v.Args[1]
 10235  		x := v.Args[0]
 10236  		l := v.Args[1]
 10237  		if l.Op != OpAMD64MOVWload {
 10238  			break
 10239  		}
 10240  		off := l.AuxInt
 10241  		sym := l.Aux
 10242  		_ = l.Args[1]
 10243  		ptr := l.Args[0]
 10244  		mem := l.Args[1]
 10245  		if !(canMergeLoad(v, l) && clobber(l)) {
 10246  			break
 10247  		}
 10248  		v.reset(OpAMD64InvertFlags)
 10249  		v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags)
 10250  		v0.AuxInt = off
 10251  		v0.Aux = sym
 10252  		v0.AddArg(ptr)
 10253  		v0.AddArg(x)
 10254  		v0.AddArg(mem)
 10255  		v.AddArg(v0)
 10256  		return true
 10257  	}
 10258  	return false
 10259  }
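// rewriteValueAMD64_OpAMD64CMPWconst_0 resolves CMPWconst of a MOVLconst into
// the matching Flag* value using 16-bit signed and unsigned orderings, proves
// FlagLT_ULT when the operand is an ANDLconst mask known to be below the
// constant, rewrites compares against zero as TESTW/TESTWconst, and folds a
// single-use MOVWload into a CMPWconstload placed in the load's block.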
 10260  func rewriteValueAMD64_OpAMD64CMPWconst_0(v *Value) bool {
 10261  	b := v.Block
 10262  	_ = b
 10263  	// match: (CMPWconst (MOVLconst [x]) [y])
 10264  	// cond: int16(x)==int16(y)
 10265  	// result: (FlagEQ)
 10266  	for {
 10267  		y := v.AuxInt
 10268  		v_0 := v.Args[0]
 10269  		if v_0.Op != OpAMD64MOVLconst {
 10270  			break
 10271  		}
 10272  		x := v_0.AuxInt
 10273  		if !(int16(x) == int16(y)) {
 10274  			break
 10275  		}
 10276  		v.reset(OpAMD64FlagEQ)
 10277  		return true
 10278  	}
 10279  	// match: (CMPWconst (MOVLconst [x]) [y])
 10280  	// cond: int16(x)<int16(y) && uint16(x)<uint16(y)
 10281  	// result: (FlagLT_ULT)
 10282  	for {
 10283  		y := v.AuxInt
 10284  		v_0 := v.Args[0]
 10285  		if v_0.Op != OpAMD64MOVLconst {
 10286  			break
 10287  		}
 10288  		x := v_0.AuxInt
 10289  		if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
 10290  			break
 10291  		}
 10292  		v.reset(OpAMD64FlagLT_ULT)
 10293  		return true
 10294  	}
 10295  	// match: (CMPWconst (MOVLconst [x]) [y])
 10296  	// cond: int16(x)<int16(y) && uint16(x)>uint16(y)
 10297  	// result: (FlagLT_UGT)
 10298  	for {
 10299  		y := v.AuxInt
 10300  		v_0 := v.Args[0]
 10301  		if v_0.Op != OpAMD64MOVLconst {
 10302  			break
 10303  		}
 10304  		x := v_0.AuxInt
 10305  		if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
 10306  			break
 10307  		}
 10308  		v.reset(OpAMD64FlagLT_UGT)
 10309  		return true
 10310  	}
 10311  	// match: (CMPWconst (MOVLconst [x]) [y])
 10312  	// cond: int16(x)>int16(y) && uint16(x)<uint16(y)
 10313  	// result: (FlagGT_ULT)
 10314  	for {
 10315  		y := v.AuxInt
 10316  		v_0 := v.Args[0]
 10317  		if v_0.Op != OpAMD64MOVLconst {
 10318  			break
 10319  		}
 10320  		x := v_0.AuxInt
 10321  		if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
 10322  			break
 10323  		}
 10324  		v.reset(OpAMD64FlagGT_ULT)
 10325  		return true
 10326  	}
 10327  	// match: (CMPWconst (MOVLconst [x]) [y])
 10328  	// cond: int16(x)>int16(y) && uint16(x)>uint16(y)
 10329  	// result: (FlagGT_UGT)
 10330  	for {
 10331  		y := v.AuxInt
 10332  		v_0 := v.Args[0]
 10333  		if v_0.Op != OpAMD64MOVLconst {
 10334  			break
 10335  		}
 10336  		x := v_0.AuxInt
 10337  		if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
 10338  			break
 10339  		}
 10340  		v.reset(OpAMD64FlagGT_UGT)
 10341  		return true
 10342  	}
 10343  	// match: (CMPWconst (ANDLconst _ [m]) [n])
 10344  	// cond: 0 <= int16(m) && int16(m) < int16(n)
 10345  	// result: (FlagLT_ULT)
 10346  	for {
 10347  		n := v.AuxInt
 10348  		v_0 := v.Args[0]
 10349  		if v_0.Op != OpAMD64ANDLconst {
 10350  			break
 10351  		}
 10352  		m := v_0.AuxInt
 10353  		if !(0 <= int16(m) && int16(m) < int16(n)) {
 10354  			break
 10355  		}
 10356  		v.reset(OpAMD64FlagLT_ULT)
 10357  		return true
 10358  	}
 10359  	// match: (CMPWconst (ANDL x y) [0])
 10360  	// cond:
 10361  	// result: (TESTW x y)
 10362  	for {
 10363  		if v.AuxInt != 0 {
 10364  			break
 10365  		}
 10366  		v_0 := v.Args[0]
 10367  		if v_0.Op != OpAMD64ANDL {
 10368  			break
 10369  		}
 10370  		_ = v_0.Args[1]
 10371  		x := v_0.Args[0]
 10372  		y := v_0.Args[1]
 10373  		v.reset(OpAMD64TESTW)
 10374  		v.AddArg(x)
 10375  		v.AddArg(y)
 10376  		return true
 10377  	}
 10378  	// match: (CMPWconst (ANDLconst [c] x) [0])
 10379  	// cond:
 10380  	// result: (TESTWconst [int64(int16(c))] x)
 10381  	for {
 10382  		if v.AuxInt != 0 {
 10383  			break
 10384  		}
 10385  		v_0 := v.Args[0]
 10386  		if v_0.Op != OpAMD64ANDLconst {
 10387  			break
 10388  		}
 10389  		c := v_0.AuxInt
 10390  		x := v_0.Args[0]
 10391  		v.reset(OpAMD64TESTWconst)
 10392  		v.AuxInt = int64(int16(c))
 10393  		v.AddArg(x)
 10394  		return true
 10395  	}
 10396  	// match: (CMPWconst x [0])
 10397  	// cond:
 10398  	// result: (TESTW x x)
 10399  	for {
 10400  		if v.AuxInt != 0 {
 10401  			break
 10402  		}
 10403  		x := v.Args[0]
 10404  		v.reset(OpAMD64TESTW)
 10405  		v.AddArg(x)
 10406  		v.AddArg(x)
 10407  		return true
 10408  	}
 10409  	// match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
 10410  	// cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l)
 10411  	// result: @l.Block (CMPWconstload {sym} [makeValAndOff(c,off)] ptr mem)
 10412  	for {
 10413  		c := v.AuxInt
 10414  		l := v.Args[0]
 10415  		if l.Op != OpAMD64MOVWload {
 10416  			break
 10417  		}
 10418  		off := l.AuxInt
 10419  		sym := l.Aux
 10420  		_ = l.Args[1]
 10421  		ptr := l.Args[0]
 10422  		mem := l.Args[1]
 10423  		if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) {
 10424  			break
 10425  		}
 10426  		b = l.Block
 10427  		v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
 10428  		v.reset(OpCopy)
 10429  		v.AddArg(v0)
 10430  		v0.AuxInt = makeValAndOff(c, off)
 10431  		v0.Aux = sym
 10432  		v0.AddArg(ptr)
 10433  		v0.AddArg(mem)
 10434  		return true
 10435  	}
 10436  	return false
 10437  }
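// rewriteValueAMD64_OpAMD64CMPWconstload_0 is the 16-bit analogue of the
// CMPQconstload rules above: ADDQconst offsets and LEAQ offset/symbol pairs
// on the base pointer are folded into the instruction's ValAndOff and Aux.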
 10438  func rewriteValueAMD64_OpAMD64CMPWconstload_0(v *Value) bool {
 10439  	// match: (CMPWconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
 10440  	// cond: ValAndOff(valoff1).canAdd(off2)
 10441  	// result: (CMPWconstload [ValAndOff(valoff1).add(off2)] {sym} base mem)
 10442  	for {
 10443  		valoff1 := v.AuxInt
 10444  		sym := v.Aux
 10445  		_ = v.Args[1]
 10446  		v_0 := v.Args[0]
 10447  		if v_0.Op != OpAMD64ADDQconst {
 10448  			break
 10449  		}
 10450  		off2 := v_0.AuxInt
 10451  		base := v_0.Args[0]
 10452  		mem := v.Args[1]
 10453  		if !(ValAndOff(valoff1).canAdd(off2)) {
 10454  			break
 10455  		}
 10456  		v.reset(OpAMD64CMPWconstload)
 10457  		v.AuxInt = ValAndOff(valoff1).add(off2)
 10458  		v.Aux = sym
 10459  		v.AddArg(base)
 10460  		v.AddArg(mem)
 10461  		return true
 10462  	}
 10463  	// match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
 10464  	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
 10465  	// result: (CMPWconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
 10466  	for {
 10467  		valoff1 := v.AuxInt
 10468  		sym1 := v.Aux
 10469  		_ = v.Args[1]
 10470  		v_0 := v.Args[0]
 10471  		if v_0.Op != OpAMD64LEAQ {
 10472  			break
 10473  		}
 10474  		off2 := v_0.AuxInt
 10475  		sym2 := v_0.Aux
 10476  		base := v_0.Args[0]
 10477  		mem := v.Args[1]
 10478  		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
 10479  			break
 10480  		}
 10481  		v.reset(OpAMD64CMPWconstload)
 10482  		v.AuxInt = ValAndOff(valoff1).add(off2)
 10483  		v.Aux = mergeSym(sym1, sym2)
 10484  		v.AddArg(base)
 10485  		v.AddArg(mem)
 10486  		return true
 10487  	}
 10488  	return false
 10489  }
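// rewriteValueAMD64_OpAMD64CMPWload_0 folds base-pointer arithmetic into the
// CMPWload offset/symbol and converts a compare against a MOVLconst into a
// CMPWconstload, truncating the constant to 16 bits first.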
 10490  func rewriteValueAMD64_OpAMD64CMPWload_0(v *Value) bool {
 10491  	// match: (CMPWload [off1] {sym} (ADDQconst [off2] base) val mem)
 10492  	// cond: is32Bit(off1+off2)
 10493  	// result: (CMPWload [off1+off2] {sym} base val mem)
 10494  	for {
 10495  		off1 := v.AuxInt
 10496  		sym := v.Aux
 10497  		_ = v.Args[2]
 10498  		v_0 := v.Args[0]
 10499  		if v_0.Op != OpAMD64ADDQconst {
 10500  			break
 10501  		}
 10502  		off2 := v_0.AuxInt
 10503  		base := v_0.Args[0]
 10504  		val := v.Args[1]
 10505  		mem := v.Args[2]
 10506  		if !(is32Bit(off1 + off2)) {
 10507  			break
 10508  		}
 10509  		v.reset(OpAMD64CMPWload)
 10510  		v.AuxInt = off1 + off2
 10511  		v.Aux = sym
 10512  		v.AddArg(base)
 10513  		v.AddArg(val)
 10514  		v.AddArg(mem)
 10515  		return true
 10516  	}
 10517  	// match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
 10518  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 10519  	// result: (CMPWload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
 10520  	for {
 10521  		off1 := v.AuxInt
 10522  		sym1 := v.Aux
 10523  		_ = v.Args[2]
 10524  		v_0 := v.Args[0]
 10525  		if v_0.Op != OpAMD64LEAQ {
 10526  			break
 10527  		}
 10528  		off2 := v_0.AuxInt
 10529  		sym2 := v_0.Aux
 10530  		base := v_0.Args[0]
 10531  		val := v.Args[1]
 10532  		mem := v.Args[2]
 10533  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 10534  			break
 10535  		}
 10536  		v.reset(OpAMD64CMPWload)
 10537  		v.AuxInt = off1 + off2
 10538  		v.Aux = mergeSym(sym1, sym2)
 10539  		v.AddArg(base)
 10540  		v.AddArg(val)
 10541  		v.AddArg(mem)
 10542  		return true
 10543  	}
 10544  	// match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
 10545  	// cond: validValAndOff(int64(int16(c)),off)
 10546  	// result: (CMPWconstload {sym} [makeValAndOff(int64(int16(c)),off)] ptr mem)
 10547  	for {
 10548  		off := v.AuxInt
 10549  		sym := v.Aux
 10550  		_ = v.Args[2]
 10551  		ptr := v.Args[0]
 10552  		v_1 := v.Args[1]
 10553  		if v_1.Op != OpAMD64MOVLconst {
 10554  			break
 10555  		}
 10556  		c := v_1.AuxInt
 10557  		mem := v.Args[2]
 10558  		if !(validValAndOff(int64(int16(c)), off)) {
 10559  			break
 10560  		}
 10561  		v.reset(OpAMD64CMPWconstload)
 10562  		v.AuxInt = makeValAndOff(int64(int16(c)), off)
 10563  		v.Aux = sym
 10564  		v.AddArg(ptr)
 10565  		v.AddArg(mem)
 10566  		return true
 10567  	}
 10568  	return false
 10569  }
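// The CMPXCHGLlock and CMPXCHGQlock rules below are purely about addressing:
// an ADDQconst feeding the pointer is folded into the instruction's offset
// when the combined offset still fits in 32 bits.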
 10570  func rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v *Value) bool {
 10571  	// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
 10572  	// cond: is32Bit(off1+off2)
 10573  	// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
 10574  	for {
 10575  		off1 := v.AuxInt
 10576  		sym := v.Aux
 10577  		_ = v.Args[3]
 10578  		v_0 := v.Args[0]
 10579  		if v_0.Op != OpAMD64ADDQconst {
 10580  			break
 10581  		}
 10582  		off2 := v_0.AuxInt
 10583  		ptr := v_0.Args[0]
 10584  		old := v.Args[1]
 10585  		new_ := v.Args[2]
 10586  		mem := v.Args[3]
 10587  		if !(is32Bit(off1 + off2)) {
 10588  			break
 10589  		}
 10590  		v.reset(OpAMD64CMPXCHGLlock)
 10591  		v.AuxInt = off1 + off2
 10592  		v.Aux = sym
 10593  		v.AddArg(ptr)
 10594  		v.AddArg(old)
 10595  		v.AddArg(new_)
 10596  		v.AddArg(mem)
 10597  		return true
 10598  	}
 10599  	return false
 10600  }
 10601  func rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v *Value) bool {
 10602  	// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
 10603  	// cond: is32Bit(off1+off2)
 10604  	// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
 10605  	for {
 10606  		off1 := v.AuxInt
 10607  		sym := v.Aux
 10608  		_ = v.Args[3]
 10609  		v_0 := v.Args[0]
 10610  		if v_0.Op != OpAMD64ADDQconst {
 10611  			break
 10612  		}
 10613  		off2 := v_0.AuxInt
 10614  		ptr := v_0.Args[0]
 10615  		old := v.Args[1]
 10616  		new_ := v.Args[2]
 10617  		mem := v.Args[3]
 10618  		if !(is32Bit(off1 + off2)) {
 10619  			break
 10620  		}
 10621  		v.reset(OpAMD64CMPXCHGQlock)
 10622  		v.AuxInt = off1 + off2
 10623  		v.Aux = sym
 10624  		v.AddArg(ptr)
 10625  		v.AddArg(old)
 10626  		v.AddArg(new_)
 10627  		v.AddArg(mem)
 10628  		return true
 10629  	}
 10630  	return false
 10631  }
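// rewriteValueAMD64_OpAMD64DIVSD_0 folds a mergeable MOVSDload divisor into a
// DIVSDload. Only the second operand is matched: DIVSDload divides its value
// operand by the loaded value, so a load feeding the dividend is not merged
// here. DIVSS below follows the same pattern for 32-bit floats.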
 10632  func rewriteValueAMD64_OpAMD64DIVSD_0(v *Value) bool {
 10633  	// match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
 10634  	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
 10635  	// result: (DIVSDload x [off] {sym} ptr mem)
 10636  	for {
 10637  		_ = v.Args[1]
 10638  		x := v.Args[0]
 10639  		l := v.Args[1]
 10640  		if l.Op != OpAMD64MOVSDload {
 10641  			break
 10642  		}
 10643  		off := l.AuxInt
 10644  		sym := l.Aux
 10645  		_ = l.Args[1]
 10646  		ptr := l.Args[0]
 10647  		mem := l.Args[1]
 10648  		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
 10649  			break
 10650  		}
 10651  		v.reset(OpAMD64DIVSDload)
 10652  		v.AuxInt = off
 10653  		v.Aux = sym
 10654  		v.AddArg(x)
 10655  		v.AddArg(ptr)
 10656  		v.AddArg(mem)
 10657  		return true
 10658  	}
 10659  	return false
 10660  }
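// rewriteValueAMD64_OpAMD64DIVSDload_0 (and DIVSSload_0 below) fold an
// ADDQconst or LEAQ on the load address into the instruction's offset and
// symbol, under the usual is32Bit/canMergeSym conditions.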
 10661  func rewriteValueAMD64_OpAMD64DIVSDload_0(v *Value) bool {
 10662  	// match: (DIVSDload [off1] {sym} val (ADDQconst [off2] base) mem)
 10663  	// cond: is32Bit(off1+off2)
 10664  	// result: (DIVSDload [off1+off2] {sym} val base mem)
 10665  	for {
 10666  		off1 := v.AuxInt
 10667  		sym := v.Aux
 10668  		_ = v.Args[2]
 10669  		val := v.Args[0]
 10670  		v_1 := v.Args[1]
 10671  		if v_1.Op != OpAMD64ADDQconst {
 10672  			break
 10673  		}
 10674  		off2 := v_1.AuxInt
 10675  		base := v_1.Args[0]
 10676  		mem := v.Args[2]
 10677  		if !(is32Bit(off1 + off2)) {
 10678  			break
 10679  		}
 10680  		v.reset(OpAMD64DIVSDload)
 10681  		v.AuxInt = off1 + off2
 10682  		v.Aux = sym
 10683  		v.AddArg(val)
 10684  		v.AddArg(base)
 10685  		v.AddArg(mem)
 10686  		return true
 10687  	}
 10688  	// match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
 10689  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 10690  	// result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
 10691  	for {
 10692  		off1 := v.AuxInt
 10693  		sym1 := v.Aux
 10694  		_ = v.Args[2]
 10695  		val := v.Args[0]
 10696  		v_1 := v.Args[1]
 10697  		if v_1.Op != OpAMD64LEAQ {
 10698  			break
 10699  		}
 10700  		off2 := v_1.AuxInt
 10701  		sym2 := v_1.Aux
 10702  		base := v_1.Args[0]
 10703  		mem := v.Args[2]
 10704  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 10705  			break
 10706  		}
 10707  		v.reset(OpAMD64DIVSDload)
 10708  		v.AuxInt = off1 + off2
 10709  		v.Aux = mergeSym(sym1, sym2)
 10710  		v.AddArg(val)
 10711  		v.AddArg(base)
 10712  		v.AddArg(mem)
 10713  		return true
 10714  	}
 10715  	return false
 10716  }
 10717  func rewriteValueAMD64_OpAMD64DIVSS_0(v *Value) bool {
 10718  	// match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
 10719  	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
 10720  	// result: (DIVSSload x [off] {sym} ptr mem)
 10721  	for {
 10722  		_ = v.Args[1]
 10723  		x := v.Args[0]
 10724  		l := v.Args[1]
 10725  		if l.Op != OpAMD64MOVSSload {
 10726  			break
 10727  		}
 10728  		off := l.AuxInt
 10729  		sym := l.Aux
 10730  		_ = l.Args[1]
 10731  		ptr := l.Args[0]
 10732  		mem := l.Args[1]
 10733  		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
 10734  			break
 10735  		}
 10736  		v.reset(OpAMD64DIVSSload)
 10737  		v.AuxInt = off
 10738  		v.Aux = sym
 10739  		v.AddArg(x)
 10740  		v.AddArg(ptr)
 10741  		v.AddArg(mem)
 10742  		return true
 10743  	}
 10744  	return false
 10745  }
 10746  func rewriteValueAMD64_OpAMD64DIVSSload_0(v *Value) bool {
 10747  	// match: (DIVSSload [off1] {sym} val (ADDQconst [off2] base) mem)
 10748  	// cond: is32Bit(off1+off2)
 10749  	// result: (DIVSSload [off1+off2] {sym} val base mem)
 10750  	for {
 10751  		off1 := v.AuxInt
 10752  		sym := v.Aux
 10753  		_ = v.Args[2]
 10754  		val := v.Args[0]
 10755  		v_1 := v.Args[1]
 10756  		if v_1.Op != OpAMD64ADDQconst {
 10757  			break
 10758  		}
 10759  		off2 := v_1.AuxInt
 10760  		base := v_1.Args[0]
 10761  		mem := v.Args[2]
 10762  		if !(is32Bit(off1 + off2)) {
 10763  			break
 10764  		}
 10765  		v.reset(OpAMD64DIVSSload)
 10766  		v.AuxInt = off1 + off2
 10767  		v.Aux = sym
 10768  		v.AddArg(val)
 10769  		v.AddArg(base)
 10770  		v.AddArg(mem)
 10771  		return true
 10772  	}
 10773  	// match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
 10774  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 10775  	// result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
 10776  	for {
 10777  		off1 := v.AuxInt
 10778  		sym1 := v.Aux
 10779  		_ = v.Args[2]
 10780  		val := v.Args[0]
 10781  		v_1 := v.Args[1]
 10782  		if v_1.Op != OpAMD64LEAQ {
 10783  			break
 10784  		}
 10785  		off2 := v_1.AuxInt
 10786  		sym2 := v_1.Aux
 10787  		base := v_1.Args[0]
 10788  		mem := v.Args[2]
 10789  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 10790  			break
 10791  		}
 10792  		v.reset(OpAMD64DIVSSload)
 10793  		v.AuxInt = off1 + off2
 10794  		v.Aux = mergeSym(sym1, sym2)
 10795  		v.AddArg(val)
 10796  		v.AddArg(base)
 10797  		v.AddArg(mem)
 10798  		return true
 10799  	}
 10800  	return false
 10801  }
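// The HMUL* rules below swap the operands of a high-multiply when the first
// operand is not rematerializeable but the second one is, so the value that
// is cheap to recompute ends up as the first argument (presumably making the
// fixed-register constraint on that argument easier to satisfy).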
 10802  func rewriteValueAMD64_OpAMD64HMULL_0(v *Value) bool {
 10803  	// match: (HMULL x y)
 10804  	// cond: !x.rematerializeable() && y.rematerializeable()
 10805  	// result: (HMULL y x)
 10806  	for {
 10807  		_ = v.Args[1]
 10808  		x := v.Args[0]
 10809  		y := v.Args[1]
 10810  		if !(!x.rematerializeable() && y.rematerializeable()) {
 10811  			break
 10812  		}
 10813  		v.reset(OpAMD64HMULL)
 10814  		v.AddArg(y)
 10815  		v.AddArg(x)
 10816  		return true
 10817  	}
 10818  	return false
 10819  }
 10820  func rewriteValueAMD64_OpAMD64HMULLU_0(v *Value) bool {
 10821  	// match: (HMULLU x y)
 10822  	// cond: !x.rematerializeable() && y.rematerializeable()
 10823  	// result: (HMULLU y x)
 10824  	for {
 10825  		_ = v.Args[1]
 10826  		x := v.Args[0]
 10827  		y := v.Args[1]
 10828  		if !(!x.rematerializeable() && y.rematerializeable()) {
 10829  			break
 10830  		}
 10831  		v.reset(OpAMD64HMULLU)
 10832  		v.AddArg(y)
 10833  		v.AddArg(x)
 10834  		return true
 10835  	}
 10836  	return false
 10837  }
 10838  func rewriteValueAMD64_OpAMD64HMULQ_0(v *Value) bool {
 10839  	// match: (HMULQ x y)
 10840  	// cond: !x.rematerializeable() && y.rematerializeable()
 10841  	// result: (HMULQ y x)
 10842  	for {
 10843  		_ = v.Args[1]
 10844  		x := v.Args[0]
 10845  		y := v.Args[1]
 10846  		if !(!x.rematerializeable() && y.rematerializeable()) {
 10847  			break
 10848  		}
 10849  		v.reset(OpAMD64HMULQ)
 10850  		v.AddArg(y)
 10851  		v.AddArg(x)
 10852  		return true
 10853  	}
 10854  	return false
 10855  }
 10856  func rewriteValueAMD64_OpAMD64HMULQU_0(v *Value) bool {
 10857  	// match: (HMULQU x y)
 10858  	// cond: !x.rematerializeable() && y.rematerializeable()
 10859  	// result: (HMULQU y x)
 10860  	for {
 10861  		_ = v.Args[1]
 10862  		x := v.Args[0]
 10863  		y := v.Args[1]
 10864  		if !(!x.rematerializeable() && y.rematerializeable()) {
 10865  			break
 10866  		}
 10867  		v.reset(OpAMD64HMULQU)
 10868  		v.AddArg(y)
 10869  		v.AddArg(x)
 10870  		return true
 10871  	}
 10872  	return false
 10873  }
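// rewriteValueAMD64_OpAMD64LEAL_0 folds an ADDLconst into the LEAL
// displacement when the sum fits in 32 bits, and rewrites LEAL of an ADDL as
// the two-operand LEAL1 form as long as neither addend is SB.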
 10874  func rewriteValueAMD64_OpAMD64LEAL_0(v *Value) bool {
 10875  	// match: (LEAL [c] {s} (ADDLconst [d] x))
 10876  	// cond: is32Bit(c+d)
 10877  	// result: (LEAL [c+d] {s} x)
 10878  	for {
 10879  		c := v.AuxInt
 10880  		s := v.Aux
 10881  		v_0 := v.Args[0]
 10882  		if v_0.Op != OpAMD64ADDLconst {
 10883  			break
 10884  		}
 10885  		d := v_0.AuxInt
 10886  		x := v_0.Args[0]
 10887  		if !(is32Bit(c + d)) {
 10888  			break
 10889  		}
 10890  		v.reset(OpAMD64LEAL)
 10891  		v.AuxInt = c + d
 10892  		v.Aux = s
 10893  		v.AddArg(x)
 10894  		return true
 10895  	}
 10896  	// match: (LEAL [c] {s} (ADDL x y))
 10897  	// cond: x.Op != OpSB && y.Op != OpSB
 10898  	// result: (LEAL1 [c] {s} x y)
 10899  	for {
 10900  		c := v.AuxInt
 10901  		s := v.Aux
 10902  		v_0 := v.Args[0]
 10903  		if v_0.Op != OpAMD64ADDL {
 10904  			break
 10905  		}
 10906  		_ = v_0.Args[1]
 10907  		x := v_0.Args[0]
 10908  		y := v_0.Args[1]
 10909  		if !(x.Op != OpSB && y.Op != OpSB) {
 10910  			break
 10911  		}
 10912  		v.reset(OpAMD64LEAL1)
 10913  		v.AuxInt = c
 10914  		v.Aux = s
 10915  		v.AddArg(x)
 10916  		v.AddArg(y)
 10917  		return true
 10918  	}
 10919  	return false
 10920  }
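// rewriteValueAMD64_OpAMD64LEAL1_0 absorbs an ADDLconst from either operand
// into the displacement and upgrades a shifted index: a SHLLconst by 1, 2, or
// 3 in either position turns the instruction into LEAL2, LEAL4, or LEAL8.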
 10921  func rewriteValueAMD64_OpAMD64LEAL1_0(v *Value) bool {
 10922  	// match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
 10923  	// cond: is32Bit(c+d) && x.Op != OpSB
 10924  	// result: (LEAL1 [c+d] {s} x y)
 10925  	for {
 10926  		c := v.AuxInt
 10927  		s := v.Aux
 10928  		_ = v.Args[1]
 10929  		v_0 := v.Args[0]
 10930  		if v_0.Op != OpAMD64ADDLconst {
 10931  			break
 10932  		}
 10933  		d := v_0.AuxInt
 10934  		x := v_0.Args[0]
 10935  		y := v.Args[1]
 10936  		if !(is32Bit(c+d) && x.Op != OpSB) {
 10937  			break
 10938  		}
 10939  		v.reset(OpAMD64LEAL1)
 10940  		v.AuxInt = c + d
 10941  		v.Aux = s
 10942  		v.AddArg(x)
 10943  		v.AddArg(y)
 10944  		return true
 10945  	}
 10946  	// match: (LEAL1 [c] {s} y (ADDLconst [d] x))
 10947  	// cond: is32Bit(c+d) && x.Op != OpSB
 10948  	// result: (LEAL1 [c+d] {s} x y)
 10949  	for {
 10950  		c := v.AuxInt
 10951  		s := v.Aux
 10952  		_ = v.Args[1]
 10953  		y := v.Args[0]
 10954  		v_1 := v.Args[1]
 10955  		if v_1.Op != OpAMD64ADDLconst {
 10956  			break
 10957  		}
 10958  		d := v_1.AuxInt
 10959  		x := v_1.Args[0]
 10960  		if !(is32Bit(c+d) && x.Op != OpSB) {
 10961  			break
 10962  		}
 10963  		v.reset(OpAMD64LEAL1)
 10964  		v.AuxInt = c + d
 10965  		v.Aux = s
 10966  		v.AddArg(x)
 10967  		v.AddArg(y)
 10968  		return true
 10969  	}
 10970  	// match: (LEAL1 [c] {s} x (SHLLconst [1] y))
 10971  	// cond:
 10972  	// result: (LEAL2 [c] {s} x y)
 10973  	for {
 10974  		c := v.AuxInt
 10975  		s := v.Aux
 10976  		_ = v.Args[1]
 10977  		x := v.Args[0]
 10978  		v_1 := v.Args[1]
 10979  		if v_1.Op != OpAMD64SHLLconst {
 10980  			break
 10981  		}
 10982  		if v_1.AuxInt != 1 {
 10983  			break
 10984  		}
 10985  		y := v_1.Args[0]
 10986  		v.reset(OpAMD64LEAL2)
 10987  		v.AuxInt = c
 10988  		v.Aux = s
 10989  		v.AddArg(x)
 10990  		v.AddArg(y)
 10991  		return true
 10992  	}
 10993  	// match: (LEAL1 [c] {s} (SHLLconst [1] y) x)
 10994  	// cond:
 10995  	// result: (LEAL2 [c] {s} x y)
 10996  	for {
 10997  		c := v.AuxInt
 10998  		s := v.Aux
 10999  		_ = v.Args[1]
 11000  		v_0 := v.Args[0]
 11001  		if v_0.Op != OpAMD64SHLLconst {
 11002  			break
 11003  		}
 11004  		if v_0.AuxInt != 1 {
 11005  			break
 11006  		}
 11007  		y := v_0.Args[0]
 11008  		x := v.Args[1]
 11009  		v.reset(OpAMD64LEAL2)
 11010  		v.AuxInt = c
 11011  		v.Aux = s
 11012  		v.AddArg(x)
 11013  		v.AddArg(y)
 11014  		return true
 11015  	}
 11016  	// match: (LEAL1 [c] {s} x (SHLLconst [2] y))
 11017  	// cond:
 11018  	// result: (LEAL4 [c] {s} x y)
 11019  	for {
 11020  		c := v.AuxInt
 11021  		s := v.Aux
 11022  		_ = v.Args[1]
 11023  		x := v.Args[0]
 11024  		v_1 := v.Args[1]
 11025  		if v_1.Op != OpAMD64SHLLconst {
 11026  			break
 11027  		}
 11028  		if v_1.AuxInt != 2 {
 11029  			break
 11030  		}
 11031  		y := v_1.Args[0]
 11032  		v.reset(OpAMD64LEAL4)
 11033  		v.AuxInt = c
 11034  		v.Aux = s
 11035  		v.AddArg(x)
 11036  		v.AddArg(y)
 11037  		return true
 11038  	}
 11039  	// match: (LEAL1 [c] {s} (SHLLconst [2] y) x)
 11040  	// cond:
 11041  	// result: (LEAL4 [c] {s} x y)
 11042  	for {
 11043  		c := v.AuxInt
 11044  		s := v.Aux
 11045  		_ = v.Args[1]
 11046  		v_0 := v.Args[0]
 11047  		if v_0.Op != OpAMD64SHLLconst {
 11048  			break
 11049  		}
 11050  		if v_0.AuxInt != 2 {
 11051  			break
 11052  		}
 11053  		y := v_0.Args[0]
 11054  		x := v.Args[1]
 11055  		v.reset(OpAMD64LEAL4)
 11056  		v.AuxInt = c
 11057  		v.Aux = s
 11058  		v.AddArg(x)
 11059  		v.AddArg(y)
 11060  		return true
 11061  	}
 11062  	// match: (LEAL1 [c] {s} x (SHLLconst [3] y))
 11063  	// cond:
 11064  	// result: (LEAL8 [c] {s} x y)
 11065  	for {
 11066  		c := v.AuxInt
 11067  		s := v.Aux
 11068  		_ = v.Args[1]
 11069  		x := v.Args[0]
 11070  		v_1 := v.Args[1]
 11071  		if v_1.Op != OpAMD64SHLLconst {
 11072  			break
 11073  		}
 11074  		if v_1.AuxInt != 3 {
 11075  			break
 11076  		}
 11077  		y := v_1.Args[0]
 11078  		v.reset(OpAMD64LEAL8)
 11079  		v.AuxInt = c
 11080  		v.Aux = s
 11081  		v.AddArg(x)
 11082  		v.AddArg(y)
 11083  		return true
 11084  	}
 11085  	// match: (LEAL1 [c] {s} (SHLLconst [3] y) x)
 11086  	// cond:
 11087  	// result: (LEAL8 [c] {s} x y)
 11088  	for {
 11089  		c := v.AuxInt
 11090  		s := v.Aux
 11091  		_ = v.Args[1]
 11092  		v_0 := v.Args[0]
 11093  		if v_0.Op != OpAMD64SHLLconst {
 11094  			break
 11095  		}
 11096  		if v_0.AuxInt != 3 {
 11097  			break
 11098  		}
 11099  		y := v_0.Args[0]
 11100  		x := v.Args[1]
 11101  		v.reset(OpAMD64LEAL8)
 11102  		v.AuxInt = c
 11103  		v.Aux = s
 11104  		v.AddArg(x)
 11105  		v.AddArg(y)
 11106  		return true
 11107  	}
 11108  	return false
 11109  }
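// The LEAL2, LEAL4, and LEAL8 rules below repeat this shape with one twist:
// when an ADDLconst is folded out of the scaled index operand, the constant
// is multiplied by the scale, e.g. (LEAL2 [c] {s} x (ADDLconst [d] y))
// becomes (LEAL2 [c+2*d] {s} x y), and likewise with 4*d and 8*d.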
 11110  func rewriteValueAMD64_OpAMD64LEAL2_0(v *Value) bool {
 11111  	// match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
 11112  	// cond: is32Bit(c+d) && x.Op != OpSB
 11113  	// result: (LEAL2 [c+d] {s} x y)
 11114  	for {
 11115  		c := v.AuxInt
 11116  		s := v.Aux
 11117  		_ = v.Args[1]
 11118  		v_0 := v.Args[0]
 11119  		if v_0.Op != OpAMD64ADDLconst {
 11120  			break
 11121  		}
 11122  		d := v_0.AuxInt
 11123  		x := v_0.Args[0]
 11124  		y := v.Args[1]
 11125  		if !(is32Bit(c+d) && x.Op != OpSB) {
 11126  			break
 11127  		}
 11128  		v.reset(OpAMD64LEAL2)
 11129  		v.AuxInt = c + d
 11130  		v.Aux = s
 11131  		v.AddArg(x)
 11132  		v.AddArg(y)
 11133  		return true
 11134  	}
 11135  	// match: (LEAL2 [c] {s} x (ADDLconst [d] y))
 11136  	// cond: is32Bit(c+2*d) && y.Op != OpSB
 11137  	// result: (LEAL2 [c+2*d] {s} x y)
 11138  	for {
 11139  		c := v.AuxInt
 11140  		s := v.Aux
 11141  		_ = v.Args[1]
 11142  		x := v.Args[0]
 11143  		v_1 := v.Args[1]
 11144  		if v_1.Op != OpAMD64ADDLconst {
 11145  			break
 11146  		}
 11147  		d := v_1.AuxInt
 11148  		y := v_1.Args[0]
 11149  		if !(is32Bit(c+2*d) && y.Op != OpSB) {
 11150  			break
 11151  		}
 11152  		v.reset(OpAMD64LEAL2)
 11153  		v.AuxInt = c + 2*d
 11154  		v.Aux = s
 11155  		v.AddArg(x)
 11156  		v.AddArg(y)
 11157  		return true
 11158  	}
 11159  	// match: (LEAL2 [c] {s} x (SHLLconst [1] y))
 11160  	// cond:
 11161  	// result: (LEAL4 [c] {s} x y)
 11162  	for {
 11163  		c := v.AuxInt
 11164  		s := v.Aux
 11165  		_ = v.Args[1]
 11166  		x := v.Args[0]
 11167  		v_1 := v.Args[1]
 11168  		if v_1.Op != OpAMD64SHLLconst {
 11169  			break
 11170  		}
 11171  		if v_1.AuxInt != 1 {
 11172  			break
 11173  		}
 11174  		y := v_1.Args[0]
 11175  		v.reset(OpAMD64LEAL4)
 11176  		v.AuxInt = c
 11177  		v.Aux = s
 11178  		v.AddArg(x)
 11179  		v.AddArg(y)
 11180  		return true
 11181  	}
 11182  	// match: (LEAL2 [c] {s} x (SHLLconst [2] y))
 11183  	// cond:
 11184  	// result: (LEAL8 [c] {s} x y)
 11185  	for {
 11186  		c := v.AuxInt
 11187  		s := v.Aux
 11188  		_ = v.Args[1]
 11189  		x := v.Args[0]
 11190  		v_1 := v.Args[1]
 11191  		if v_1.Op != OpAMD64SHLLconst {
 11192  			break
 11193  		}
 11194  		if v_1.AuxInt != 2 {
 11195  			break
 11196  		}
 11197  		y := v_1.Args[0]
 11198  		v.reset(OpAMD64LEAL8)
 11199  		v.AuxInt = c
 11200  		v.Aux = s
 11201  		v.AddArg(x)
 11202  		v.AddArg(y)
 11203  		return true
 11204  	}
 11205  	return false
 11206  }
 11207  func rewriteValueAMD64_OpAMD64LEAL4_0(v *Value) bool {
 11208  	// match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
 11209  	// cond: is32Bit(c+d) && x.Op != OpSB
 11210  	// result: (LEAL4 [c+d] {s} x y)
 11211  	for {
 11212  		c := v.AuxInt
 11213  		s := v.Aux
 11214  		_ = v.Args[1]
 11215  		v_0 := v.Args[0]
 11216  		if v_0.Op != OpAMD64ADDLconst {
 11217  			break
 11218  		}
 11219  		d := v_0.AuxInt
 11220  		x := v_0.Args[0]
 11221  		y := v.Args[1]
 11222  		if !(is32Bit(c+d) && x.Op != OpSB) {
 11223  			break
 11224  		}
 11225  		v.reset(OpAMD64LEAL4)
 11226  		v.AuxInt = c + d
 11227  		v.Aux = s
 11228  		v.AddArg(x)
 11229  		v.AddArg(y)
 11230  		return true
 11231  	}
 11232  	// match: (LEAL4 [c] {s} x (ADDLconst [d] y))
 11233  	// cond: is32Bit(c+4*d) && y.Op != OpSB
 11234  	// result: (LEAL4 [c+4*d] {s} x y)
 11235  	for {
 11236  		c := v.AuxInt
 11237  		s := v.Aux
 11238  		_ = v.Args[1]
 11239  		x := v.Args[0]
 11240  		v_1 := v.Args[1]
 11241  		if v_1.Op != OpAMD64ADDLconst {
 11242  			break
 11243  		}
 11244  		d := v_1.AuxInt
 11245  		y := v_1.Args[0]
 11246  		if !(is32Bit(c+4*d) && y.Op != OpSB) {
 11247  			break
 11248  		}
 11249  		v.reset(OpAMD64LEAL4)
 11250  		v.AuxInt = c + 4*d
 11251  		v.Aux = s
 11252  		v.AddArg(x)
 11253  		v.AddArg(y)
 11254  		return true
 11255  	}
 11256  	// match: (LEAL4 [c] {s} x (SHLLconst [1] y))
 11257  	// cond:
 11258  	// result: (LEAL8 [c] {s} x y)
 11259  	for {
 11260  		c := v.AuxInt
 11261  		s := v.Aux
 11262  		_ = v.Args[1]
 11263  		x := v.Args[0]
 11264  		v_1 := v.Args[1]
 11265  		if v_1.Op != OpAMD64SHLLconst {
 11266  			break
 11267  		}
 11268  		if v_1.AuxInt != 1 {
 11269  			break
 11270  		}
 11271  		y := v_1.Args[0]
 11272  		v.reset(OpAMD64LEAL8)
 11273  		v.AuxInt = c
 11274  		v.Aux = s
 11275  		v.AddArg(x)
 11276  		v.AddArg(y)
 11277  		return true
 11278  	}
 11279  	return false
 11280  }
 11281  func rewriteValueAMD64_OpAMD64LEAL8_0(v *Value) bool {
 11282  	// match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
 11283  	// cond: is32Bit(c+d) && x.Op != OpSB
 11284  	// result: (LEAL8 [c+d] {s} x y)
 11285  	for {
 11286  		c := v.AuxInt
 11287  		s := v.Aux
 11288  		_ = v.Args[1]
 11289  		v_0 := v.Args[0]
 11290  		if v_0.Op != OpAMD64ADDLconst {
 11291  			break
 11292  		}
 11293  		d := v_0.AuxInt
 11294  		x := v_0.Args[0]
 11295  		y := v.Args[1]
 11296  		if !(is32Bit(c+d) && x.Op != OpSB) {
 11297  			break
 11298  		}
 11299  		v.reset(OpAMD64LEAL8)
 11300  		v.AuxInt = c + d
 11301  		v.Aux = s
 11302  		v.AddArg(x)
 11303  		v.AddArg(y)
 11304  		return true
 11305  	}
 11306  	// match: (LEAL8 [c] {s} x (ADDLconst [d] y))
 11307  	// cond: is32Bit(c+8*d) && y.Op != OpSB
 11308  	// result: (LEAL8 [c+8*d] {s} x y)
 11309  	for {
 11310  		c := v.AuxInt
 11311  		s := v.Aux
 11312  		_ = v.Args[1]
 11313  		x := v.Args[0]
 11314  		v_1 := v.Args[1]
 11315  		if v_1.Op != OpAMD64ADDLconst {
 11316  			break
 11317  		}
 11318  		d := v_1.AuxInt
 11319  		y := v_1.Args[0]
 11320  		if !(is32Bit(c+8*d) && y.Op != OpSB) {
 11321  			break
 11322  		}
 11323  		v.reset(OpAMD64LEAL8)
 11324  		v.AuxInt = c + 8*d
 11325  		v.Aux = s
 11326  		v.AddArg(x)
 11327  		v.AddArg(y)
 11328  		return true
 11329  	}
 11330  	return false
 11331  }
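// rewriteValueAMD64_OpAMD64LEAQ_0 folds an ADDQconst into the displacement,
// rewrites LEAQ of an ADDQ as LEAQ1, and collapses a LEAQ whose operand is
// another LEAQ/LEAQ1/LEAQ2/LEAQ4/LEAQ8 by adding the offsets and merging the
// symbols when is32Bit and canMergeSym allow it.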
 11332  func rewriteValueAMD64_OpAMD64LEAQ_0(v *Value) bool {
 11333  	// match: (LEAQ [c] {s} (ADDQconst [d] x))
 11334  	// cond: is32Bit(c+d)
 11335  	// result: (LEAQ [c+d] {s} x)
 11336  	for {
 11337  		c := v.AuxInt
 11338  		s := v.Aux
 11339  		v_0 := v.Args[0]
 11340  		if v_0.Op != OpAMD64ADDQconst {
 11341  			break
 11342  		}
 11343  		d := v_0.AuxInt
 11344  		x := v_0.Args[0]
 11345  		if !(is32Bit(c + d)) {
 11346  			break
 11347  		}
 11348  		v.reset(OpAMD64LEAQ)
 11349  		v.AuxInt = c + d
 11350  		v.Aux = s
 11351  		v.AddArg(x)
 11352  		return true
 11353  	}
 11354  	// match: (LEAQ [c] {s} (ADDQ x y))
 11355  	// cond: x.Op != OpSB && y.Op != OpSB
 11356  	// result: (LEAQ1 [c] {s} x y)
 11357  	for {
 11358  		c := v.AuxInt
 11359  		s := v.Aux
 11360  		v_0 := v.Args[0]
 11361  		if v_0.Op != OpAMD64ADDQ {
 11362  			break
 11363  		}
 11364  		_ = v_0.Args[1]
 11365  		x := v_0.Args[0]
 11366  		y := v_0.Args[1]
 11367  		if !(x.Op != OpSB && y.Op != OpSB) {
 11368  			break
 11369  		}
 11370  		v.reset(OpAMD64LEAQ1)
 11371  		v.AuxInt = c
 11372  		v.Aux = s
 11373  		v.AddArg(x)
 11374  		v.AddArg(y)
 11375  		return true
 11376  	}
 11377  	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
 11378  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 11379  	// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
 11380  	for {
 11381  		off1 := v.AuxInt
 11382  		sym1 := v.Aux
 11383  		v_0 := v.Args[0]
 11384  		if v_0.Op != OpAMD64LEAQ {
 11385  			break
 11386  		}
 11387  		off2 := v_0.AuxInt
 11388  		sym2 := v_0.Aux
 11389  		x := v_0.Args[0]
 11390  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 11391  			break
 11392  		}
 11393  		v.reset(OpAMD64LEAQ)
 11394  		v.AuxInt = off1 + off2
 11395  		v.Aux = mergeSym(sym1, sym2)
 11396  		v.AddArg(x)
 11397  		return true
 11398  	}
 11399  	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
 11400  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 11401  	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
 11402  	for {
 11403  		off1 := v.AuxInt
 11404  		sym1 := v.Aux
 11405  		v_0 := v.Args[0]
 11406  		if v_0.Op != OpAMD64LEAQ1 {
 11407  			break
 11408  		}
 11409  		off2 := v_0.AuxInt
 11410  		sym2 := v_0.Aux
 11411  		_ = v_0.Args[1]
 11412  		x := v_0.Args[0]
 11413  		y := v_0.Args[1]
 11414  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 11415  			break
 11416  		}
 11417  		v.reset(OpAMD64LEAQ1)
 11418  		v.AuxInt = off1 + off2
 11419  		v.Aux = mergeSym(sym1, sym2)
 11420  		v.AddArg(x)
 11421  		v.AddArg(y)
 11422  		return true
 11423  	}
 11424  	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
 11425  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 11426  	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
 11427  	for {
 11428  		off1 := v.AuxInt
 11429  		sym1 := v.Aux
 11430  		v_0 := v.Args[0]
 11431  		if v_0.Op != OpAMD64LEAQ2 {
 11432  			break
 11433  		}
 11434  		off2 := v_0.AuxInt
 11435  		sym2 := v_0.Aux
 11436  		_ = v_0.Args[1]
 11437  		x := v_0.Args[0]
 11438  		y := v_0.Args[1]
 11439  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 11440  			break
 11441  		}
 11442  		v.reset(OpAMD64LEAQ2)
 11443  		v.AuxInt = off1 + off2
 11444  		v.Aux = mergeSym(sym1, sym2)
 11445  		v.AddArg(x)
 11446  		v.AddArg(y)
 11447  		return true
 11448  	}
 11449  	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
 11450  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 11451  	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
 11452  	for {
 11453  		off1 := v.AuxInt
 11454  		sym1 := v.Aux
 11455  		v_0 := v.Args[0]
 11456  		if v_0.Op != OpAMD64LEAQ4 {
 11457  			break
 11458  		}
 11459  		off2 := v_0.AuxInt
 11460  		sym2 := v_0.Aux
 11461  		_ = v_0.Args[1]
 11462  		x := v_0.Args[0]
 11463  		y := v_0.Args[1]
 11464  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 11465  			break
 11466  		}
 11467  		v.reset(OpAMD64LEAQ4)
 11468  		v.AuxInt = off1 + off2
 11469  		v.Aux = mergeSym(sym1, sym2)
 11470  		v.AddArg(x)
 11471  		v.AddArg(y)
 11472  		return true
 11473  	}
 11474  	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
 11475  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 11476  	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
 11477  	for {
 11478  		off1 := v.AuxInt
 11479  		sym1 := v.Aux
 11480  		v_0 := v.Args[0]
 11481  		if v_0.Op != OpAMD64LEAQ8 {
 11482  			break
 11483  		}
 11484  		off2 := v_0.AuxInt
 11485  		sym2 := v_0.Aux
 11486  		_ = v_0.Args[1]
 11487  		x := v_0.Args[0]
 11488  		y := v_0.Args[1]
 11489  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 11490  			break
 11491  		}
 11492  		v.reset(OpAMD64LEAQ8)
 11493  		v.AuxInt = off1 + off2
 11494  		v.Aux = mergeSym(sym1, sym2)
 11495  		v.AddArg(x)
 11496  		v.AddArg(y)
 11497  		return true
 11498  	}
 11499  	return false
 11500  }
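// rewriteValueAMD64_OpAMD64LEAQ1_0 is the 64-bit counterpart of LEAL1: it
// folds ADDQconst offsets, promotes a SHLQconst by 1, 2, or 3 on the index to
// LEAQ2/LEAQ4/LEAQ8, and merges an inner LEAQ from either operand when the
// offsets and symbols combine and the LEAQ's base is not SB. The LEAQ2,
// LEAQ4, and LEAQ8 functions below scale the folded index constant the same
// way as their LEAL counterparts.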
 11501  func rewriteValueAMD64_OpAMD64LEAQ1_0(v *Value) bool {
 11502  	// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
 11503  	// cond: is32Bit(c+d) && x.Op != OpSB
 11504  	// result: (LEAQ1 [c+d] {s} x y)
 11505  	for {
 11506  		c := v.AuxInt
 11507  		s := v.Aux
 11508  		_ = v.Args[1]
 11509  		v_0 := v.Args[0]
 11510  		if v_0.Op != OpAMD64ADDQconst {
 11511  			break
 11512  		}
 11513  		d := v_0.AuxInt
 11514  		x := v_0.Args[0]
 11515  		y := v.Args[1]
 11516  		if !(is32Bit(c+d) && x.Op != OpSB) {
 11517  			break
 11518  		}
 11519  		v.reset(OpAMD64LEAQ1)
 11520  		v.AuxInt = c + d
 11521  		v.Aux = s
 11522  		v.AddArg(x)
 11523  		v.AddArg(y)
 11524  		return true
 11525  	}
 11526  	// match: (LEAQ1 [c] {s} y (ADDQconst [d] x))
 11527  	// cond: is32Bit(c+d) && x.Op != OpSB
 11528  	// result: (LEAQ1 [c+d] {s} x y)
 11529  	for {
 11530  		c := v.AuxInt
 11531  		s := v.Aux
 11532  		_ = v.Args[1]
 11533  		y := v.Args[0]
 11534  		v_1 := v.Args[1]
 11535  		if v_1.Op != OpAMD64ADDQconst {
 11536  			break
 11537  		}
 11538  		d := v_1.AuxInt
 11539  		x := v_1.Args[0]
 11540  		if !(is32Bit(c+d) && x.Op != OpSB) {
 11541  			break
 11542  		}
 11543  		v.reset(OpAMD64LEAQ1)
 11544  		v.AuxInt = c + d
 11545  		v.Aux = s
 11546  		v.AddArg(x)
 11547  		v.AddArg(y)
 11548  		return true
 11549  	}
 11550  	// match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
 11551  	// cond:
 11552  	// result: (LEAQ2 [c] {s} x y)
 11553  	for {
 11554  		c := v.AuxInt
 11555  		s := v.Aux
 11556  		_ = v.Args[1]
 11557  		x := v.Args[0]
 11558  		v_1 := v.Args[1]
 11559  		if v_1.Op != OpAMD64SHLQconst {
 11560  			break
 11561  		}
 11562  		if v_1.AuxInt != 1 {
 11563  			break
 11564  		}
 11565  		y := v_1.Args[0]
 11566  		v.reset(OpAMD64LEAQ2)
 11567  		v.AuxInt = c
 11568  		v.Aux = s
 11569  		v.AddArg(x)
 11570  		v.AddArg(y)
 11571  		return true
 11572  	}
 11573  	// match: (LEAQ1 [c] {s} (SHLQconst [1] y) x)
 11574  	// cond:
 11575  	// result: (LEAQ2 [c] {s} x y)
 11576  	for {
 11577  		c := v.AuxInt
 11578  		s := v.Aux
 11579  		_ = v.Args[1]
 11580  		v_0 := v.Args[0]
 11581  		if v_0.Op != OpAMD64SHLQconst {
 11582  			break
 11583  		}
 11584  		if v_0.AuxInt != 1 {
 11585  			break
 11586  		}
 11587  		y := v_0.Args[0]
 11588  		x := v.Args[1]
 11589  		v.reset(OpAMD64LEAQ2)
 11590  		v.AuxInt = c
 11591  		v.Aux = s
 11592  		v.AddArg(x)
 11593  		v.AddArg(y)
 11594  		return true
 11595  	}
 11596  	// match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
 11597  	// cond:
 11598  	// result: (LEAQ4 [c] {s} x y)
 11599  	for {
 11600  		c := v.AuxInt
 11601  		s := v.Aux
 11602  		_ = v.Args[1]
 11603  		x := v.Args[0]
 11604  		v_1 := v.Args[1]
 11605  		if v_1.Op != OpAMD64SHLQconst {
 11606  			break
 11607  		}
 11608  		if v_1.AuxInt != 2 {
 11609  			break
 11610  		}
 11611  		y := v_1.Args[0]
 11612  		v.reset(OpAMD64LEAQ4)
 11613  		v.AuxInt = c
 11614  		v.Aux = s
 11615  		v.AddArg(x)
 11616  		v.AddArg(y)
 11617  		return true
 11618  	}
 11619  	// match: (LEAQ1 [c] {s} (SHLQconst [2] y) x)
 11620  	// cond:
 11621  	// result: (LEAQ4 [c] {s} x y)
 11622  	for {
 11623  		c := v.AuxInt
 11624  		s := v.Aux
 11625  		_ = v.Args[1]
 11626  		v_0 := v.Args[0]
 11627  		if v_0.Op != OpAMD64SHLQconst {
 11628  			break
 11629  		}
 11630  		if v_0.AuxInt != 2 {
 11631  			break
 11632  		}
 11633  		y := v_0.Args[0]
 11634  		x := v.Args[1]
 11635  		v.reset(OpAMD64LEAQ4)
 11636  		v.AuxInt = c
 11637  		v.Aux = s
 11638  		v.AddArg(x)
 11639  		v.AddArg(y)
 11640  		return true
 11641  	}
 11642  	// match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
 11643  	// cond:
 11644  	// result: (LEAQ8 [c] {s} x y)
 11645  	for {
 11646  		c := v.AuxInt
 11647  		s := v.Aux
 11648  		_ = v.Args[1]
 11649  		x := v.Args[0]
 11650  		v_1 := v.Args[1]
 11651  		if v_1.Op != OpAMD64SHLQconst {
 11652  			break
 11653  		}
 11654  		if v_1.AuxInt != 3 {
 11655  			break
 11656  		}
 11657  		y := v_1.Args[0]
 11658  		v.reset(OpAMD64LEAQ8)
 11659  		v.AuxInt = c
 11660  		v.Aux = s
 11661  		v.AddArg(x)
 11662  		v.AddArg(y)
 11663  		return true
 11664  	}
 11665  	// match: (LEAQ1 [c] {s} (SHLQconst [3] y) x)
 11666  	// cond:
 11667  	// result: (LEAQ8 [c] {s} x y)
 11668  	for {
 11669  		c := v.AuxInt
 11670  		s := v.Aux
 11671  		_ = v.Args[1]
 11672  		v_0 := v.Args[0]
 11673  		if v_0.Op != OpAMD64SHLQconst {
 11674  			break
 11675  		}
 11676  		if v_0.AuxInt != 3 {
 11677  			break
 11678  		}
 11679  		y := v_0.Args[0]
 11680  		x := v.Args[1]
 11681  		v.reset(OpAMD64LEAQ8)
 11682  		v.AuxInt = c
 11683  		v.Aux = s
 11684  		v.AddArg(x)
 11685  		v.AddArg(y)
 11686  		return true
 11687  	}
 11688  	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
 11689  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
 11690  	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
 11691  	for {
 11692  		off1 := v.AuxInt
 11693  		sym1 := v.Aux
 11694  		_ = v.Args[1]
 11695  		v_0 := v.Args[0]
 11696  		if v_0.Op != OpAMD64LEAQ {
 11697  			break
 11698  		}
 11699  		off2 := v_0.AuxInt
 11700  		sym2 := v_0.Aux
 11701  		x := v_0.Args[0]
 11702  		y := v.Args[1]
 11703  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
 11704  			break
 11705  		}
 11706  		v.reset(OpAMD64LEAQ1)
 11707  		v.AuxInt = off1 + off2
 11708  		v.Aux = mergeSym(sym1, sym2)
 11709  		v.AddArg(x)
 11710  		v.AddArg(y)
 11711  		return true
 11712  	}
 11713  	// match: (LEAQ1 [off1] {sym1} y (LEAQ [off2] {sym2} x))
 11714  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
 11715  	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
 11716  	for {
 11717  		off1 := v.AuxInt
 11718  		sym1 := v.Aux
 11719  		_ = v.Args[1]
 11720  		y := v.Args[0]
 11721  		v_1 := v.Args[1]
 11722  		if v_1.Op != OpAMD64LEAQ {
 11723  			break
 11724  		}
 11725  		off2 := v_1.AuxInt
 11726  		sym2 := v_1.Aux
 11727  		x := v_1.Args[0]
 11728  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
 11729  			break
 11730  		}
 11731  		v.reset(OpAMD64LEAQ1)
 11732  		v.AuxInt = off1 + off2
 11733  		v.Aux = mergeSym(sym1, sym2)
 11734  		v.AddArg(x)
 11735  		v.AddArg(y)
 11736  		return true
 11737  	}
 11738  	return false
 11739  }
 11740  func rewriteValueAMD64_OpAMD64LEAQ2_0(v *Value) bool {
 11741  	// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
 11742  	// cond: is32Bit(c+d) && x.Op != OpSB
 11743  	// result: (LEAQ2 [c+d] {s} x y)
 11744  	for {
 11745  		c := v.AuxInt
 11746  		s := v.Aux
 11747  		_ = v.Args[1]
 11748  		v_0 := v.Args[0]
 11749  		if v_0.Op != OpAMD64ADDQconst {
 11750  			break
 11751  		}
 11752  		d := v_0.AuxInt
 11753  		x := v_0.Args[0]
 11754  		y := v.Args[1]
 11755  		if !(is32Bit(c+d) && x.Op != OpSB) {
 11756  			break
 11757  		}
 11758  		v.reset(OpAMD64LEAQ2)
 11759  		v.AuxInt = c + d
 11760  		v.Aux = s
 11761  		v.AddArg(x)
 11762  		v.AddArg(y)
 11763  		return true
 11764  	}
 11765  	// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
 11766  	// cond: is32Bit(c+2*d) && y.Op != OpSB
 11767  	// result: (LEAQ2 [c+2*d] {s} x y)
 11768  	for {
 11769  		c := v.AuxInt
 11770  		s := v.Aux
 11771  		_ = v.Args[1]
 11772  		x := v.Args[0]
 11773  		v_1 := v.Args[1]
 11774  		if v_1.Op != OpAMD64ADDQconst {
 11775  			break
 11776  		}
 11777  		d := v_1.AuxInt
 11778  		y := v_1.Args[0]
 11779  		if !(is32Bit(c+2*d) && y.Op != OpSB) {
 11780  			break
 11781  		}
 11782  		v.reset(OpAMD64LEAQ2)
 11783  		v.AuxInt = c + 2*d
 11784  		v.Aux = s
 11785  		v.AddArg(x)
 11786  		v.AddArg(y)
 11787  		return true
 11788  	}
 11789  	// match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
 11790  	// cond:
 11791  	// result: (LEAQ4 [c] {s} x y)
 11792  	for {
 11793  		c := v.AuxInt
 11794  		s := v.Aux
 11795  		_ = v.Args[1]
 11796  		x := v.Args[0]
 11797  		v_1 := v.Args[1]
 11798  		if v_1.Op != OpAMD64SHLQconst {
 11799  			break
 11800  		}
 11801  		if v_1.AuxInt != 1 {
 11802  			break
 11803  		}
 11804  		y := v_1.Args[0]
 11805  		v.reset(OpAMD64LEAQ4)
 11806  		v.AuxInt = c
 11807  		v.Aux = s
 11808  		v.AddArg(x)
 11809  		v.AddArg(y)
 11810  		return true
 11811  	}
 11812  	// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
 11813  	// cond:
 11814  	// result: (LEAQ8 [c] {s} x y)
 11815  	for {
 11816  		c := v.AuxInt
 11817  		s := v.Aux
 11818  		_ = v.Args[1]
 11819  		x := v.Args[0]
 11820  		v_1 := v.Args[1]
 11821  		if v_1.Op != OpAMD64SHLQconst {
 11822  			break
 11823  		}
 11824  		if v_1.AuxInt != 2 {
 11825  			break
 11826  		}
 11827  		y := v_1.Args[0]
 11828  		v.reset(OpAMD64LEAQ8)
 11829  		v.AuxInt = c
 11830  		v.Aux = s
 11831  		v.AddArg(x)
 11832  		v.AddArg(y)
 11833  		return true
 11834  	}
 11835  	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
 11836  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
 11837  	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
 11838  	for {
 11839  		off1 := v.AuxInt
 11840  		sym1 := v.Aux
 11841  		_ = v.Args[1]
 11842  		v_0 := v.Args[0]
 11843  		if v_0.Op != OpAMD64LEAQ {
 11844  			break
 11845  		}
 11846  		off2 := v_0.AuxInt
 11847  		sym2 := v_0.Aux
 11848  		x := v_0.Args[0]
 11849  		y := v.Args[1]
 11850  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
 11851  			break
 11852  		}
 11853  		v.reset(OpAMD64LEAQ2)
 11854  		v.AuxInt = off1 + off2
 11855  		v.Aux = mergeSym(sym1, sym2)
 11856  		v.AddArg(x)
 11857  		v.AddArg(y)
 11858  		return true
 11859  	}
 11860  	return false
 11861  }
 11862  func rewriteValueAMD64_OpAMD64LEAQ4_0(v *Value) bool {
 11863  	// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
 11864  	// cond: is32Bit(c+d) && x.Op != OpSB
 11865  	// result: (LEAQ4 [c+d] {s} x y)
 11866  	for {
 11867  		c := v.AuxInt
 11868  		s := v.Aux
 11869  		_ = v.Args[1]
 11870  		v_0 := v.Args[0]
 11871  		if v_0.Op != OpAMD64ADDQconst {
 11872  			break
 11873  		}
 11874  		d := v_0.AuxInt
 11875  		x := v_0.Args[0]
 11876  		y := v.Args[1]
 11877  		if !(is32Bit(c+d) && x.Op != OpSB) {
 11878  			break
 11879  		}
 11880  		v.reset(OpAMD64LEAQ4)
 11881  		v.AuxInt = c + d
 11882  		v.Aux = s
 11883  		v.AddArg(x)
 11884  		v.AddArg(y)
 11885  		return true
 11886  	}
 11887  	// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
 11888  	// cond: is32Bit(c+4*d) && y.Op != OpSB
 11889  	// result: (LEAQ4 [c+4*d] {s} x y)
 11890  	for {
 11891  		c := v.AuxInt
 11892  		s := v.Aux
 11893  		_ = v.Args[1]
 11894  		x := v.Args[0]
 11895  		v_1 := v.Args[1]
 11896  		if v_1.Op != OpAMD64ADDQconst {
 11897  			break
 11898  		}
 11899  		d := v_1.AuxInt
 11900  		y := v_1.Args[0]
 11901  		if !(is32Bit(c+4*d) && y.Op != OpSB) {
 11902  			break
 11903  		}
 11904  		v.reset(OpAMD64LEAQ4)
 11905  		v.AuxInt = c + 4*d
 11906  		v.Aux = s
 11907  		v.AddArg(x)
 11908  		v.AddArg(y)
 11909  		return true
 11910  	}
 11911  	// match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
 11912  	// cond:
 11913  	// result: (LEAQ8 [c] {s} x y)
 11914  	for {
 11915  		c := v.AuxInt
 11916  		s := v.Aux
 11917  		_ = v.Args[1]
 11918  		x := v.Args[0]
 11919  		v_1 := v.Args[1]
 11920  		if v_1.Op != OpAMD64SHLQconst {
 11921  			break
 11922  		}
 11923  		if v_1.AuxInt != 1 {
 11924  			break
 11925  		}
 11926  		y := v_1.Args[0]
 11927  		v.reset(OpAMD64LEAQ8)
 11928  		v.AuxInt = c
 11929  		v.Aux = s
 11930  		v.AddArg(x)
 11931  		v.AddArg(y)
 11932  		return true
 11933  	}
 11934  	// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
 11935  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
 11936  	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
 11937  	for {
 11938  		off1 := v.AuxInt
 11939  		sym1 := v.Aux
 11940  		_ = v.Args[1]
 11941  		v_0 := v.Args[0]
 11942  		if v_0.Op != OpAMD64LEAQ {
 11943  			break
 11944  		}
 11945  		off2 := v_0.AuxInt
 11946  		sym2 := v_0.Aux
 11947  		x := v_0.Args[0]
 11948  		y := v.Args[1]
 11949  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
 11950  			break
 11951  		}
 11952  		v.reset(OpAMD64LEAQ4)
 11953  		v.AuxInt = off1 + off2
 11954  		v.Aux = mergeSym(sym1, sym2)
 11955  		v.AddArg(x)
 11956  		v.AddArg(y)
 11957  		return true
 11958  	}
 11959  	return false
 11960  }
 11961  func rewriteValueAMD64_OpAMD64LEAQ8_0(v *Value) bool {
 11962  	// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
 11963  	// cond: is32Bit(c+d) && x.Op != OpSB
 11964  	// result: (LEAQ8 [c+d] {s} x y)
 11965  	for {
 11966  		c := v.AuxInt
 11967  		s := v.Aux
 11968  		_ = v.Args[1]
 11969  		v_0 := v.Args[0]
 11970  		if v_0.Op != OpAMD64ADDQconst {
 11971  			break
 11972  		}
 11973  		d := v_0.AuxInt
 11974  		x := v_0.Args[0]
 11975  		y := v.Args[1]
 11976  		if !(is32Bit(c+d) && x.Op != OpSB) {
 11977  			break
 11978  		}
 11979  		v.reset(OpAMD64LEAQ8)
 11980  		v.AuxInt = c + d
 11981  		v.Aux = s
 11982  		v.AddArg(x)
 11983  		v.AddArg(y)
 11984  		return true
 11985  	}
 11986  	// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
 11987  	// cond: is32Bit(c+8*d) && y.Op != OpSB
 11988  	// result: (LEAQ8 [c+8*d] {s} x y)
 11989  	for {
 11990  		c := v.AuxInt
 11991  		s := v.Aux
 11992  		_ = v.Args[1]
 11993  		x := v.Args[0]
 11994  		v_1 := v.Args[1]
 11995  		if v_1.Op != OpAMD64ADDQconst {
 11996  			break
 11997  		}
 11998  		d := v_1.AuxInt
 11999  		y := v_1.Args[0]
 12000  		if !(is32Bit(c+8*d) && y.Op != OpSB) {
 12001  			break
 12002  		}
 12003  		v.reset(OpAMD64LEAQ8)
 12004  		v.AuxInt = c + 8*d
 12005  		v.Aux = s
 12006  		v.AddArg(x)
 12007  		v.AddArg(y)
 12008  		return true
 12009  	}
 12010  	// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
 12011  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
 12012  	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
 12013  	for {
 12014  		off1 := v.AuxInt
 12015  		sym1 := v.Aux
 12016  		_ = v.Args[1]
 12017  		v_0 := v.Args[0]
 12018  		if v_0.Op != OpAMD64LEAQ {
 12019  			break
 12020  		}
 12021  		off2 := v_0.AuxInt
 12022  		sym2 := v_0.Aux
 12023  		x := v_0.Args[0]
 12024  		y := v.Args[1]
 12025  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
 12026  			break
 12027  		}
 12028  		v.reset(OpAMD64LEAQ8)
 12029  		v.AuxInt = off1 + off2
 12030  		v.Aux = mergeSym(sym1, sym2)
 12031  		v.AddArg(x)
 12032  		v.AddArg(y)
 12033  		return true
 12034  	}
 12035  	return false
 12036  }
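         // rewriteValueAMD64_OpAMD64MOVBQSX_0 simplifies MOVBQSX, the
         // sign-extension of a byte to 64 bits. A single-use memory load feeding
         // the extension (MOVBload/MOVWload/MOVLload/MOVQload) is replaced by a
         // MOVBQSXload built in the load's block, so the extension happens as
         // part of the load; clobber marks the old load dead. For example
         // (illustrative source only), for
         //	var p *int8
         //	x := int64(*p)
         // the load and the extension become one sign-extending load. An
         // ANDLconst whose mask clears bit 7 makes the extension a no-op, so the
         // mask is simply narrowed to c & 0x7f, and a nested MOVBQSX is dropped.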
 12037  func rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool {
 12038  	b := v.Block
 12039  	_ = b
 12040  	// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
 12041  	// cond: x.Uses == 1 && clobber(x)
 12042  	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
 12043  	for {
 12044  		x := v.Args[0]
 12045  		if x.Op != OpAMD64MOVBload {
 12046  			break
 12047  		}
 12048  		off := x.AuxInt
 12049  		sym := x.Aux
 12050  		_ = x.Args[1]
 12051  		ptr := x.Args[0]
 12052  		mem := x.Args[1]
 12053  		if !(x.Uses == 1 && clobber(x)) {
 12054  			break
 12055  		}
 12056  		b = x.Block
 12057  		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
 12058  		v.reset(OpCopy)
 12059  		v.AddArg(v0)
 12060  		v0.AuxInt = off
 12061  		v0.Aux = sym
 12062  		v0.AddArg(ptr)
 12063  		v0.AddArg(mem)
 12064  		return true
 12065  	}
 12066  	// match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
 12067  	// cond: x.Uses == 1 && clobber(x)
 12068  	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
 12069  	for {
 12070  		x := v.Args[0]
 12071  		if x.Op != OpAMD64MOVWload {
 12072  			break
 12073  		}
 12074  		off := x.AuxInt
 12075  		sym := x.Aux
 12076  		_ = x.Args[1]
 12077  		ptr := x.Args[0]
 12078  		mem := x.Args[1]
 12079  		if !(x.Uses == 1 && clobber(x)) {
 12080  			break
 12081  		}
 12082  		b = x.Block
 12083  		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
 12084  		v.reset(OpCopy)
 12085  		v.AddArg(v0)
 12086  		v0.AuxInt = off
 12087  		v0.Aux = sym
 12088  		v0.AddArg(ptr)
 12089  		v0.AddArg(mem)
 12090  		return true
 12091  	}
 12092  	// match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
 12093  	// cond: x.Uses == 1 && clobber(x)
 12094  	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
 12095  	for {
 12096  		x := v.Args[0]
 12097  		if x.Op != OpAMD64MOVLload {
 12098  			break
 12099  		}
 12100  		off := x.AuxInt
 12101  		sym := x.Aux
 12102  		_ = x.Args[1]
 12103  		ptr := x.Args[0]
 12104  		mem := x.Args[1]
 12105  		if !(x.Uses == 1 && clobber(x)) {
 12106  			break
 12107  		}
 12108  		b = x.Block
 12109  		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
 12110  		v.reset(OpCopy)
 12111  		v.AddArg(v0)
 12112  		v0.AuxInt = off
 12113  		v0.Aux = sym
 12114  		v0.AddArg(ptr)
 12115  		v0.AddArg(mem)
 12116  		return true
 12117  	}
 12118  	// match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
 12119  	// cond: x.Uses == 1 && clobber(x)
 12120  	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
 12121  	for {
 12122  		x := v.Args[0]
 12123  		if x.Op != OpAMD64MOVQload {
 12124  			break
 12125  		}
 12126  		off := x.AuxInt
 12127  		sym := x.Aux
 12128  		_ = x.Args[1]
 12129  		ptr := x.Args[0]
 12130  		mem := x.Args[1]
 12131  		if !(x.Uses == 1 && clobber(x)) {
 12132  			break
 12133  		}
 12134  		b = x.Block
 12135  		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
 12136  		v.reset(OpCopy)
 12137  		v.AddArg(v0)
 12138  		v0.AuxInt = off
 12139  		v0.Aux = sym
 12140  		v0.AddArg(ptr)
 12141  		v0.AddArg(mem)
 12142  		return true
 12143  	}
 12144  	// match: (MOVBQSX (ANDLconst [c] x))
 12145  	// cond: c & 0x80 == 0
 12146  	// result: (ANDLconst [c & 0x7f] x)
 12147  	for {
 12148  		v_0 := v.Args[0]
 12149  		if v_0.Op != OpAMD64ANDLconst {
 12150  			break
 12151  		}
 12152  		c := v_0.AuxInt
 12153  		x := v_0.Args[0]
 12154  		if !(c&0x80 == 0) {
 12155  			break
 12156  		}
 12157  		v.reset(OpAMD64ANDLconst)
 12158  		v.AuxInt = c & 0x7f
 12159  		v.AddArg(x)
 12160  		return true
 12161  	}
 12162  	// match: (MOVBQSX (MOVBQSX x))
 12163  	// cond:
 12164  	// result: (MOVBQSX x)
 12165  	for {
 12166  		v_0 := v.Args[0]
 12167  		if v_0.Op != OpAMD64MOVBQSX {
 12168  			break
 12169  		}
 12170  		x := v_0.Args[0]
 12171  		v.reset(OpAMD64MOVBQSX)
 12172  		v.AddArg(x)
 12173  		return true
 12174  	}
 12175  	return false
 12176  }
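         // rewriteValueAMD64_OpAMD64MOVBQSXload_0 handles the sign-extending
         // byte load. Reading back a byte that was just stored to the same
         // address (same symbol, offset, and pointer) forwards the stored value
         // through a MOVBQSX instead of touching memory, and a LEAQ feeding the
         // address folds its offset and symbol into the load.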
 12177  func rewriteValueAMD64_OpAMD64MOVBQSXload_0(v *Value) bool {
 12178  	// match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
 12179  	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
 12180  	// result: (MOVBQSX x)
 12181  	for {
 12182  		off := v.AuxInt
 12183  		sym := v.Aux
 12184  		_ = v.Args[1]
 12185  		ptr := v.Args[0]
 12186  		v_1 := v.Args[1]
 12187  		if v_1.Op != OpAMD64MOVBstore {
 12188  			break
 12189  		}
 12190  		off2 := v_1.AuxInt
 12191  		sym2 := v_1.Aux
 12192  		_ = v_1.Args[2]
 12193  		ptr2 := v_1.Args[0]
 12194  		x := v_1.Args[1]
 12195  		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
 12196  			break
 12197  		}
 12198  		v.reset(OpAMD64MOVBQSX)
 12199  		v.AddArg(x)
 12200  		return true
 12201  	}
 12202  	// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
 12203  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 12204  	// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
 12205  	for {
 12206  		off1 := v.AuxInt
 12207  		sym1 := v.Aux
 12208  		_ = v.Args[1]
 12209  		v_0 := v.Args[0]
 12210  		if v_0.Op != OpAMD64LEAQ {
 12211  			break
 12212  		}
 12213  		off2 := v_0.AuxInt
 12214  		sym2 := v_0.Aux
 12215  		base := v_0.Args[0]
 12216  		mem := v.Args[1]
 12217  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 12218  			break
 12219  		}
 12220  		v.reset(OpAMD64MOVBQSXload)
 12221  		v.AuxInt = off1 + off2
 12222  		v.Aux = mergeSym(sym1, sym2)
 12223  		v.AddArg(base)
 12224  		v.AddArg(mem)
 12225  		return true
 12226  	}
 12227  	return false
 12228  }
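         // rewriteValueAMD64_OpAMD64MOVBQZX_0 simplifies MOVBQZX, the
         // zero-extension of a byte to 64 bits. Byte loads on amd64 already
         // zero-extend, so a single-use load feeding the extension becomes a
         // plain MOVBload (or MOVBloadidx1) of the desired type. If the operand
         // is already known to have zero upper 56 bits (zeroUpper56Bits), the
         // extension is a copy. An ANDLconst absorbs the extension by masking
         // with 0xff, and a nested MOVBQZX is dropped.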
 12229  func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool {
 12230  	b := v.Block
 12231  	_ = b
 12232  	// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
 12233  	// cond: x.Uses == 1 && clobber(x)
 12234  	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
 12235  	for {
 12236  		x := v.Args[0]
 12237  		if x.Op != OpAMD64MOVBload {
 12238  			break
 12239  		}
 12240  		off := x.AuxInt
 12241  		sym := x.Aux
 12242  		_ = x.Args[1]
 12243  		ptr := x.Args[0]
 12244  		mem := x.Args[1]
 12245  		if !(x.Uses == 1 && clobber(x)) {
 12246  			break
 12247  		}
 12248  		b = x.Block
 12249  		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
 12250  		v.reset(OpCopy)
 12251  		v.AddArg(v0)
 12252  		v0.AuxInt = off
 12253  		v0.Aux = sym
 12254  		v0.AddArg(ptr)
 12255  		v0.AddArg(mem)
 12256  		return true
 12257  	}
 12258  	// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
 12259  	// cond: x.Uses == 1 && clobber(x)
 12260  	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
 12261  	for {
 12262  		x := v.Args[0]
 12263  		if x.Op != OpAMD64MOVWload {
 12264  			break
 12265  		}
 12266  		off := x.AuxInt
 12267  		sym := x.Aux
 12268  		_ = x.Args[1]
 12269  		ptr := x.Args[0]
 12270  		mem := x.Args[1]
 12271  		if !(x.Uses == 1 && clobber(x)) {
 12272  			break
 12273  		}
 12274  		b = x.Block
 12275  		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
 12276  		v.reset(OpCopy)
 12277  		v.AddArg(v0)
 12278  		v0.AuxInt = off
 12279  		v0.Aux = sym
 12280  		v0.AddArg(ptr)
 12281  		v0.AddArg(mem)
 12282  		return true
 12283  	}
 12284  	// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
 12285  	// cond: x.Uses == 1 && clobber(x)
 12286  	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
 12287  	for {
 12288  		x := v.Args[0]
 12289  		if x.Op != OpAMD64MOVLload {
 12290  			break
 12291  		}
 12292  		off := x.AuxInt
 12293  		sym := x.Aux
 12294  		_ = x.Args[1]
 12295  		ptr := x.Args[0]
 12296  		mem := x.Args[1]
 12297  		if !(x.Uses == 1 && clobber(x)) {
 12298  			break
 12299  		}
 12300  		b = x.Block
 12301  		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
 12302  		v.reset(OpCopy)
 12303  		v.AddArg(v0)
 12304  		v0.AuxInt = off
 12305  		v0.Aux = sym
 12306  		v0.AddArg(ptr)
 12307  		v0.AddArg(mem)
 12308  		return true
 12309  	}
 12310  	// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
 12311  	// cond: x.Uses == 1 && clobber(x)
 12312  	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
 12313  	for {
 12314  		x := v.Args[0]
 12315  		if x.Op != OpAMD64MOVQload {
 12316  			break
 12317  		}
 12318  		off := x.AuxInt
 12319  		sym := x.Aux
 12320  		_ = x.Args[1]
 12321  		ptr := x.Args[0]
 12322  		mem := x.Args[1]
 12323  		if !(x.Uses == 1 && clobber(x)) {
 12324  			break
 12325  		}
 12326  		b = x.Block
 12327  		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
 12328  		v.reset(OpCopy)
 12329  		v.AddArg(v0)
 12330  		v0.AuxInt = off
 12331  		v0.Aux = sym
 12332  		v0.AddArg(ptr)
 12333  		v0.AddArg(mem)
 12334  		return true
 12335  	}
 12336  	// match: (MOVBQZX x)
 12337  	// cond: zeroUpper56Bits(x,3)
 12338  	// result: x
 12339  	for {
 12340  		x := v.Args[0]
 12341  		if !(zeroUpper56Bits(x, 3)) {
 12342  			break
 12343  		}
 12344  		v.reset(OpCopy)
 12345  		v.Type = x.Type
 12346  		v.AddArg(x)
 12347  		return true
 12348  	}
 12349  	// match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
 12350  	// cond: x.Uses == 1 && clobber(x)
 12351  	// result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
 12352  	for {
 12353  		x := v.Args[0]
 12354  		if x.Op != OpAMD64MOVBloadidx1 {
 12355  			break
 12356  		}
 12357  		off := x.AuxInt
 12358  		sym := x.Aux
 12359  		_ = x.Args[2]
 12360  		ptr := x.Args[0]
 12361  		idx := x.Args[1]
 12362  		mem := x.Args[2]
 12363  		if !(x.Uses == 1 && clobber(x)) {
 12364  			break
 12365  		}
 12366  		b = x.Block
 12367  		v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, v.Type)
 12368  		v.reset(OpCopy)
 12369  		v.AddArg(v0)
 12370  		v0.AuxInt = off
 12371  		v0.Aux = sym
 12372  		v0.AddArg(ptr)
 12373  		v0.AddArg(idx)
 12374  		v0.AddArg(mem)
 12375  		return true
 12376  	}
 12377  	// match: (MOVBQZX (ANDLconst [c] x))
 12378  	// cond:
 12379  	// result: (ANDLconst [c & 0xff] x)
 12380  	for {
 12381  		v_0 := v.Args[0]
 12382  		if v_0.Op != OpAMD64ANDLconst {
 12383  			break
 12384  		}
 12385  		c := v_0.AuxInt
 12386  		x := v_0.Args[0]
 12387  		v.reset(OpAMD64ANDLconst)
 12388  		v.AuxInt = c & 0xff
 12389  		v.AddArg(x)
 12390  		return true
 12391  	}
 12392  	// match: (MOVBQZX (MOVBQZX x))
 12393  	// cond:
 12394  	// result: (MOVBQZX x)
 12395  	for {
 12396  		v_0 := v.Args[0]
 12397  		if v_0.Op != OpAMD64MOVBQZX {
 12398  			break
 12399  		}
 12400  		x := v_0.Args[0]
 12401  		v.reset(OpAMD64MOVBQZX)
 12402  		v.AddArg(x)
 12403  		return true
 12404  	}
 12405  	return false
 12406  }
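         // rewriteValueAMD64_OpAMD64MOVBload_0 optimizes plain byte loads:
         // a load of a byte just stored to the same address is replaced by a
         // zero-extension of the stored value (store-to-load forwarding);
         // constant offsets (ADDQconst/ADDLconst) and LEAQ/LEAL bases fold into
         // the load's offset and symbol; LEAQ1 and ADDQ addresses turn into
         // indexed loads (MOVBloadidx1); and a load at a constant offset from a
         // read-only symbol (symIsRO) is resolved at compile time via read8,
         // becoming a MOVLconst.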
 12407  func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool {
 12408  	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
 12409  	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
 12410  	// result: (MOVBQZX x)
 12411  	for {
 12412  		off := v.AuxInt
 12413  		sym := v.Aux
 12414  		_ = v.Args[1]
 12415  		ptr := v.Args[0]
 12416  		v_1 := v.Args[1]
 12417  		if v_1.Op != OpAMD64MOVBstore {
 12418  			break
 12419  		}
 12420  		off2 := v_1.AuxInt
 12421  		sym2 := v_1.Aux
 12422  		_ = v_1.Args[2]
 12423  		ptr2 := v_1.Args[0]
 12424  		x := v_1.Args[1]
 12425  		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
 12426  			break
 12427  		}
 12428  		v.reset(OpAMD64MOVBQZX)
 12429  		v.AddArg(x)
 12430  		return true
 12431  	}
 12432  	// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
 12433  	// cond: is32Bit(off1+off2)
 12434  	// result: (MOVBload [off1+off2] {sym} ptr mem)
 12435  	for {
 12436  		off1 := v.AuxInt
 12437  		sym := v.Aux
 12438  		_ = v.Args[1]
 12439  		v_0 := v.Args[0]
 12440  		if v_0.Op != OpAMD64ADDQconst {
 12441  			break
 12442  		}
 12443  		off2 := v_0.AuxInt
 12444  		ptr := v_0.Args[0]
 12445  		mem := v.Args[1]
 12446  		if !(is32Bit(off1 + off2)) {
 12447  			break
 12448  		}
 12449  		v.reset(OpAMD64MOVBload)
 12450  		v.AuxInt = off1 + off2
 12451  		v.Aux = sym
 12452  		v.AddArg(ptr)
 12453  		v.AddArg(mem)
 12454  		return true
 12455  	}
 12456  	// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
 12457  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 12458  	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
 12459  	for {
 12460  		off1 := v.AuxInt
 12461  		sym1 := v.Aux
 12462  		_ = v.Args[1]
 12463  		v_0 := v.Args[0]
 12464  		if v_0.Op != OpAMD64LEAQ {
 12465  			break
 12466  		}
 12467  		off2 := v_0.AuxInt
 12468  		sym2 := v_0.Aux
 12469  		base := v_0.Args[0]
 12470  		mem := v.Args[1]
 12471  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 12472  			break
 12473  		}
 12474  		v.reset(OpAMD64MOVBload)
 12475  		v.AuxInt = off1 + off2
 12476  		v.Aux = mergeSym(sym1, sym2)
 12477  		v.AddArg(base)
 12478  		v.AddArg(mem)
 12479  		return true
 12480  	}
 12481  	// match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
 12482  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 12483  	// result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
 12484  	for {
 12485  		off1 := v.AuxInt
 12486  		sym1 := v.Aux
 12487  		_ = v.Args[1]
 12488  		v_0 := v.Args[0]
 12489  		if v_0.Op != OpAMD64LEAQ1 {
 12490  			break
 12491  		}
 12492  		off2 := v_0.AuxInt
 12493  		sym2 := v_0.Aux
 12494  		_ = v_0.Args[1]
 12495  		ptr := v_0.Args[0]
 12496  		idx := v_0.Args[1]
 12497  		mem := v.Args[1]
 12498  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 12499  			break
 12500  		}
 12501  		v.reset(OpAMD64MOVBloadidx1)
 12502  		v.AuxInt = off1 + off2
 12503  		v.Aux = mergeSym(sym1, sym2)
 12504  		v.AddArg(ptr)
 12505  		v.AddArg(idx)
 12506  		v.AddArg(mem)
 12507  		return true
 12508  	}
 12509  	// match: (MOVBload [off] {sym} (ADDQ ptr idx) mem)
 12510  	// cond: ptr.Op != OpSB
 12511  	// result: (MOVBloadidx1 [off] {sym} ptr idx mem)
 12512  	for {
 12513  		off := v.AuxInt
 12514  		sym := v.Aux
 12515  		_ = v.Args[1]
 12516  		v_0 := v.Args[0]
 12517  		if v_0.Op != OpAMD64ADDQ {
 12518  			break
 12519  		}
 12520  		_ = v_0.Args[1]
 12521  		ptr := v_0.Args[0]
 12522  		idx := v_0.Args[1]
 12523  		mem := v.Args[1]
 12524  		if !(ptr.Op != OpSB) {
 12525  			break
 12526  		}
 12527  		v.reset(OpAMD64MOVBloadidx1)
 12528  		v.AuxInt = off
 12529  		v.Aux = sym
 12530  		v.AddArg(ptr)
 12531  		v.AddArg(idx)
 12532  		v.AddArg(mem)
 12533  		return true
 12534  	}
 12535  	// match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
 12536  	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
 12537  	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
 12538  	for {
 12539  		off1 := v.AuxInt
 12540  		sym1 := v.Aux
 12541  		_ = v.Args[1]
 12542  		v_0 := v.Args[0]
 12543  		if v_0.Op != OpAMD64LEAL {
 12544  			break
 12545  		}
 12546  		off2 := v_0.AuxInt
 12547  		sym2 := v_0.Aux
 12548  		base := v_0.Args[0]
 12549  		mem := v.Args[1]
 12550  		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
 12551  			break
 12552  		}
 12553  		v.reset(OpAMD64MOVBload)
 12554  		v.AuxInt = off1 + off2
 12555  		v.Aux = mergeSym(sym1, sym2)
 12556  		v.AddArg(base)
 12557  		v.AddArg(mem)
 12558  		return true
 12559  	}
 12560  	// match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
 12561  	// cond: is32Bit(off1+off2)
 12562  	// result: (MOVBload [off1+off2] {sym} ptr mem)
 12563  	for {
 12564  		off1 := v.AuxInt
 12565  		sym := v.Aux
 12566  		_ = v.Args[1]
 12567  		v_0 := v.Args[0]
 12568  		if v_0.Op != OpAMD64ADDLconst {
 12569  			break
 12570  		}
 12571  		off2 := v_0.AuxInt
 12572  		ptr := v_0.Args[0]
 12573  		mem := v.Args[1]
 12574  		if !(is32Bit(off1 + off2)) {
 12575  			break
 12576  		}
 12577  		v.reset(OpAMD64MOVBload)
 12578  		v.AuxInt = off1 + off2
 12579  		v.Aux = sym
 12580  		v.AddArg(ptr)
 12581  		v.AddArg(mem)
 12582  		return true
 12583  	}
 12584  	// match: (MOVBload [off] {sym} (SB) _)
 12585  	// cond: symIsRO(sym)
 12586  	// result: (MOVLconst [int64(read8(sym, off))])
 12587  	for {
 12588  		off := v.AuxInt
 12589  		sym := v.Aux
 12590  		_ = v.Args[1]
 12591  		v_0 := v.Args[0]
 12592  		if v_0.Op != OpSB {
 12593  			break
 12594  		}
 12595  		if !(symIsRO(sym)) {
 12596  			break
 12597  		}
 12598  		v.reset(OpAMD64MOVLconst)
 12599  		v.AuxInt = int64(read8(sym, off))
 12600  		return true
 12601  	}
 12602  	return false
 12603  }
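         // rewriteValueAMD64_OpAMD64MOVBloadidx1_0 simplifies indexed byte
         // loads (ptr + idx + c). The two address operands are treated as
         // interchangeable, so an ADDQconst on either side folds into the
         // displacement, and an operand that is a MOVQconst folds away entirely,
         // degrading the indexed load back to a plain MOVBload. All folds
         // require the combined offset to stay within 32 bits.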
 12604  func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool {
 12605  	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
 12606  	// cond: is32Bit(c+d)
 12607  	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
 12608  	for {
 12609  		c := v.AuxInt
 12610  		sym := v.Aux
 12611  		_ = v.Args[2]
 12612  		v_0 := v.Args[0]
 12613  		if v_0.Op != OpAMD64ADDQconst {
 12614  			break
 12615  		}
 12616  		d := v_0.AuxInt
 12617  		ptr := v_0.Args[0]
 12618  		idx := v.Args[1]
 12619  		mem := v.Args[2]
 12620  		if !(is32Bit(c + d)) {
 12621  			break
 12622  		}
 12623  		v.reset(OpAMD64MOVBloadidx1)
 12624  		v.AuxInt = c + d
 12625  		v.Aux = sym
 12626  		v.AddArg(ptr)
 12627  		v.AddArg(idx)
 12628  		v.AddArg(mem)
 12629  		return true
 12630  	}
 12631  	// match: (MOVBloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
 12632  	// cond: is32Bit(c+d)
 12633  	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
 12634  	for {
 12635  		c := v.AuxInt
 12636  		sym := v.Aux
 12637  		_ = v.Args[2]
 12638  		idx := v.Args[0]
 12639  		v_1 := v.Args[1]
 12640  		if v_1.Op != OpAMD64ADDQconst {
 12641  			break
 12642  		}
 12643  		d := v_1.AuxInt
 12644  		ptr := v_1.Args[0]
 12645  		mem := v.Args[2]
 12646  		if !(is32Bit(c + d)) {
 12647  			break
 12648  		}
 12649  		v.reset(OpAMD64MOVBloadidx1)
 12650  		v.AuxInt = c + d
 12651  		v.Aux = sym
 12652  		v.AddArg(ptr)
 12653  		v.AddArg(idx)
 12654  		v.AddArg(mem)
 12655  		return true
 12656  	}
 12657  	// match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
 12658  	// cond: is32Bit(c+d)
 12659  	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
 12660  	for {
 12661  		c := v.AuxInt
 12662  		sym := v.Aux
 12663  		_ = v.Args[2]
 12664  		ptr := v.Args[0]
 12665  		v_1 := v.Args[1]
 12666  		if v_1.Op != OpAMD64ADDQconst {
 12667  			break
 12668  		}
 12669  		d := v_1.AuxInt
 12670  		idx := v_1.Args[0]
 12671  		mem := v.Args[2]
 12672  		if !(is32Bit(c + d)) {
 12673  			break
 12674  		}
 12675  		v.reset(OpAMD64MOVBloadidx1)
 12676  		v.AuxInt = c + d
 12677  		v.Aux = sym
 12678  		v.AddArg(ptr)
 12679  		v.AddArg(idx)
 12680  		v.AddArg(mem)
 12681  		return true
 12682  	}
 12683  	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
 12684  	// cond: is32Bit(c+d)
 12685  	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
 12686  	for {
 12687  		c := v.AuxInt
 12688  		sym := v.Aux
 12689  		_ = v.Args[2]
 12690  		v_0 := v.Args[0]
 12691  		if v_0.Op != OpAMD64ADDQconst {
 12692  			break
 12693  		}
 12694  		d := v_0.AuxInt
 12695  		idx := v_0.Args[0]
 12696  		ptr := v.Args[1]
 12697  		mem := v.Args[2]
 12698  		if !(is32Bit(c + d)) {
 12699  			break
 12700  		}
 12701  		v.reset(OpAMD64MOVBloadidx1)
 12702  		v.AuxInt = c + d
 12703  		v.Aux = sym
 12704  		v.AddArg(ptr)
 12705  		v.AddArg(idx)
 12706  		v.AddArg(mem)
 12707  		return true
 12708  	}
 12709  	// match: (MOVBloadidx1 [i] {s} p (MOVQconst [c]) mem)
 12710  	// cond: is32Bit(i+c)
 12711  	// result: (MOVBload [i+c] {s} p mem)
 12712  	for {
 12713  		i := v.AuxInt
 12714  		s := v.Aux
 12715  		_ = v.Args[2]
 12716  		p := v.Args[0]
 12717  		v_1 := v.Args[1]
 12718  		if v_1.Op != OpAMD64MOVQconst {
 12719  			break
 12720  		}
 12721  		c := v_1.AuxInt
 12722  		mem := v.Args[2]
 12723  		if !(is32Bit(i + c)) {
 12724  			break
 12725  		}
 12726  		v.reset(OpAMD64MOVBload)
 12727  		v.AuxInt = i + c
 12728  		v.Aux = s
 12729  		v.AddArg(p)
 12730  		v.AddArg(mem)
 12731  		return true
 12732  	}
 12733  	// match: (MOVBloadidx1 [i] {s} (MOVQconst [c]) p mem)
 12734  	// cond: is32Bit(i+c)
 12735  	// result: (MOVBload [i+c] {s} p mem)
 12736  	for {
 12737  		i := v.AuxInt
 12738  		s := v.Aux
 12739  		_ = v.Args[2]
 12740  		v_0 := v.Args[0]
 12741  		if v_0.Op != OpAMD64MOVQconst {
 12742  			break
 12743  		}
 12744  		c := v_0.AuxInt
 12745  		p := v.Args[1]
 12746  		mem := v.Args[2]
 12747  		if !(is32Bit(i + c)) {
 12748  			break
 12749  		}
 12750  		v.reset(OpAMD64MOVBload)
 12751  		v.AuxInt = i + c
 12752  		v.Aux = s
 12753  		v.AddArg(p)
 12754  		v.AddArg(mem)
 12755  		return true
 12756  	}
 12757  	return false
 12758  }
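         // rewriteValueAMD64_OpAMD64MOVBstore_0 fuses a byte store of a
         // single-use SETcc result (SETL, SETLE, SETG, SETGE, SETEQ, SETNE,
         // SETB, SETBE, SETA, SETAE) into the corresponding SETccstore op,
         // which writes the condition byte straight to memory instead of
         // materializing it in a register first.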
 12759  func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool {
 12760  	// match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
 12761  	// cond: y.Uses == 1
 12762  	// result: (SETLstore [off] {sym} ptr x mem)
 12763  	for {
 12764  		off := v.AuxInt
 12765  		sym := v.Aux
 12766  		_ = v.Args[2]
 12767  		ptr := v.Args[0]
 12768  		y := v.Args[1]
 12769  		if y.Op != OpAMD64SETL {
 12770  			break
 12771  		}
 12772  		x := y.Args[0]
 12773  		mem := v.Args[2]
 12774  		if !(y.Uses == 1) {
 12775  			break
 12776  		}
 12777  		v.reset(OpAMD64SETLstore)
 12778  		v.AuxInt = off
 12779  		v.Aux = sym
 12780  		v.AddArg(ptr)
 12781  		v.AddArg(x)
 12782  		v.AddArg(mem)
 12783  		return true
 12784  	}
 12785  	// match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
 12786  	// cond: y.Uses == 1
 12787  	// result: (SETLEstore [off] {sym} ptr x mem)
 12788  	for {
 12789  		off := v.AuxInt
 12790  		sym := v.Aux
 12791  		_ = v.Args[2]
 12792  		ptr := v.Args[0]
 12793  		y := v.Args[1]
 12794  		if y.Op != OpAMD64SETLE {
 12795  			break
 12796  		}
 12797  		x := y.Args[0]
 12798  		mem := v.Args[2]
 12799  		if !(y.Uses == 1) {
 12800  			break
 12801  		}
 12802  		v.reset(OpAMD64SETLEstore)
 12803  		v.AuxInt = off
 12804  		v.Aux = sym
 12805  		v.AddArg(ptr)
 12806  		v.AddArg(x)
 12807  		v.AddArg(mem)
 12808  		return true
 12809  	}
 12810  	// match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
 12811  	// cond: y.Uses == 1
 12812  	// result: (SETGstore [off] {sym} ptr x mem)
 12813  	for {
 12814  		off := v.AuxInt
 12815  		sym := v.Aux
 12816  		_ = v.Args[2]
 12817  		ptr := v.Args[0]
 12818  		y := v.Args[1]
 12819  		if y.Op != OpAMD64SETG {
 12820  			break
 12821  		}
 12822  		x := y.Args[0]
 12823  		mem := v.Args[2]
 12824  		if !(y.Uses == 1) {
 12825  			break
 12826  		}
 12827  		v.reset(OpAMD64SETGstore)
 12828  		v.AuxInt = off
 12829  		v.Aux = sym
 12830  		v.AddArg(ptr)
 12831  		v.AddArg(x)
 12832  		v.AddArg(mem)
 12833  		return true
 12834  	}
 12835  	// match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
 12836  	// cond: y.Uses == 1
 12837  	// result: (SETGEstore [off] {sym} ptr x mem)
 12838  	for {
 12839  		off := v.AuxInt
 12840  		sym := v.Aux
 12841  		_ = v.Args[2]
 12842  		ptr := v.Args[0]
 12843  		y := v.Args[1]
 12844  		if y.Op != OpAMD64SETGE {
 12845  			break
 12846  		}
 12847  		x := y.Args[0]
 12848  		mem := v.Args[2]
 12849  		if !(y.Uses == 1) {
 12850  			break
 12851  		}
 12852  		v.reset(OpAMD64SETGEstore)
 12853  		v.AuxInt = off
 12854  		v.Aux = sym
 12855  		v.AddArg(ptr)
 12856  		v.AddArg(x)
 12857  		v.AddArg(mem)
 12858  		return true
 12859  	}
 12860  	// match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
 12861  	// cond: y.Uses == 1
 12862  	// result: (SETEQstore [off] {sym} ptr x mem)
 12863  	for {
 12864  		off := v.AuxInt
 12865  		sym := v.Aux
 12866  		_ = v.Args[2]
 12867  		ptr := v.Args[0]
 12868  		y := v.Args[1]
 12869  		if y.Op != OpAMD64SETEQ {
 12870  			break
 12871  		}
 12872  		x := y.Args[0]
 12873  		mem := v.Args[2]
 12874  		if !(y.Uses == 1) {
 12875  			break
 12876  		}
 12877  		v.reset(OpAMD64SETEQstore)
 12878  		v.AuxInt = off
 12879  		v.Aux = sym
 12880  		v.AddArg(ptr)
 12881  		v.AddArg(x)
 12882  		v.AddArg(mem)
 12883  		return true
 12884  	}
 12885  	// match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
 12886  	// cond: y.Uses == 1
 12887  	// result: (SETNEstore [off] {sym} ptr x mem)
 12888  	for {
 12889  		off := v.AuxInt
 12890  		sym := v.Aux
 12891  		_ = v.Args[2]
 12892  		ptr := v.Args[0]
 12893  		y := v.Args[1]
 12894  		if y.Op != OpAMD64SETNE {
 12895  			break
 12896  		}
 12897  		x := y.Args[0]
 12898  		mem := v.Args[2]
 12899  		if !(y.Uses == 1) {
 12900  			break
 12901  		}
 12902  		v.reset(OpAMD64SETNEstore)
 12903  		v.AuxInt = off
 12904  		v.Aux = sym
 12905  		v.AddArg(ptr)
 12906  		v.AddArg(x)
 12907  		v.AddArg(mem)
 12908  		return true
 12909  	}
 12910  	// match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
 12911  	// cond: y.Uses == 1
 12912  	// result: (SETBstore [off] {sym} ptr x mem)
 12913  	for {
 12914  		off := v.AuxInt
 12915  		sym := v.Aux
 12916  		_ = v.Args[2]
 12917  		ptr := v.Args[0]
 12918  		y := v.Args[1]
 12919  		if y.Op != OpAMD64SETB {
 12920  			break
 12921  		}
 12922  		x := y.Args[0]
 12923  		mem := v.Args[2]
 12924  		if !(y.Uses == 1) {
 12925  			break
 12926  		}
 12927  		v.reset(OpAMD64SETBstore)
 12928  		v.AuxInt = off
 12929  		v.Aux = sym
 12930  		v.AddArg(ptr)
 12931  		v.AddArg(x)
 12932  		v.AddArg(mem)
 12933  		return true
 12934  	}
 12935  	// match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
 12936  	// cond: y.Uses == 1
 12937  	// result: (SETBEstore [off] {sym} ptr x mem)
 12938  	for {
 12939  		off := v.AuxInt
 12940  		sym := v.Aux
 12941  		_ = v.Args[2]
 12942  		ptr := v.Args[0]
 12943  		y := v.Args[1]
 12944  		if y.Op != OpAMD64SETBE {
 12945  			break
 12946  		}
 12947  		x := y.Args[0]
 12948  		mem := v.Args[2]
 12949  		if !(y.Uses == 1) {
 12950  			break
 12951  		}
 12952  		v.reset(OpAMD64SETBEstore)
 12953  		v.AuxInt = off
 12954  		v.Aux = sym
 12955  		v.AddArg(ptr)
 12956  		v.AddArg(x)
 12957  		v.AddArg(mem)
 12958  		return true
 12959  	}
 12960  	// match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
 12961  	// cond: y.Uses == 1
 12962  	// result: (SETAstore [off] {sym} ptr x mem)
 12963  	for {
 12964  		off := v.AuxInt
 12965  		sym := v.Aux
 12966  		_ = v.Args[2]
 12967  		ptr := v.Args[0]
 12968  		y := v.Args[1]
 12969  		if y.Op != OpAMD64SETA {
 12970  			break
 12971  		}
 12972  		x := y.Args[0]
 12973  		mem := v.Args[2]
 12974  		if !(y.Uses == 1) {
 12975  			break
 12976  		}
 12977  		v.reset(OpAMD64SETAstore)
 12978  		v.AuxInt = off
 12979  		v.Aux = sym
 12980  		v.AddArg(ptr)
 12981  		v.AddArg(x)
 12982  		v.AddArg(mem)
 12983  		return true
 12984  	}
 12985  	// match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
 12986  	// cond: y.Uses == 1
 12987  	// result: (SETAEstore [off] {sym} ptr x mem)
 12988  	for {
 12989  		off := v.AuxInt
 12990  		sym := v.Aux
 12991  		_ = v.Args[2]
 12992  		ptr := v.Args[0]
 12993  		y := v.Args[1]
 12994  		if y.Op != OpAMD64SETAE {
 12995  			break
 12996  		}
 12997  		x := y.Args[0]
 12998  		mem := v.Args[2]
 12999  		if !(y.Uses == 1) {
 13000  			break
 13001  		}
 13002  		v.reset(OpAMD64SETAEstore)
 13003  		v.AuxInt = off
 13004  		v.Aux = sym
 13005  		v.AddArg(ptr)
 13006  		v.AddArg(x)
 13007  		v.AddArg(mem)
 13008  		return true
 13009  	}
 13010  	return false
 13011  }
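         // rewriteValueAMD64_OpAMD64MOVBstore_10 continues the MOVBstore rules:
         // sign/zero extensions of the stored value are dropped (only the low
         // byte is written), constant offsets and LEAQ bases fold into the
         // store, constant values become MOVBstoreconst, and LEAQ1/ADDQ
         // addresses become indexed stores. The final two rules recognize a
         // value written out byte by byte in big-endian order and replace the
         // chain with one ROLWconst (2 bytes) or BSWAPL (4 bytes) plus a single
         // wider store. For example (illustrative source only), stores such as
         //	b[0] = byte(v >> 24)
         //	b[1] = byte(v >> 16)
         //	b[2] = byte(v >> 8)
         //	b[3] = byte(v)
         // can be matched by the last rule and lowered to BSWAPL + MOVLstore,
         // provided each intermediate byte store has no other uses.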
 13012  func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool {
 13013  	b := v.Block
 13014  	_ = b
 13015  	// match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
 13016  	// cond:
 13017  	// result: (MOVBstore [off] {sym} ptr x mem)
 13018  	for {
 13019  		off := v.AuxInt
 13020  		sym := v.Aux
 13021  		_ = v.Args[2]
 13022  		ptr := v.Args[0]
 13023  		v_1 := v.Args[1]
 13024  		if v_1.Op != OpAMD64MOVBQSX {
 13025  			break
 13026  		}
 13027  		x := v_1.Args[0]
 13028  		mem := v.Args[2]
 13029  		v.reset(OpAMD64MOVBstore)
 13030  		v.AuxInt = off
 13031  		v.Aux = sym
 13032  		v.AddArg(ptr)
 13033  		v.AddArg(x)
 13034  		v.AddArg(mem)
 13035  		return true
 13036  	}
 13037  	// match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
 13038  	// cond:
 13039  	// result: (MOVBstore [off] {sym} ptr x mem)
 13040  	for {
 13041  		off := v.AuxInt
 13042  		sym := v.Aux
 13043  		_ = v.Args[2]
 13044  		ptr := v.Args[0]
 13045  		v_1 := v.Args[1]
 13046  		if v_1.Op != OpAMD64MOVBQZX {
 13047  			break
 13048  		}
 13049  		x := v_1.Args[0]
 13050  		mem := v.Args[2]
 13051  		v.reset(OpAMD64MOVBstore)
 13052  		v.AuxInt = off
 13053  		v.Aux = sym
 13054  		v.AddArg(ptr)
 13055  		v.AddArg(x)
 13056  		v.AddArg(mem)
 13057  		return true
 13058  	}
 13059  	// match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
 13060  	// cond: is32Bit(off1+off2)
 13061  	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
 13062  	for {
 13063  		off1 := v.AuxInt
 13064  		sym := v.Aux
 13065  		_ = v.Args[2]
 13066  		v_0 := v.Args[0]
 13067  		if v_0.Op != OpAMD64ADDQconst {
 13068  			break
 13069  		}
 13070  		off2 := v_0.AuxInt
 13071  		ptr := v_0.Args[0]
 13072  		val := v.Args[1]
 13073  		mem := v.Args[2]
 13074  		if !(is32Bit(off1 + off2)) {
 13075  			break
 13076  		}
 13077  		v.reset(OpAMD64MOVBstore)
 13078  		v.AuxInt = off1 + off2
 13079  		v.Aux = sym
 13080  		v.AddArg(ptr)
 13081  		v.AddArg(val)
 13082  		v.AddArg(mem)
 13083  		return true
 13084  	}
 13085  	// match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
 13086  	// cond: validOff(off)
 13087  	// result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
 13088  	for {
 13089  		off := v.AuxInt
 13090  		sym := v.Aux
 13091  		_ = v.Args[2]
 13092  		ptr := v.Args[0]
 13093  		v_1 := v.Args[1]
 13094  		if v_1.Op != OpAMD64MOVLconst {
 13095  			break
 13096  		}
 13097  		c := v_1.AuxInt
 13098  		mem := v.Args[2]
 13099  		if !(validOff(off)) {
 13100  			break
 13101  		}
 13102  		v.reset(OpAMD64MOVBstoreconst)
 13103  		v.AuxInt = makeValAndOff(int64(int8(c)), off)
 13104  		v.Aux = sym
 13105  		v.AddArg(ptr)
 13106  		v.AddArg(mem)
 13107  		return true
 13108  	}
 13109  	// match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem)
 13110  	// cond: validOff(off)
 13111  	// result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
 13112  	for {
 13113  		off := v.AuxInt
 13114  		sym := v.Aux
 13115  		_ = v.Args[2]
 13116  		ptr := v.Args[0]
 13117  		v_1 := v.Args[1]
 13118  		if v_1.Op != OpAMD64MOVQconst {
 13119  			break
 13120  		}
 13121  		c := v_1.AuxInt
 13122  		mem := v.Args[2]
 13123  		if !(validOff(off)) {
 13124  			break
 13125  		}
 13126  		v.reset(OpAMD64MOVBstoreconst)
 13127  		v.AuxInt = makeValAndOff(int64(int8(c)), off)
 13128  		v.Aux = sym
 13129  		v.AddArg(ptr)
 13130  		v.AddArg(mem)
 13131  		return true
 13132  	}
 13133  	// match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
 13134  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 13135  	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
 13136  	for {
 13137  		off1 := v.AuxInt
 13138  		sym1 := v.Aux
 13139  		_ = v.Args[2]
 13140  		v_0 := v.Args[0]
 13141  		if v_0.Op != OpAMD64LEAQ {
 13142  			break
 13143  		}
 13144  		off2 := v_0.AuxInt
 13145  		sym2 := v_0.Aux
 13146  		base := v_0.Args[0]
 13147  		val := v.Args[1]
 13148  		mem := v.Args[2]
 13149  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 13150  			break
 13151  		}
 13152  		v.reset(OpAMD64MOVBstore)
 13153  		v.AuxInt = off1 + off2
 13154  		v.Aux = mergeSym(sym1, sym2)
 13155  		v.AddArg(base)
 13156  		v.AddArg(val)
 13157  		v.AddArg(mem)
 13158  		return true
 13159  	}
 13160  	// match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
 13161  	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
 13162  	// result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
 13163  	for {
 13164  		off1 := v.AuxInt
 13165  		sym1 := v.Aux
 13166  		_ = v.Args[2]
 13167  		v_0 := v.Args[0]
 13168  		if v_0.Op != OpAMD64LEAQ1 {
 13169  			break
 13170  		}
 13171  		off2 := v_0.AuxInt
 13172  		sym2 := v_0.Aux
 13173  		_ = v_0.Args[1]
 13174  		ptr := v_0.Args[0]
 13175  		idx := v_0.Args[1]
 13176  		val := v.Args[1]
 13177  		mem := v.Args[2]
 13178  		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
 13179  			break
 13180  		}
 13181  		v.reset(OpAMD64MOVBstoreidx1)
 13182  		v.AuxInt = off1 + off2
 13183  		v.Aux = mergeSym(sym1, sym2)
 13184  		v.AddArg(ptr)
 13185  		v.AddArg(idx)
 13186  		v.AddArg(val)
 13187  		v.AddArg(mem)
 13188  		return true
 13189  	}
 13190  	// match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem)
 13191  	// cond: ptr.Op != OpSB
 13192  	// result: (MOVBstoreidx1 [off] {sym} ptr idx val mem)
 13193  	for {
 13194  		off := v.AuxInt
 13195  		sym := v.Aux
 13196  		_ = v.Args[2]
 13197  		v_0 := v.Args[0]
 13198  		if v_0.Op != OpAMD64ADDQ {
 13199  			break
 13200  		}
 13201  		_ = v_0.Args[1]
 13202  		ptr := v_0.Args[0]
 13203  		idx := v_0.Args[1]
 13204  		val := v.Args[1]
 13205  		mem := v.Args[2]
 13206  		if !(ptr.Op != OpSB) {
 13207  			break
 13208  		}
 13209  		v.reset(OpAMD64MOVBstoreidx1)
 13210  		v.AuxInt = off
 13211  		v.Aux = sym
 13212  		v.AddArg(ptr)
 13213  		v.AddArg(idx)
 13214  		v.AddArg(val)
 13215  		v.AddArg(mem)
 13216  		return true
 13217  	}
 13218  	// match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
 13219  	// cond: x0.Uses == 1 && clobber(x0)
 13220  	// result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
 13221  	for {
 13222  		i := v.AuxInt
 13223  		s := v.Aux
 13224  		_ = v.Args[2]
 13225  		p := v.Args[0]
 13226  		w := v.Args[1]
 13227  		x0 := v.Args[2]
 13228  		if x0.Op != OpAMD64MOVBstore {
 13229  			break
 13230  		}
 13231  		if x0.AuxInt != i-1 {
 13232  			break
 13233  		}
 13234  		if x0.Aux != s {
 13235  			break
 13236  		}
 13237  		_ = x0.Args[2]
 13238  		if p != x0.Args[0] {
 13239  			break
 13240  		}
 13241  		x0_1 := x0.Args[1]
 13242  		if x0_1.Op != OpAMD64SHRWconst {
 13243  			break
 13244  		}
 13245  		if x0_1.AuxInt != 8 {
 13246  			break
 13247  		}
 13248  		if w != x0_1.Args[0] {
 13249  			break
 13250  		}
 13251  		mem := x0.Args[2]
 13252  		if !(x0.Uses == 1 && clobber(x0)) {
 13253  			break
 13254  		}
 13255  		v.reset(OpAMD64MOVWstore)
 13256  		v.AuxInt = i - 1
 13257  		v.Aux = s
 13258  		v.AddArg(p)
 13259  		v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
 13260  		v0.AuxInt = 8
 13261  		v0.AddArg(w)
 13262  		v.AddArg(v0)
 13263  		v.AddArg(mem)
 13264  		return true
 13265  	}
 13266  	// match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
 13267  	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)
 13268  	// result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
 13269  	for {
 13270  		i := v.AuxInt
 13271  		s := v.Aux
 13272  		_ = v.Args[2]
 13273  		p := v.Args[0]
 13274  		w := v.Args[1]
 13275  		x2 := v.Args[2]
 13276  		if x2.Op != OpAMD64MOVBstore {
 13277  			break
 13278  		}
 13279  		if x2.AuxInt != i-1 {
 13280  			break
 13281  		}
 13282  		if x2.Aux != s {
 13283  			break
 13284  		}
 13285  		_ = x2.Args[2]
 13286  		if p != x2.Args[0] {
 13287  			break
 13288  		}
 13289  		x2_1 := x2.Args[1]
 13290  		if x2_1.Op != OpAMD64SHRLconst {
 13291  			break
 13292  		}
 13293  		if x2_1.AuxInt != 8 {
 13294  			break
 13295  		}
 13296  		if w != x2_1.Args[0] {
 13297  			break
 13298  		}
 13299  		x1 := x2.Args[2]
 13300  		if x1.Op != OpAMD64MOVBstore {
 13301  			break
 13302  		}
 13303  		if x1.AuxInt != i-2 {
 13304  			break
 13305  		}
 13306  		if x1.Aux != s {
 13307  			break
 13308  		}
 13309  		_ = x1.Args[2]
 13310  		if p != x1.Args[0] {
 13311  			break
 13312  		}
 13313  		x1_1 := x1.Args[1]
 13314  		if x1_1.Op != OpAMD64SHRLconst {
 13315  			break
 13316  		}
 13317  		if x1_1.AuxInt != 16 {
 13318  			break
 13319  		}
 13320  		if w != x1_1.Args[0] {
 13321  			break
 13322  		}
 13323  		x0 := x1.Args[2]
 13324  		if x0.Op != OpAMD64MOVBstore {
 13325  			break
 13326  		}
 13327  		if x0.AuxInt != i-3 {
 13328  			break
 13329  		}
 13330  		if x0.Aux != s {
 13331  			break
 13332  		}
 13333  		_ = x0.Args[2]
 13334  		if p != x0.Args[0] {
 13335  			break
 13336  		}
 13337  		x0_1 := x0.Args[1]
 13338  		if x0_1.Op != OpAMD64SHRLconst {
 13339  			break
 13340  		}
 13341  		if x0_1.AuxInt != 24 {
 13342  			break
 13343  		}
 13344  		if w != x0_1.Args[0] {
 13345  			break
 13346  		}
 13347  		mem := x0.Args[2]
 13348  		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) {
 13349  			break
 13350  		}
 13351  		v.reset(OpAMD64MOVLstore)
 13352  		v.AuxInt = i - 3
 13353  		v.Aux = s
 13354  		v.AddArg(p)
 13355  		v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
 13356  		v0.AddArg(w)
 13357  		v.AddArg(v0)
 13358  		v.AddArg(mem)
 13359  		return true
 13360  	}
 13361  	return false
 13362  }
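         // rewriteValueAMD64_OpAMD64MOVBstore_20 extends the byte-swap pattern
         // to 64 bits: eight adjacent single-byte stores of w, w>>8, ..., w>>56
         // at decreasing offsets collapse into one BSWAPQ plus a single
         // MOVQstore, again only when every intermediate store is single-use.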
 13363  func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool {
 13364  	b := v.Block
 13365  	_ = b
 13366  	typ := &b.Func.Config.Types
 13367  	_ = typ
 13368  	// match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
 13369  	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)
 13370  	// result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
 13371  	for {
 13372  		i := v.AuxInt
 13373  		s := v.Aux
 13374  		_ = v.Args[2]
 13375  		p := v.Args[0]
 13376  		w := v.Args[1]
 13377  		x6 := v.Args[2]
 13378  		if x6.Op != OpAMD64MOVBstore {
 13379  			break
 13380  		}
 13381  		if x6.AuxInt != i-1 {
 13382  			break
 13383  		}
 13384  		if x6.Aux != s {
 13385  			break
 13386  		}
 13387  		_ = x6.Args[2]
 13388  		if p != x6.Args[0] {
 13389  			break
 13390  		}
 13391  		x6_1 := x6.Args[1]
 13392  		if x6_1.Op != OpAMD64SHRQconst {
 13393  			break
 13394  		}
 13395  		if x6_1.AuxInt != 8 {
 13396  			break
 13397  		}
 13398  		if w != x6_1.Args[0] {
 13399  			break
 13400  		}
 13401  		x5 := x6.Args[2]
 13402  		if x5.Op != OpAMD64MOVBstore {
 13403  			break
 13404  		}
 13405  		if x5.AuxInt != i-2 {
 13406  			break
 13407  		}
 13408  		if x5.Aux != s {
 13409  			break
 13410  		}
 13411  		_ = x5.Args[2]
 13412  		if p != x5.Args[0] {
 13413  			break
 13414  		}
 13415  		x5_1 := x5.Args[1]
 13416  		if x5_1.Op != OpAMD64SHRQconst {
 13417  			break
 13418  		}
 13419  		if x5_1.AuxInt != 16 {
 13420  			break
 13421  		}
 13422  		if w != x5_1.Args[0] {
 13423  			break
 13424  		}
 13425  		x4 := x5.Args[2]
 13426  		if x4.Op != OpAMD64MOVBstore {
 13427  			break
 13428  		}
 13429  		if x4.AuxInt != i-3 {
 13430  			break
 13431  		}
 13432  		if x4.Aux != s {
 13433  			break
 13434  		}
 13435  		_ = x4.Args[2]
 13436  		if p != x4.Args[0] {
 13437  			break
 13438  		}
 13439  		x4_1 := x4.Args[1]
 13440  		if x4_1.Op != OpAMD64SHRQconst {
 13441  			break
 13442  		}
 13443  		if x4_1.AuxInt != 24 {
 13444  			break
 13445  		}
 13446  		if w != x4_1.Args[0] {
 13447  			break
 13448  		}