// Code generated from gen/AMD64.rules; DO NOT EDIT.
// generated with: cd gen; go run *.go

package ssa

import "math"
import "cmd/compile/internal/types"

func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADCQ:
		return rewriteValueAMD64_OpAMD64ADCQ(v)
	case OpAMD64ADCQconst:
		return rewriteValueAMD64_OpAMD64ADCQconst(v)
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst(v)
	case OpAMD64ADDLconstmodify:
		return rewriteValueAMD64_OpAMD64ADDLconstmodify(v)
	case OpAMD64ADDLload:
		return rewriteValueAMD64_OpAMD64ADDLload(v)
	case OpAMD64ADDLmodify:
		return rewriteValueAMD64_OpAMD64ADDLmodify(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ(v)
	case OpAMD64ADDQcarry:
		return rewriteValueAMD64_OpAMD64ADDQcarry(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst(v)
	case OpAMD64ADDQconstmodify:
		return rewriteValueAMD64_OpAMD64ADDQconstmodify(v)
	case OpAMD64ADDQload:
		return rewriteValueAMD64_OpAMD64ADDQload(v)
	case OpAMD64ADDQmodify:
		return rewriteValueAMD64_OpAMD64ADDQmodify(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD(v)
	case OpAMD64ADDSDload:
		return rewriteValueAMD64_OpAMD64ADDSDload(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS(v)
	case OpAMD64ADDSSload:
		return rewriteValueAMD64_OpAMD64ADDSSload(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst(v)
	case OpAMD64ANDLconstmodify:
		return rewriteValueAMD64_OpAMD64ANDLconstmodify(v)
	case OpAMD64ANDLload:
		return rewriteValueAMD64_OpAMD64ANDLload(v)
	case OpAMD64ANDLmodify:
		return rewriteValueAMD64_OpAMD64ANDLmodify(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst(v)
	case OpAMD64ANDQconstmodify:
		return rewriteValueAMD64_OpAMD64ANDQconstmodify(v)
	case OpAMD64ANDQload:
		return rewriteValueAMD64_OpAMD64ANDQload(v)
	case OpAMD64ANDQmodify:
		return rewriteValueAMD64_OpAMD64ANDQmodify(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ(v)
	case OpAMD64BTCLconst:
		return rewriteValueAMD64_OpAMD64BTCLconst(v)
	case OpAMD64BTCLconstmodify:
		return rewriteValueAMD64_OpAMD64BTCLconstmodify(v)
	case OpAMD64BTCLmodify:
		return rewriteValueAMD64_OpAMD64BTCLmodify(v)
	case OpAMD64BTCQconst:
		return rewriteValueAMD64_OpAMD64BTCQconst(v)
	case OpAMD64BTCQconstmodify:
		return rewriteValueAMD64_OpAMD64BTCQconstmodify(v)
	case OpAMD64BTCQmodify:
		return rewriteValueAMD64_OpAMD64BTCQmodify(v)
	case OpAMD64BTLconst:
		return rewriteValueAMD64_OpAMD64BTLconst(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst(v)
	case OpAMD64BTRLconst:
		return rewriteValueAMD64_OpAMD64BTRLconst(v)
	case OpAMD64BTRLconstmodify:
		return rewriteValueAMD64_OpAMD64BTRLconstmodify(v)
	case OpAMD64BTRLmodify:
		return rewriteValueAMD64_OpAMD64BTRLmodify(v)
	case OpAMD64BTRQconst:
		return rewriteValueAMD64_OpAMD64BTRQconst(v)
	case OpAMD64BTRQconstmodify:
		return rewriteValueAMD64_OpAMD64BTRQconstmodify(v)
	case OpAMD64BTRQmodify:
		return rewriteValueAMD64_OpAMD64BTRQmodify(v)
	case OpAMD64BTSLconst:
		return rewriteValueAMD64_OpAMD64BTSLconst(v)
	case OpAMD64BTSLconstmodify:
		return rewriteValueAMD64_OpAMD64BTSLconstmodify(v)
	case OpAMD64BTSLmodify:
		return rewriteValueAMD64_OpAMD64BTSLmodify(v)
	case OpAMD64BTSQconst:
		return rewriteValueAMD64_OpAMD64BTSQconst(v)
	case OpAMD64BTSQconstmodify:
		return rewriteValueAMD64_OpAMD64BTSQconstmodify(v)
	case OpAMD64BTSQmodify:
		return rewriteValueAMD64_OpAMD64BTSQmodify(v)
	case OpAMD64CMOVLCC:
		return rewriteValueAMD64_OpAMD64CMOVLCC(v)
	case OpAMD64CMOVLCS:
		return rewriteValueAMD64_OpAMD64CMOVLCS(v)
	case OpAMD64CMOVLEQ:
		return rewriteValueAMD64_OpAMD64CMOVLEQ(v)
	case OpAMD64CMOVLGE:
		return rewriteValueAMD64_OpAMD64CMOVLGE(v)
	case OpAMD64CMOVLGT:
		return rewriteValueAMD64_OpAMD64CMOVLGT(v)
	case OpAMD64CMOVLHI:
		return rewriteValueAMD64_OpAMD64CMOVLHI(v)
	case OpAMD64CMOVLLE:
		return rewriteValueAMD64_OpAMD64CMOVLLE(v)
	case OpAMD64CMOVLLS:
		return rewriteValueAMD64_OpAMD64CMOVLLS(v)
	case OpAMD64CMOVLLT:
		return rewriteValueAMD64_OpAMD64CMOVLLT(v)
	case OpAMD64CMOVLNE:
		return rewriteValueAMD64_OpAMD64CMOVLNE(v)
	case OpAMD64CMOVQCC:
		return rewriteValueAMD64_OpAMD64CMOVQCC(v)
	case OpAMD64CMOVQCS:
		return rewriteValueAMD64_OpAMD64CMOVQCS(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ(v)
	case OpAMD64CMOVQGE:
		return rewriteValueAMD64_OpAMD64CMOVQGE(v)
	case OpAMD64CMOVQGT:
		return rewriteValueAMD64_OpAMD64CMOVQGT(v)
	case OpAMD64CMOVQHI:
		return rewriteValueAMD64_OpAMD64CMOVQHI(v)
	case OpAMD64CMOVQLE:
		return rewriteValueAMD64_OpAMD64CMOVQLE(v)
	case OpAMD64CMOVQLS:
		return rewriteValueAMD64_OpAMD64CMOVQLS(v)
	case OpAMD64CMOVQLT:
		return rewriteValueAMD64_OpAMD64CMOVQLT(v)
	case OpAMD64CMOVQNE:
		return rewriteValueAMD64_OpAMD64CMOVQNE(v)
	case OpAMD64CMOVWCC:
		return rewriteValueAMD64_OpAMD64CMOVWCC(v)
	case OpAMD64CMOVWCS:
		return rewriteValueAMD64_OpAMD64CMOVWCS(v)
	case OpAMD64CMOVWEQ:
		return rewriteValueAMD64_OpAMD64CMOVWEQ(v)
	case OpAMD64CMOVWGE:
		return rewriteValueAMD64_OpAMD64CMOVWGE(v)
	case OpAMD64CMOVWGT:
		return rewriteValueAMD64_OpAMD64CMOVWGT(v)
	case OpAMD64CMOVWHI:
		return rewriteValueAMD64_OpAMD64CMOVWHI(v)
	case OpAMD64CMOVWLE:
		return rewriteValueAMD64_OpAMD64CMOVWLE(v)
	case OpAMD64CMOVWLS:
		return rewriteValueAMD64_OpAMD64CMOVWLS(v)
	case OpAMD64CMOVWLT:
		return rewriteValueAMD64_OpAMD64CMOVWLT(v)
	case OpAMD64CMOVWNE:
		return rewriteValueAMD64_OpAMD64CMOVWNE(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst(v)
	case OpAMD64CMPBconstload:
		return rewriteValueAMD64_OpAMD64CMPBconstload(v)
	case OpAMD64CMPBload:
		return rewriteValueAMD64_OpAMD64CMPBload(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst(v)
	case OpAMD64CMPLconstload:
		return rewriteValueAMD64_OpAMD64CMPLconstload(v)
	case OpAMD64CMPLload:
		return rewriteValueAMD64_OpAMD64CMPLload(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst(v)
	case OpAMD64CMPQconstload:
		return rewriteValueAMD64_OpAMD64CMPQconstload(v)
	case OpAMD64CMPQload:
		return rewriteValueAMD64_OpAMD64CMPQload(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst(v)
	case OpAMD64CMPWconstload:
		return rewriteValueAMD64_OpAMD64CMPWconstload(v)
	case OpAMD64CMPWload:
		return rewriteValueAMD64_OpAMD64CMPWload(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v)
	case OpAMD64DIVSD:
		return rewriteValueAMD64_OpAMD64DIVSD(v)
	case OpAMD64DIVSDload:
		return rewriteValueAMD64_OpAMD64DIVSDload(v)
	case OpAMD64DIVSS:
		return rewriteValueAMD64_OpAMD64DIVSS(v)
	case OpAMD64DIVSSload:
		return rewriteValueAMD64_OpAMD64DIVSSload(v)
	case OpAMD64HMULL:
		return rewriteValueAMD64_OpAMD64HMULL(v)
	case OpAMD64HMULLU:
		return rewriteValueAMD64_OpAMD64HMULLU(v)
	case OpAMD64HMULQ:
		return rewriteValueAMD64_OpAMD64HMULQ(v)
	case OpAMD64HMULQU:
		return rewriteValueAMD64_OpAMD64HMULQU(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL(v)
	case OpAMD64LEAL1:
		return rewriteValueAMD64_OpAMD64LEAL1(v)
	case OpAMD64LEAL2:
		return rewriteValueAMD64_OpAMD64LEAL2(v)
	case OpAMD64LEAL4:
		return rewriteValueAMD64_OpAMD64LEAL4(v)
	case OpAMD64LEAL8:
		return rewriteValueAMD64_OpAMD64LEAL8(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX(v)
	case OpAMD64MOVBatomicload:
		return rewriteValueAMD64_OpAMD64MOVBatomicload(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload(v)
	case OpAMD64MOVLf2i:
		return rewriteValueAMD64_OpAMD64MOVLf2i(v)
	case OpAMD64MOVLi2f:
		return rewriteValueAMD64_OpAMD64MOVLi2f(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload(v)
	case OpAMD64MOVQf2i:
		return rewriteValueAMD64_OpAMD64MOVQf2i(v)
	case OpAMD64MOVQi2f:
		return rewriteValueAMD64_OpAMD64MOVQi2f(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD(v)
	case OpAMD64MULSDload:
		return rewriteValueAMD64_OpAMD64MULSDload(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS(v)
	case OpAMD64MULSSload:
		return rewriteValueAMD64_OpAMD64MULSSload(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst(v)
	case OpAMD64ORLconstmodify:
		return rewriteValueAMD64_OpAMD64ORLconstmodify(v)
	case OpAMD64ORLload:
		return rewriteValueAMD64_OpAMD64ORLload(v)
	case OpAMD64ORLmodify:
		return rewriteValueAMD64_OpAMD64ORLmodify(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst(v)
	case OpAMD64ORQconstmodify:
		return rewriteValueAMD64_OpAMD64ORQconstmodify(v)
	case OpAMD64ORQload:
		return rewriteValueAMD64_OpAMD64ORQload(v)
	case OpAMD64ORQmodify:
		return rewriteValueAMD64_OpAMD64ORQmodify(v)
	case OpAMD64ROLB:
		return rewriteValueAMD64_OpAMD64ROLB(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst(v)
	case OpAMD64ROLL:
		return rewriteValueAMD64_OpAMD64ROLL(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst(v)
	case OpAMD64ROLQ:
		return rewriteValueAMD64_OpAMD64ROLQ(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst(v)
	case OpAMD64ROLW:
		return rewriteValueAMD64_OpAMD64ROLW(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst(v)
	case OpAMD64RORB:
		return rewriteValueAMD64_OpAMD64RORB(v)
	case OpAMD64RORL:
		return rewriteValueAMD64_OpAMD64RORL(v)
	case OpAMD64RORQ:
		return rewriteValueAMD64_OpAMD64RORQ(v)
	case OpAMD64RORW:
		return rewriteValueAMD64_OpAMD64RORW(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask(v)
	case OpAMD64SBBQ:
		return rewriteValueAMD64_OpAMD64SBBQ(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask(v)
	case OpAMD64SBBQconst:
		return rewriteValueAMD64_OpAMD64SBBQconst(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE(v)
	case OpAMD64SETAEstore:
		return rewriteValueAMD64_OpAMD64SETAEstore(v)
	case OpAMD64SETAstore:
		return rewriteValueAMD64_OpAMD64SETAstore(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE(v)
	case OpAMD64SETBEstore:
		return rewriteValueAMD64_OpAMD64SETBEstore(v)
	case OpAMD64SETBstore:
		return rewriteValueAMD64_OpAMD64SETBstore(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ(v)
	case OpAMD64SETEQstore:
		return rewriteValueAMD64_OpAMD64SETEQstore(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE(v)
	case OpAMD64SETGEstore:
		return rewriteValueAMD64_OpAMD64SETGEstore(v)
	case OpAMD64SETGstore:
		return rewriteValueAMD64_OpAMD64SETGstore(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE(v)
	case OpAMD64SETLEstore:
		return rewriteValueAMD64_OpAMD64SETLEstore(v)
	case OpAMD64SETLstore:
		return rewriteValueAMD64_OpAMD64SETLstore(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE(v)
	case OpAMD64SETNEstore:
		return rewriteValueAMD64_OpAMD64SETNEstore(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst(v)
	case OpAMD64SUBLload:
		return rewriteValueAMD64_OpAMD64SUBLload(v)
	case OpAMD64SUBLmodify:
		return rewriteValueAMD64_OpAMD64SUBLmodify(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ(v)
	case OpAMD64SUBQborrow:
		return rewriteValueAMD64_OpAMD64SUBQborrow(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst(v)
	case OpAMD64SUBQload:
		return rewriteValueAMD64_OpAMD64SUBQload(v)
	case OpAMD64SUBQmodify:
		return rewriteValueAMD64_OpAMD64SUBQmodify(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD(v)
	case OpAMD64SUBSDload:
		return rewriteValueAMD64_OpAMD64SUBSDload(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS(v)
	case OpAMD64SUBSSload:
		return rewriteValueAMD64_OpAMD64SUBSSload(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB(v)
	case OpAMD64TESTBconst:
		return rewriteValueAMD64_OpAMD64TESTBconst(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL(v)
	case OpAMD64TESTLconst:
		return rewriteValueAMD64_OpAMD64TESTLconst(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ(v)
	case OpAMD64TESTQconst:
		return rewriteValueAMD64_OpAMD64TESTQconst(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW(v)
	case OpAMD64TESTWconst:
		return rewriteValueAMD64_OpAMD64TESTWconst(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst(v)
	case OpAMD64XORLconstmodify:
		return rewriteValueAMD64_OpAMD64XORLconstmodify(v)
	case OpAMD64XORLload:
		return rewriteValueAMD64_OpAMD64XORLload(v)
	case OpAMD64XORLmodify:
		return rewriteValueAMD64_OpAMD64XORLmodify(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst(v)
	case OpAMD64XORQconstmodify:
		return rewriteValueAMD64_OpAMD64XORQconstmodify(v)
	case OpAMD64XORQload:
		return rewriteValueAMD64_OpAMD64XORQload(v)
	case OpAMD64XORQmodify:
		return rewriteValueAMD64_OpAMD64XORQmodify(v)
	case OpAdd16:
		v.Op = OpAMD64ADDL
		return true
	case OpAdd32:
		v.Op = OpAMD64ADDL
		return true
	case OpAdd32F:
		v.Op = OpAMD64ADDSS
		return true
	case OpAdd64:
		v.Op = OpAMD64ADDQ
		return true
	case OpAdd64F:
		v.Op = OpAMD64ADDSD
		return true
	case OpAdd8:
		v.Op = OpAMD64ADDL
		return true
	case OpAddPtr:
		v.Op = OpAMD64ADDQ
		return true
	case OpAddr:
		return rewriteValueAMD64_OpAddr(v)
	case OpAnd16:
		v.Op = OpAMD64ANDL
		return true
	case OpAnd32:
		v.Op = OpAMD64ANDL
		return true
	case OpAnd64:
		v.Op = OpAMD64ANDQ
		return true
	case OpAnd8:
		v.Op = OpAMD64ANDL
		return true
	case OpAndB:
		v.Op = OpAMD64ANDL
		return true
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64(v)
	case OpAtomicAnd32:
		return rewriteValueAMD64_OpAtomicAnd32(v)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8(v)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32(v)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64(v)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64(v)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32(v)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64(v)
	case OpAtomicLoad8:
		return rewriteValueAMD64_OpAtomicLoad8(v)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr(v)
	case OpAtomicOr32:
		return rewriteValueAMD64_OpAtomicOr32(v)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8(v)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64(v)
	case OpAtomicStore8:
		return rewriteValueAMD64_OpAtomicStore8(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB(v)
	case OpAvg64u:
		v.Op = OpAMD64AVGQU
		return true
	case OpBitLen16:
		return rewriteValueAMD64_OpBitLen16(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64(v)
	case OpBitLen8:
		return rewriteValueAMD64_OpBitLen8(v)
	case OpBswap32:
		v.Op = OpAMD64BSWAPL
		return true
	case OpBswap64:
		v.Op = OpAMD64BSWAPQ
		return true
	case OpCeil:
		return rewriteValueAMD64_OpCeil(v)
	case OpClosureCall:
		v.Op = OpAMD64CALLclosure
		return true
	case OpCom16:
		v.Op = OpAMD64NOTL
		return true
	case OpCom32:
		v.Op = OpAMD64NOTL
		return true
	case OpCom64:
		v.Op = OpAMD64NOTQ
		return true
	case OpCom8:
		v.Op = OpAMD64NOTL
		return true
	case OpCondSelect:
		return rewriteValueAMD64_OpCondSelect(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16(v)
	case OpConst32:
		v.Op = OpAMD64MOVLconst
		return true
	case OpConst32F:
		v.Op = OpAMD64MOVSSconst
		return true
	case OpConst64:
		v.Op = OpAMD64MOVQconst
		return true
	case OpConst64F:
		v.Op = OpAMD64MOVSDconst
		return true
	case OpConst8:
		return rewriteValueAMD64_OpConst8(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil(v)
	case OpCtz16:
		return rewriteValueAMD64_OpCtz16(v)
	case OpCtz16NonZero:
		v.Op = OpAMD64BSFL
		return true
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32(v)
	case OpCtz32NonZero:
		v.Op = OpAMD64BSFL
		return true
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64(v)
	case OpCtz64NonZero:
		return rewriteValueAMD64_OpCtz64NonZero(v)
	case OpCtz8:
		return rewriteValueAMD64_OpCtz8(v)
	case OpCtz8NonZero:
		v.Op = OpAMD64BSFL
		return true
	case OpCvt32Fto32:
		v.Op = OpAMD64CVTTSS2SL
		return true
	case OpCvt32Fto64:
		v.Op = OpAMD64CVTTSS2SQ
		return true
	case OpCvt32Fto64F:
		v.Op = OpAMD64CVTSS2SD
		return true
	case OpCvt32to32F:
		v.Op = OpAMD64CVTSL2SS
		return true
	case OpCvt32to64F:
		v.Op = OpAMD64CVTSL2SD
		return true
	case OpCvt64Fto32:
		v.Op = OpAMD64CVTTSD2SL
		return true
	case OpCvt64Fto32F:
		v.Op = OpAMD64CVTSD2SS
		return true
	case OpCvt64Fto64:
		v.Op = OpAMD64CVTTSD2SQ
		return true
	case OpCvt64to32F:
		v.Op = OpAMD64CVTSQ2SS
		return true
	case OpCvt64to64F:
		v.Op = OpAMD64CVTSQ2SD
		return true
	case OpCvtBoolToUint8:
		v.Op = OpCopy
		return true
	case OpDiv128u:
		v.Op = OpAMD64DIVQU2
		return true
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32(v)
	case OpDiv32F:
		v.Op = OpAMD64DIVSS
		return true
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64(v)
	case OpDiv64F:
		v.Op = OpAMD64DIVSD
		return true
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr(v)
	case OpFMA:
		return rewriteValueAMD64_OpFMA(v)
	case OpFloor:
		return rewriteValueAMD64_OpFloor(v)
	case OpGetCallerPC:
		v.Op = OpAMD64LoweredGetCallerPC
		return true
	case OpGetCallerSP:
		v.Op = OpAMD64LoweredGetCallerSP
		return true
	case OpGetClosurePtr:
		v.Op = OpAMD64LoweredGetClosurePtr
		return true
	case OpGetG:
		v.Op = OpAMD64LoweredGetG
		return true
	case OpHasCPUFeature:
		return rewriteValueAMD64_OpHasCPUFeature(v)
	case OpHmul32:
		v.Op = OpAMD64HMULL
		return true
	case OpHmul32u:
		v.Op = OpAMD64HMULLU
		return true
	case OpHmul64:
		v.Op = OpAMD64HMULQ
		return true
	case OpHmul64u:
		v.Op = OpAMD64HMULQU
		return true
	case OpInterCall:
		v.Op = OpAMD64CALLinter
		return true
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad(v)
	case OpLocalAddr:
		return rewriteValueAMD64_OpLocalAddr(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u(v)
	case OpMove:
		return rewriteValueAMD64_OpMove(v)
	case OpMul16:
		v.Op = OpAMD64MULL
		return true
	case OpMul32:
		v.Op = OpAMD64MULL
		return true
	case OpMul32F:
		v.Op = OpAMD64MULSS
		return true
	case OpMul64:
		v.Op = OpAMD64MULQ
		return true
	case OpMul64F:
		v.Op = OpAMD64MULSD
		return true
	case OpMul64uhilo:
		v.Op = OpAMD64MULQU2
		return true
	case OpMul8:
		v.Op = OpAMD64MULL
		return true
	case OpNeg16:
		v.Op = OpAMD64NEGL
		return true
	case OpNeg32:
		v.Op = OpAMD64NEGL
		return true
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F(v)
	case OpNeg64:
		v.Op = OpAMD64NEGQ
		return true
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F(v)
	case OpNeg8:
		v.Op = OpAMD64NEGL
		return true
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr(v)
	case OpNilCheck:
		v.Op = OpAMD64LoweredNilCheck
		return true
	case OpNot:
		return rewriteValueAMD64_OpNot(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr(v)
	case OpOr16:
		v.Op = OpAMD64ORL
		return true
	case OpOr32:
		v.Op = OpAMD64ORL
		return true
	case OpOr64:
		v.Op = OpAMD64ORQ
		return true
	case OpOr8:
		v.Op = OpAMD64ORL
		return true
	case OpOrB:
		v.Op = OpAMD64ORL
		return true
	case OpPanicBounds:
		return rewriteValueAMD64_OpPanicBounds(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16(v)
	case OpPopCount32:
		v.Op = OpAMD64POPCNTL
		return true
	case OpPopCount64:
		v.Op = OpAMD64POPCNTQ
		return true
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8(v)
	case OpRotateLeft16:
		v.Op = OpAMD64ROLW
		return true
	case OpRotateLeft32:
		v.Op = OpAMD64ROLL
		return true
	case OpRotateLeft64:
		v.Op = OpAMD64ROLQ
		return true
	case OpRotateLeft8:
		v.Op = OpAMD64ROLB
		return true
	case OpRound32F:
		v.Op = OpCopy
		return true
	case OpRound64F:
		v.Op = OpCopy
		return true
	case OpRoundToEven:
		return rewriteValueAMD64_OpRoundToEven(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1(v)
	case OpSignExt16to32:
		v.Op = OpAMD64MOVWQSX
		return true
	case OpSignExt16to64:
		v.Op = OpAMD64MOVWQSX
		return true
	case OpSignExt32to64:
		v.Op = OpAMD64MOVLQSX
		return true
	case OpSignExt8to16:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSignExt8to32:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSignExt8to64:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask(v)
	case OpSpectreIndex:
		return rewriteValueAMD64_OpSpectreIndex(v)
	case OpSpectreSliceIndex:
		return rewriteValueAMD64_OpSpectreSliceIndex(v)
	case OpSqrt:
		v.Op = OpAMD64SQRTSD
		return true
	case OpStaticCall:
		v.Op = OpAMD64CALLstatic
		return true
	case OpStore:
		return rewriteValueAMD64_OpStore(v)
	case OpSub16:
		v.Op = OpAMD64SUBL
		return true
	case OpSub32:
		v.Op = OpAMD64SUBL
		return true
	case OpSub32F:
		v.Op = OpAMD64SUBSS
		return true
	case OpSub64:
		v.Op = OpAMD64SUBQ
		return true
	case OpSub64F:
		v.Op = OpAMD64SUBSD
		return true
	case OpSub8:
		v.Op = OpAMD64SUBL
		return true
	case OpSubPtr:
		v.Op = OpAMD64SUBQ
		return true
	case OpTrunc:
		return rewriteValueAMD64_OpTrunc(v)
	case OpTrunc16to8:
		v.Op = OpCopy
		return true
	case OpTrunc32to16:
		v.Op = OpCopy
		return true
	case OpTrunc32to8:
		v.Op = OpCopy
		return true
	case OpTrunc64to16:
		v.Op = OpCopy
		return true
	case OpTrunc64to32:
		v.Op = OpCopy
		return true
	case OpTrunc64to8:
		v.Op = OpCopy
		return true
	case OpWB:
		v.Op = OpAMD64LoweredWB
		return true
	case OpXor16:
		v.Op = OpAMD64XORL
		return true
	case OpXor32:
		v.Op = OpAMD64XORL
		return true
	case OpXor64:
		v.Op = OpAMD64XORQ
		return true
	case OpXor8:
		v.Op = OpAMD64XORL
		return true
	case OpZero:
		return rewriteValueAMD64_OpZero(v)
	case OpZeroExt16to32:
		v.Op = OpAMD64MOVWQZX
		return true
	case OpZeroExt16to64:
		v.Op = OpAMD64MOVWQZX
		return true
	case OpZeroExt32to64:
		v.Op = OpAMD64MOVLQZX
		return true
	case OpZeroExt8to16:
		v.Op = OpAMD64MOVBQZX
		return true
	case OpZeroExt8to32:
		v.Op = OpAMD64MOVBQZX
		return true
	case OpZeroExt8to64:
		v.Op = OpAMD64MOVBQZX
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
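	// match: (ADCQ x (MOVQconst [c]) carry)
	// cond: is32Bit(c)
	// result: (ADCQconst x [int32(c)] carry)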
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			carry := v_2
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADCQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg2(x, carry)
			return true
		}
		break
	}
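	// match: (ADCQ x y (FlagEQ))
	// result: (ADDQcarry x y)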
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQcarry)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADCQconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
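	// match: (ADCQconst x [c] (FlagEQ))
	// result: (ADDQconstcarry x [c])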
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQconstcarry)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
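	// match: (ADDL x (MOVLconst [c]))
	// result: (ADDLconst [c] x)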
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ADDLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
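	// match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])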
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLLconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRLconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			if x != v_1.Args[0] || !(d == 32-c) {
				continue
			}
			v.reset(OpAMD64ROLLconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
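	// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])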
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLLconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRWconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
				continue
			}
			v.reset(OpAMD64ROLWconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
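	// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])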
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLLconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRBconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
				continue
			}
			v.reset(OpAMD64ROLBconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
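	// match: (ADDL x (SHLLconst [3] y))
	// result: (LEAL8 x y)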
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL8)
			v.AddArg2(x, y)
			return true
		}
		break
	}
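	// match: (ADDL x (SHLLconst [2] y))
	// result: (LEAL4 x y)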
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL4)
			v.AddArg2(x, y)
			return true
		}
		break
	}
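	// match: (ADDL x (SHLLconst [1] y))
	// result: (LEAL2 x y)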
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
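	// match: (ADDL x (ADDL y y))
	// result: (LEAL2 x y)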
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAL2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
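	// match: (ADDL x (ADDL x y))
	// result: (LEAL2 y x)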
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				v.reset(OpAMD64LEAL2)
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
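	// match: (ADDL (ADDLconst [c] x) y)
	// result: (LEAL1 [c] x y)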
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg2(x, y)
			return true
		}
		break
	}
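	// match: (ADDL x (LEAL [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)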
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAL {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			s := auxToSym(v_1.Aux)
			y := v_1.Args[0]
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
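	// match: (ADDL x (NEGL y))
	// result: (SUBL x y)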
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGL {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64SUBL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
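	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDLload x [off] {sym} ptr mem)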
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
	v_0 := v.Args[0]
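	// match: (ADDLconst [c] (ADDL x y))
	// result: (LEAL1 [c] x y)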
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDL {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, y)
		return true
	}
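	// match: (ADDLconst [c] (SHLLconst [1] x))
	// result: (LEAL1 [c] x x)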
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, x)
		return true
	}
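	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL [c+d] {s} x)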
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
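	// match: (ADDLconst [c] (LEAL1 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL1 [c+d] {s} x y)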
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL1 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
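	// match: (ADDLconst [c] (LEAL2 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL2 [c+d] {s} x y)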
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL2 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
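	// match: (ADDLconst [c] (LEAL4 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL4 [c+d] {s} x y)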
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL4 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
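	// match: (ADDLconst [c] (LEAL8 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL8 [c+d] {s} x y)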
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL8 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
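	// match: (ADDLconst [c] x)
	// cond: c==0
	// result: x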
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
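	// match: (ADDLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c+d])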
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c + d)
		return true
	}
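	// match: (ADDLconst [c] (ADDLconst [d] x))
	// result: (ADDLconst [c+d] x)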
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int32ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
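	// match: (ADDLconst [off] x:(SP))
	// result: (LEAL [off] x)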
	for {
		off := auxIntToInt32(v.AuxInt)
		x := v_0
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(off)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
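	// match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)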
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
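	// match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)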
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
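	// match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDLload [off1+off2] {sym} val base mem)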
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
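	// match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)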
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
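	// match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ADDL x (MOVLf2i y))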
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
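	// match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDLmodify [off1+off2] {sym} base val mem)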
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
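	// match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)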
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
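	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [int32(c)] x)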
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADDQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
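	// match: (ADDQ x (MOVLconst [c]))
	// result: (ADDQconst [c] x)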
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ADDQconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
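	// match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])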
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLQconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRQconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			if x != v_1.Args[0] || !(d == 64-c) {
				continue
			}
			v.reset(OpAMD64ROLQconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
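	// match: (ADDQ x (SHLQconst [3] y))
	// result: (LEAQ8 x y)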
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ8)
			v.AddArg2(x, y)
			return true
		}
		break
	}
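	// match: (ADDQ x (SHLQconst [2] y))
	// result: (LEAQ4 x y)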
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ4)
			v.AddArg2(x, y)
			return true
		}
		break
	}
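	// match: (ADDQ x (SHLQconst [1] y))
	// result: (LEAQ2 x y)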
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
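	// match: (ADDQ x (ADDQ y y))
	// result: (LEAQ2 x y)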
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQ {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAQ2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
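	// match: (ADDQ x (ADDQ x y))
	// result: (LEAQ2 y x)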
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQ {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				v.reset(OpAMD64LEAQ2)
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
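	// match: (ADDQ (ADDQconst [c] x) y)
	// result: (LEAQ1 [c] x y)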
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDQconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg2(x, y)
			return true
		}
		break
	}
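	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)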
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			s := auxToSym(v_1.Aux)
			y := v_1.Args[0]
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
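	// match: (ADDQ x (NEGQ y))
	// result: (SUBQ x y)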
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGQ {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64SUBQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
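	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDQload x [off] {sym} ptr mem)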
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQcarry(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
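	// match: (ADDQcarry x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconstcarry x [int32(c)])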
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADDQconstcarry)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
	v_0 := v.Args[0]
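	// match: (ADDQconst [c] (ADDQ x y))
	// result: (LEAQ1 [c] x y)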
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, y)
		return true
	}
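	// match: (ADDQconst [c] (SHLQconst [1] x))
	// result: (LEAQ1 [c] x x)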
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, x)
		return true
	}
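	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ [c+d] {s} x)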
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
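	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ1 [c+d] {s} x y)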
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
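	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ2 [c+d] {s} x y)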
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
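	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ4 [c+d] {s} x y)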
2201 for {
2202 c := auxIntToInt32(v.AuxInt)
2203 if v_0.Op != OpAMD64LEAQ4 {
2204 break
2205 }
2206 d := auxIntToInt32(v_0.AuxInt)
2207 s := auxToSym(v_0.Aux)
2208 y := v_0.Args[1]
2209 x := v_0.Args[0]
2210 if !(is32Bit(int64(c) + int64(d))) {
2211 break
2212 }
2213 v.reset(OpAMD64LEAQ4)
2214 v.AuxInt = int32ToAuxInt(c + d)
2215 v.Aux = symToAux(s)
2216 v.AddArg2(x, y)
2217 return true
2218 }
2219 // match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
2220 // cond: is32Bit(int64(c)+int64(d))
2221 // result: (LEAQ8 [c+d] {s} x y)
2222 for {
2223 c := auxIntToInt32(v.AuxInt)
2224 if v_0.Op != OpAMD64LEAQ8 {
2225 break
2226 }
2227 d := auxIntToInt32(v_0.AuxInt)
2228 s := auxToSym(v_0.Aux)
2229 y := v_0.Args[1]
2230 x := v_0.Args[0]
2231 if !(is32Bit(int64(c) + int64(d))) {
2232 break
2233 }
2234 v.reset(OpAMD64LEAQ8)
2235 v.AuxInt = int32ToAuxInt(c + d)
2236 v.Aux = symToAux(s)
2237 v.AddArg2(x, y)
2238 return true
2239 }
2240 // match: (ADDQconst [0] x)
2241 // result: x
2242 for {
2243 if auxIntToInt32(v.AuxInt) != 0 {
2244 break
2245 }
2246 x := v_0
2247 v.copyOf(x)
2248 return true
2249 }
2250 // match: (ADDQconst [c] (MOVQconst [d]))
2251 // result: (MOVQconst [int64(c)+d])
2252 for {
2253 c := auxIntToInt32(v.AuxInt)
2254 if v_0.Op != OpAMD64MOVQconst {
2255 break
2256 }
2257 d := auxIntToInt64(v_0.AuxInt)
2258 v.reset(OpAMD64MOVQconst)
2259 v.AuxInt = int64ToAuxInt(int64(c) + d)
2260 return true
2261 }
2262 // match: (ADDQconst [c] (ADDQconst [d] x))
2263 // cond: is32Bit(int64(c)+int64(d))
2264 // result: (ADDQconst [c+d] x)
2265 for {
2266 c := auxIntToInt32(v.AuxInt)
2267 if v_0.Op != OpAMD64ADDQconst {
2268 break
2269 }
2270 d := auxIntToInt32(v_0.AuxInt)
2271 x := v_0.Args[0]
2272 if !(is32Bit(int64(c) + int64(d))) {
2273 break
2274 }
2275 v.reset(OpAMD64ADDQconst)
2276 v.AuxInt = int32ToAuxInt(c + d)
2277 v.AddArg(x)
2278 return true
2279 }
2280 // match: (ADDQconst [off] x:(SP))
2281 // result: (LEAQ [off] x)
2282 for {
2283 off := auxIntToInt32(v.AuxInt)
2284 x := v_0
2285 if x.Op != OpSP {
2286 break
2287 }
2288 v.reset(OpAMD64LEAQ)
2289 v.AuxInt = int32ToAuxInt(off)
2290 v.AddArg(x)
2291 return true
2292 }
2293 return false
2294 }
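// The ADDQconst rules above fold a constant add into an existing LEAQ
// addressing mode (or into another constant). As a sketch, the LEAQ-merging
// entry in gen/AMD64.rules reads roughly (rule syntax approximate, not
// verbatim):
//
//	(ADDQconst [c] (LEAQ [d] {s} x)) && is32Bit(int64(c)+int64(d)) => (LEAQ [c+d] {s} x)
//
// The is32Bit guard keeps the combined displacement encodable as the 32-bit
// immediate that x86-64 addressing allows.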
2295 func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool {
2296 v_1 := v.Args[1]
2297 v_0 := v.Args[0]
2298 // match: (ADDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
2299 // cond: ValAndOff(valoff1).canAdd32(off2)
2300 // result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
2301 for {
2302 valoff1 := auxIntToValAndOff(v.AuxInt)
2303 sym := auxToSym(v.Aux)
2304 if v_0.Op != OpAMD64ADDQconst {
2305 break
2306 }
2307 off2 := auxIntToInt32(v_0.AuxInt)
2308 base := v_0.Args[0]
2309 mem := v_1
2310 if !(ValAndOff(valoff1).canAdd32(off2)) {
2311 break
2312 }
2313 v.reset(OpAMD64ADDQconstmodify)
2314 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2315 v.Aux = symToAux(sym)
2316 v.AddArg2(base, mem)
2317 return true
2318 }
2319 // match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
2320 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
2321 // result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
2322 for {
2323 valoff1 := auxIntToValAndOff(v.AuxInt)
2324 sym1 := auxToSym(v.Aux)
2325 if v_0.Op != OpAMD64LEAQ {
2326 break
2327 }
2328 off2 := auxIntToInt32(v_0.AuxInt)
2329 sym2 := auxToSym(v_0.Aux)
2330 base := v_0.Args[0]
2331 mem := v_1
2332 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
2333 break
2334 }
2335 v.reset(OpAMD64ADDQconstmodify)
2336 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2337 v.Aux = symToAux(mergeSym(sym1, sym2))
2338 v.AddArg2(base, mem)
2339 return true
2340 }
2341 return false
2342 }
2343 func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool {
2344 v_2 := v.Args[2]
2345 v_1 := v.Args[1]
2346 v_0 := v.Args[0]
2347 b := v.Block
2348 typ := &b.Func.Config.Types
2349 // match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem)
2350 // cond: is32Bit(int64(off1)+int64(off2))
2351 // result: (ADDQload [off1+off2] {sym} val base mem)
2352 for {
2353 off1 := auxIntToInt32(v.AuxInt)
2354 sym := auxToSym(v.Aux)
2355 val := v_0
2356 if v_1.Op != OpAMD64ADDQconst {
2357 break
2358 }
2359 off2 := auxIntToInt32(v_1.AuxInt)
2360 base := v_1.Args[0]
2361 mem := v_2
2362 if !(is32Bit(int64(off1) + int64(off2))) {
2363 break
2364 }
2365 v.reset(OpAMD64ADDQload)
2366 v.AuxInt = int32ToAuxInt(off1 + off2)
2367 v.Aux = symToAux(sym)
2368 v.AddArg3(val, base, mem)
2369 return true
2370 }
2371 // match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
2372 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
2373 // result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
2374 for {
2375 off1 := auxIntToInt32(v.AuxInt)
2376 sym1 := auxToSym(v.Aux)
2377 val := v_0
2378 if v_1.Op != OpAMD64LEAQ {
2379 break
2380 }
2381 off2 := auxIntToInt32(v_1.AuxInt)
2382 sym2 := auxToSym(v_1.Aux)
2383 base := v_1.Args[0]
2384 mem := v_2
2385 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2386 break
2387 }
2388 v.reset(OpAMD64ADDQload)
2389 v.AuxInt = int32ToAuxInt(off1 + off2)
2390 v.Aux = symToAux(mergeSym(sym1, sym2))
2391 v.AddArg3(val, base, mem)
2392 return true
2393 }
2394 // match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
2395 // result: (ADDQ x (MOVQf2i y))
2396 for {
2397 off := auxIntToInt32(v.AuxInt)
2398 sym := auxToSym(v.Aux)
2399 x := v_0
2400 ptr := v_1
2401 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2402 break
2403 }
2404 y := v_2.Args[1]
2405 if ptr != v_2.Args[0] {
2406 break
2407 }
2408 v.reset(OpAMD64ADDQ)
2409 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
2410 v0.AddArg(y)
2411 v.AddArg2(x, v0)
2412 return true
2413 }
2414 return false
2415 }
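// The last ADDQload rule above is store-to-load forwarding across register
// files: when the load's memory argument is a MOVSDstore to the same
// [off] {sym} ptr address, the loaded bits are exactly the stored SSE value,
// so the rewrite reuses it through MOVQf2i (an XMM-to-GP bit move) and skips
// the memory round trip. Sketch of the rule (syntax approximate):
//
//	(ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (ADDQ x (MOVQf2i y))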
2416 func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool {
2417 v_2 := v.Args[2]
2418 v_1 := v.Args[1]
2419 v_0 := v.Args[0]
2420 // match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
2421 // cond: is32Bit(int64(off1)+int64(off2))
2422 // result: (ADDQmodify [off1+off2] {sym} base val mem)
2423 for {
2424 off1 := auxIntToInt32(v.AuxInt)
2425 sym := auxToSym(v.Aux)
2426 if v_0.Op != OpAMD64ADDQconst {
2427 break
2428 }
2429 off2 := auxIntToInt32(v_0.AuxInt)
2430 base := v_0.Args[0]
2431 val := v_1
2432 mem := v_2
2433 if !(is32Bit(int64(off1) + int64(off2))) {
2434 break
2435 }
2436 v.reset(OpAMD64ADDQmodify)
2437 v.AuxInt = int32ToAuxInt(off1 + off2)
2438 v.Aux = symToAux(sym)
2439 v.AddArg3(base, val, mem)
2440 return true
2441 }
2442 // match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
2443 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
2444 // result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
2445 for {
2446 off1 := auxIntToInt32(v.AuxInt)
2447 sym1 := auxToSym(v.Aux)
2448 if v_0.Op != OpAMD64LEAQ {
2449 break
2450 }
2451 off2 := auxIntToInt32(v_0.AuxInt)
2452 sym2 := auxToSym(v_0.Aux)
2453 base := v_0.Args[0]
2454 val := v_1
2455 mem := v_2
2456 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2457 break
2458 }
2459 v.reset(OpAMD64ADDQmodify)
2460 v.AuxInt = int32ToAuxInt(off1 + off2)
2461 v.Aux = symToAux(mergeSym(sym1, sym2))
2462 v.AddArg3(base, val, mem)
2463 return true
2464 }
2465 return false
2466 }
2467 func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool {
2468 v_1 := v.Args[1]
2469 v_0 := v.Args[0]
2470 // match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
2471 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
2472 // result: (ADDSDload x [off] {sym} ptr mem)
2473 for {
2474 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2475 x := v_0
2476 l := v_1
2477 if l.Op != OpAMD64MOVSDload {
2478 continue
2479 }
2480 off := auxIntToInt32(l.AuxInt)
2481 sym := auxToSym(l.Aux)
2482 mem := l.Args[1]
2483 ptr := l.Args[0]
2484 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2485 continue
2486 }
2487 v.reset(OpAMD64ADDSDload)
2488 v.AuxInt = int32ToAuxInt(off)
2489 v.Aux = symToAux(sym)
2490 v.AddArg3(x, ptr, mem)
2491 return true
2492 }
2493 break
2494 }
2495 return false
2496 }
2497 func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool {
2498 v_2 := v.Args[2]
2499 v_1 := v.Args[1]
2500 v_0 := v.Args[0]
2501 b := v.Block
2502 typ := &b.Func.Config.Types
2503 // match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem)
2504 // cond: is32Bit(int64(off1)+int64(off2))
2505 // result: (ADDSDload [off1+off2] {sym} val base mem)
2506 for {
2507 off1 := auxIntToInt32(v.AuxInt)
2508 sym := auxToSym(v.Aux)
2509 val := v_0
2510 if v_1.Op != OpAMD64ADDQconst {
2511 break
2512 }
2513 off2 := auxIntToInt32(v_1.AuxInt)
2514 base := v_1.Args[0]
2515 mem := v_2
2516 if !(is32Bit(int64(off1) + int64(off2))) {
2517 break
2518 }
2519 v.reset(OpAMD64ADDSDload)
2520 v.AuxInt = int32ToAuxInt(off1 + off2)
2521 v.Aux = symToAux(sym)
2522 v.AddArg3(val, base, mem)
2523 return true
2524 }
2525 // match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
2526 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
2527 // result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
2528 for {
2529 off1 := auxIntToInt32(v.AuxInt)
2530 sym1 := auxToSym(v.Aux)
2531 val := v_0
2532 if v_1.Op != OpAMD64LEAQ {
2533 break
2534 }
2535 off2 := auxIntToInt32(v_1.AuxInt)
2536 sym2 := auxToSym(v_1.Aux)
2537 base := v_1.Args[0]
2538 mem := v_2
2539 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2540 break
2541 }
2542 v.reset(OpAMD64ADDSDload)
2543 v.AuxInt = int32ToAuxInt(off1 + off2)
2544 v.Aux = symToAux(mergeSym(sym1, sym2))
2545 v.AddArg3(val, base, mem)
2546 return true
2547 }
2548 // match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
2549 // result: (ADDSD x (MOVQi2f y))
2550 for {
2551 off := auxIntToInt32(v.AuxInt)
2552 sym := auxToSym(v.Aux)
2553 x := v_0
2554 ptr := v_1
2555 if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2556 break
2557 }
2558 y := v_2.Args[1]
2559 if ptr != v_2.Args[0] {
2560 break
2561 }
2562 v.reset(OpAMD64ADDSD)
2563 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
2564 v0.AddArg(y)
2565 v.AddArg2(x, v0)
2566 return true
2567 }
2568 return false
2569 }
2570 func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool {
2571 v_1 := v.Args[1]
2572 v_0 := v.Args[0]
2573 // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
2574 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
2575 // result: (ADDSSload x [off] {sym} ptr mem)
2576 for {
2577 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2578 x := v_0
2579 l := v_1
2580 if l.Op != OpAMD64MOVSSload {
2581 continue
2582 }
2583 off := auxIntToInt32(l.AuxInt)
2584 sym := auxToSym(l.Aux)
2585 mem := l.Args[1]
2586 ptr := l.Args[0]
2587 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2588 continue
2589 }
2590 v.reset(OpAMD64ADDSSload)
2591 v.AuxInt = int32ToAuxInt(off)
2592 v.Aux = symToAux(sym)
2593 v.AddArg3(x, ptr, mem)
2594 return true
2595 }
2596 break
2597 }
2598 return false
2599 }
2600 func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool {
2601 v_2 := v.Args[2]
2602 v_1 := v.Args[1]
2603 v_0 := v.Args[0]
2604 b := v.Block
2605 typ := &b.Func.Config.Types
2606 // match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem)
2607 // cond: is32Bit(int64(off1)+int64(off2))
2608 // result: (ADDSSload [off1+off2] {sym} val base mem)
2609 for {
2610 off1 := auxIntToInt32(v.AuxInt)
2611 sym := auxToSym(v.Aux)
2612 val := v_0
2613 if v_1.Op != OpAMD64ADDQconst {
2614 break
2615 }
2616 off2 := auxIntToInt32(v_1.AuxInt)
2617 base := v_1.Args[0]
2618 mem := v_2
2619 if !(is32Bit(int64(off1) + int64(off2))) {
2620 break
2621 }
2622 v.reset(OpAMD64ADDSSload)
2623 v.AuxInt = int32ToAuxInt(off1 + off2)
2624 v.Aux = symToAux(sym)
2625 v.AddArg3(val, base, mem)
2626 return true
2627 }
2628 // match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
2629 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
2630 // result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
2631 for {
2632 off1 := auxIntToInt32(v.AuxInt)
2633 sym1 := auxToSym(v.Aux)
2634 val := v_0
2635 if v_1.Op != OpAMD64LEAQ {
2636 break
2637 }
2638 off2 := auxIntToInt32(v_1.AuxInt)
2639 sym2 := auxToSym(v_1.Aux)
2640 base := v_1.Args[0]
2641 mem := v_2
2642 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2643 break
2644 }
2645 v.reset(OpAMD64ADDSSload)
2646 v.AuxInt = int32ToAuxInt(off1 + off2)
2647 v.Aux = symToAux(mergeSym(sym1, sym2))
2648 v.AddArg3(val, base, mem)
2649 return true
2650 }
2651 // match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
2652 // result: (ADDSS x (MOVLi2f y))
2653 for {
2654 off := auxIntToInt32(v.AuxInt)
2655 sym := auxToSym(v.Aux)
2656 x := v_0
2657 ptr := v_1
2658 if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2659 break
2660 }
2661 y := v_2.Args[1]
2662 if ptr != v_2.Args[0] {
2663 break
2664 }
2665 v.reset(OpAMD64ADDSS)
2666 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
2667 v0.AddArg(y)
2668 v.AddArg2(x, v0)
2669 return true
2670 }
2671 return false
2672 }
2673 func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
2674 v_1 := v.Args[1]
2675 v_0 := v.Args[0]
2676 // match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x)
2677 // result: (BTRL x y)
2678 for {
2679 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2680 if v_0.Op != OpAMD64NOTL {
2681 continue
2682 }
2683 v_0_0 := v_0.Args[0]
2684 if v_0_0.Op != OpAMD64SHLL {
2685 continue
2686 }
2687 y := v_0_0.Args[1]
2688 v_0_0_0 := v_0_0.Args[0]
2689 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
2690 continue
2691 }
2692 x := v_1
2693 v.reset(OpAMD64BTRL)
2694 v.AddArg2(x, y)
2695 return true
2696 }
2697 break
2698 }
2699 // match: (ANDL (MOVLconst [c]) x)
2700 // cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
2701 // result: (BTRLconst [int8(log32(^c))] x)
2702 for {
2703 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2704 if v_0.Op != OpAMD64MOVLconst {
2705 continue
2706 }
2707 c := auxIntToInt32(v_0.AuxInt)
2708 x := v_1
2709 if !(isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
2710 continue
2711 }
2712 v.reset(OpAMD64BTRLconst)
2713 v.AuxInt = int8ToAuxInt(int8(log32(^c)))
2714 v.AddArg(x)
2715 return true
2716 }
2717 break
2718 }
2719 // match: (ANDL x (MOVLconst [c]))
2720 // result: (ANDLconst [c] x)
2721 for {
2722 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2723 x := v_0
2724 if v_1.Op != OpAMD64MOVLconst {
2725 continue
2726 }
2727 c := auxIntToInt32(v_1.AuxInt)
2728 v.reset(OpAMD64ANDLconst)
2729 v.AuxInt = int32ToAuxInt(c)
2730 v.AddArg(x)
2731 return true
2732 }
2733 break
2734 }
2735 // match: (ANDL x x)
2736 // result: x
2737 for {
2738 x := v_0
2739 if x != v_1 {
2740 break
2741 }
2742 v.copyOf(x)
2743 return true
2744 }
2745 // match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
2746 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
2747 // result: (ANDLload x [off] {sym} ptr mem)
2748 for {
2749 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2750 x := v_0
2751 l := v_1
2752 if l.Op != OpAMD64MOVLload {
2753 continue
2754 }
2755 off := auxIntToInt32(l.AuxInt)
2756 sym := auxToSym(l.Aux)
2757 mem := l.Args[1]
2758 ptr := l.Args[0]
2759 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2760 continue
2761 }
2762 v.reset(OpAMD64ANDLload)
2763 v.AuxInt = int32ToAuxInt(off)
2764 v.Aux = symToAux(sym)
2765 v.AddArg3(x, ptr, mem)
2766 return true
2767 }
2768 break
2769 }
2770 return false
2771 }
2772 func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
2773 v_0 := v.Args[0]
2774 // match: (ANDLconst [c] x)
2775 // cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
2776 // result: (BTRLconst [int8(log32(^c))] x)
2777 for {
2778 c := auxIntToInt32(v.AuxInt)
2779 x := v_0
2780 if !(isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
2781 break
2782 }
2783 v.reset(OpAMD64BTRLconst)
2784 v.AuxInt = int8ToAuxInt(int8(log32(^c)))
2785 v.AddArg(x)
2786 return true
2787 }
2788 // match: (ANDLconst [c] (ANDLconst [d] x))
2789 // result: (ANDLconst [c & d] x)
2790 for {
2791 c := auxIntToInt32(v.AuxInt)
2792 if v_0.Op != OpAMD64ANDLconst {
2793 break
2794 }
2795 d := auxIntToInt32(v_0.AuxInt)
2796 x := v_0.Args[0]
2797 v.reset(OpAMD64ANDLconst)
2798 v.AuxInt = int32ToAuxInt(c & d)
2799 v.AddArg(x)
2800 return true
2801 }
2802 // match: (ANDLconst [c] (BTRLconst [d] x))
2803 // result: (ANDLconst [c &^ (1<<uint32(d))] x)
2804 for {
2805 c := auxIntToInt32(v.AuxInt)
2806 if v_0.Op != OpAMD64BTRLconst {
2807 break
2808 }
2809 d := auxIntToInt8(v_0.AuxInt)
2810 x := v_0.Args[0]
2811 v.reset(OpAMD64ANDLconst)
2812 v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
2813 v.AddArg(x)
2814 return true
2815 }
2816 // match: (ANDLconst [0xFF] x)
2817 // result: (MOVBQZX x)
2818 for {
2819 if auxIntToInt32(v.AuxInt) != 0xFF {
2820 break
2821 }
2822 x := v_0
2823 v.reset(OpAMD64MOVBQZX)
2824 v.AddArg(x)
2825 return true
2826 }
2827 // match: (ANDLconst [0xFFFF] x)
2828 // result: (MOVWQZX x)
2829 for {
2830 if auxIntToInt32(v.AuxInt) != 0xFFFF {
2831 break
2832 }
2833 x := v_0
2834 v.reset(OpAMD64MOVWQZX)
2835 v.AddArg(x)
2836 return true
2837 }
2838 // match: (ANDLconst [c] _)
2839 // cond: c == 0
2840 // result: (MOVLconst [0])
2841 for {
2842 c := auxIntToInt32(v.AuxInt)
2843 if !(c == 0) {
2844 break
2845 }
2846 v.reset(OpAMD64MOVLconst)
2847 v.AuxInt = int32ToAuxInt(0)
2848 return true
2849 }
2850 // match: (ANDLconst [c] x)
2851 // cond: c == -1
2852 // result: x
2853 for {
2854 c := auxIntToInt32(v.AuxInt)
2855 x := v_0
2856 if !(c == -1) {
2857 break
2858 }
2859 v.copyOf(x)
2860 return true
2861 }
2862 // match: (ANDLconst [c] (MOVLconst [d]))
2863 // result: (MOVLconst [c&d])
2864 for {
2865 c := auxIntToInt32(v.AuxInt)
2866 if v_0.Op != OpAMD64MOVLconst {
2867 break
2868 }
2869 d := auxIntToInt32(v_0.AuxInt)
2870 v.reset(OpAMD64MOVLconst)
2871 v.AuxInt = int32ToAuxInt(c & d)
2872 return true
2873 }
2874 return false
2875 }
2876 func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool {
2877 v_1 := v.Args[1]
2878 v_0 := v.Args[0]
2879 // match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
2880 // cond: ValAndOff(valoff1).canAdd32(off2)
2881 // result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
2882 for {
2883 valoff1 := auxIntToValAndOff(v.AuxInt)
2884 sym := auxToSym(v.Aux)
2885 if v_0.Op != OpAMD64ADDQconst {
2886 break
2887 }
2888 off2 := auxIntToInt32(v_0.AuxInt)
2889 base := v_0.Args[0]
2890 mem := v_1
2891 if !(ValAndOff(valoff1).canAdd32(off2)) {
2892 break
2893 }
2894 v.reset(OpAMD64ANDLconstmodify)
2895 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2896 v.Aux = symToAux(sym)
2897 v.AddArg2(base, mem)
2898 return true
2899 }
2900 // match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
2901 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
2902 // result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
2903 for {
2904 valoff1 := auxIntToValAndOff(v.AuxInt)
2905 sym1 := auxToSym(v.Aux)
2906 if v_0.Op != OpAMD64LEAQ {
2907 break
2908 }
2909 off2 := auxIntToInt32(v_0.AuxInt)
2910 sym2 := auxToSym(v_0.Aux)
2911 base := v_0.Args[0]
2912 mem := v_1
2913 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
2914 break
2915 }
2916 v.reset(OpAMD64ANDLconstmodify)
2917 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2918 v.Aux = symToAux(mergeSym(sym1, sym2))
2919 v.AddArg2(base, mem)
2920 return true
2921 }
2922 return false
2923 }
2924 func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool {
2925 v_2 := v.Args[2]
2926 v_1 := v.Args[1]
2927 v_0 := v.Args[0]
2928 b := v.Block
2929 typ := &b.Func.Config.Types
2930 // match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem)
2931 // cond: is32Bit(int64(off1)+int64(off2))
2932 // result: (ANDLload [off1+off2] {sym} val base mem)
2933 for {
2934 off1 := auxIntToInt32(v.AuxInt)
2935 sym := auxToSym(v.Aux)
2936 val := v_0
2937 if v_1.Op != OpAMD64ADDQconst {
2938 break
2939 }
2940 off2 := auxIntToInt32(v_1.AuxInt)
2941 base := v_1.Args[0]
2942 mem := v_2
2943 if !(is32Bit(int64(off1) + int64(off2))) {
2944 break
2945 }
2946 v.reset(OpAMD64ANDLload)
2947 v.AuxInt = int32ToAuxInt(off1 + off2)
2948 v.Aux = symToAux(sym)
2949 v.AddArg3(val, base, mem)
2950 return true
2951 }
2952 // match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
2953 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
2954 // result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
2955 for {
2956 off1 := auxIntToInt32(v.AuxInt)
2957 sym1 := auxToSym(v.Aux)
2958 val := v_0
2959 if v_1.Op != OpAMD64LEAQ {
2960 break
2961 }
2962 off2 := auxIntToInt32(v_1.AuxInt)
2963 sym2 := auxToSym(v_1.Aux)
2964 base := v_1.Args[0]
2965 mem := v_2
2966 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2967 break
2968 }
2969 v.reset(OpAMD64ANDLload)
2970 v.AuxInt = int32ToAuxInt(off1 + off2)
2971 v.Aux = symToAux(mergeSym(sym1, sym2))
2972 v.AddArg3(val, base, mem)
2973 return true
2974 }
2975 // match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
2976 // result: (ANDL x (MOVLf2i y))
2977 for {
2978 off := auxIntToInt32(v.AuxInt)
2979 sym := auxToSym(v.Aux)
2980 x := v_0
2981 ptr := v_1
2982 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2983 break
2984 }
2985 y := v_2.Args[1]
2986 if ptr != v_2.Args[0] {
2987 break
2988 }
2989 v.reset(OpAMD64ANDL)
2990 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
2991 v0.AddArg(y)
2992 v.AddArg2(x, v0)
2993 return true
2994 }
2995 return false
2996 }
2997 func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool {
2998 v_2 := v.Args[2]
2999 v_1 := v.Args[1]
3000 v_0 := v.Args[0]
3001 b := v.Block
3002 // match: (ANDLmodify [off] {sym} ptr (NOTL s:(SHLL <t> (MOVLconst [1]) x)) mem)
3003 // result: (BTRLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
3004 for {
3005 off := auxIntToInt32(v.AuxInt)
3006 sym := auxToSym(v.Aux)
3007 ptr := v_0
3008 if v_1.Op != OpAMD64NOTL {
3009 break
3010 }
3011 s := v_1.Args[0]
3012 if s.Op != OpAMD64SHLL {
3013 break
3014 }
3015 t := s.Type
3016 x := s.Args[1]
3017 s_0 := s.Args[0]
3018 if s_0.Op != OpAMD64MOVLconst || auxIntToInt32(s_0.AuxInt) != 1 {
3019 break
3020 }
3021 mem := v_2
3022 v.reset(OpAMD64BTRLmodify)
3023 v.AuxInt = int32ToAuxInt(off)
3024 v.Aux = symToAux(sym)
3025 v0 := b.NewValue0(v.Pos, OpAMD64ANDLconst, t)
3026 v0.AuxInt = int32ToAuxInt(31)
3027 v0.AddArg(x)
3028 v.AddArg3(ptr, v0, mem)
3029 return true
3030 }
3031 // match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
3032 // cond: is32Bit(int64(off1)+int64(off2))
3033 // result: (ANDLmodify [off1+off2] {sym} base val mem)
3034 for {
3035 off1 := auxIntToInt32(v.AuxInt)
3036 sym := auxToSym(v.Aux)
3037 if v_0.Op != OpAMD64ADDQconst {
3038 break
3039 }
3040 off2 := auxIntToInt32(v_0.AuxInt)
3041 base := v_0.Args[0]
3042 val := v_1
3043 mem := v_2
3044 if !(is32Bit(int64(off1) + int64(off2))) {
3045 break
3046 }
3047 v.reset(OpAMD64ANDLmodify)
3048 v.AuxInt = int32ToAuxInt(off1 + off2)
3049 v.Aux = symToAux(sym)
3050 v.AddArg3(base, val, mem)
3051 return true
3052 }
3053 // match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
3054 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
3055 // result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
3056 for {
3057 off1 := auxIntToInt32(v.AuxInt)
3058 sym1 := auxToSym(v.Aux)
3059 if v_0.Op != OpAMD64LEAQ {
3060 break
3061 }
3062 off2 := auxIntToInt32(v_0.AuxInt)
3063 sym2 := auxToSym(v_0.Aux)
3064 base := v_0.Args[0]
3065 val := v_1
3066 mem := v_2
3067 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3068 break
3069 }
3070 v.reset(OpAMD64ANDLmodify)
3071 v.AuxInt = int32ToAuxInt(off1 + off2)
3072 v.Aux = symToAux(mergeSym(sym1, sym2))
3073 v.AddArg3(base, val, mem)
3074 return true
3075 }
3076 return false
3077 }
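// In the first ANDLmodify rule above, the rewrite masks the bit index with
// ANDLconst [31] before handing it to BTRLmodify: SHLL only uses the low five
// bits of its count, while BTR with a register index can address bits outside
// a 32-bit memory operand, so the explicit mask preserves the original
// wraparound semantics. Sketch (syntax approximate):
//
//	(ANDLmodify [off] {sym} ptr (NOTL s:(SHLL <t> (MOVLconst [1]) x)) mem)
//		=> (BTRLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)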
3078 func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
3079 v_1 := v.Args[1]
3080 v_0 := v.Args[0]
3081 // match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x)
3082 // result: (BTRQ x y)
3083 for {
3084 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3085 if v_0.Op != OpAMD64NOTQ {
3086 continue
3087 }
3088 v_0_0 := v_0.Args[0]
3089 if v_0_0.Op != OpAMD64SHLQ {
3090 continue
3091 }
3092 y := v_0_0.Args[1]
3093 v_0_0_0 := v_0_0.Args[0]
3094 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
3095 continue
3096 }
3097 x := v_1
3098 v.reset(OpAMD64BTRQ)
3099 v.AddArg2(x, y)
3100 return true
3101 }
3102 break
3103 }
3104 // match: (ANDQ (MOVQconst [c]) x)
3105 // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128
3106 // result: (BTRQconst [int8(log64(^c))] x)
3107 for {
3108 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3109 if v_0.Op != OpAMD64MOVQconst {
3110 continue
3111 }
3112 c := auxIntToInt64(v_0.AuxInt)
3113 x := v_1
3114 if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128) {
3115 continue
3116 }
3117 v.reset(OpAMD64BTRQconst)
3118 v.AuxInt = int8ToAuxInt(int8(log64(^c)))
3119 v.AddArg(x)
3120 return true
3121 }
3122 break
3123 }
3124 // match: (ANDQ x (MOVQconst [c]))
3125 // cond: is32Bit(c)
3126 // result: (ANDQconst [int32(c)] x)
3127 for {
3128 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3129 x := v_0
3130 if v_1.Op != OpAMD64MOVQconst {
3131 continue
3132 }
3133 c := auxIntToInt64(v_1.AuxInt)
3134 if !(is32Bit(c)) {
3135 continue
3136 }
3137 v.reset(OpAMD64ANDQconst)
3138 v.AuxInt = int32ToAuxInt(int32(c))
3139 v.AddArg(x)
3140 return true
3141 }
3142 break
3143 }
3144 // match: (ANDQ x x)
3145 // result: x
3146 for {
3147 x := v_0
3148 if x != v_1 {
3149 break
3150 }
3151 v.copyOf(x)
3152 return true
3153 }
3154 // match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
3155 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
3156 // result: (ANDQload x [off] {sym} ptr mem)
3157 for {
3158 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3159 x := v_0
3160 l := v_1
3161 if l.Op != OpAMD64MOVQload {
3162 continue
3163 }
3164 off := auxIntToInt32(l.AuxInt)
3165 sym := auxToSym(l.Aux)
3166 mem := l.Args[1]
3167 ptr := l.Args[0]
3168 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
3169 continue
3170 }
3171 v.reset(OpAMD64ANDQload)
3172 v.AuxInt = int32ToAuxInt(off)
3173 v.Aux = symToAux(sym)
3174 v.AddArg3(x, ptr, mem)
3175 return true
3176 }
3177 break
3178 }
3179 return false
3180 }
3181 func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
3182 v_0 := v.Args[0]
3183 // match: (ANDQconst [c] x)
3184 // cond: isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128
3185 // result: (BTRQconst [int8(log32(^c))] x)
3186 for {
3187 c := auxIntToInt32(v.AuxInt)
3188 x := v_0
3189 if !(isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
3190 break
3191 }
3192 v.reset(OpAMD64BTRQconst)
3193 v.AuxInt = int8ToAuxInt(int8(log32(^c)))
3194 v.AddArg(x)
3195 return true
3196 }
3197 // match: (ANDQconst [c] (ANDQconst [d] x))
3198 // result: (ANDQconst [c & d] x)
3199 for {
3200 c := auxIntToInt32(v.AuxInt)
3201 if v_0.Op != OpAMD64ANDQconst {
3202 break
3203 }
3204 d := auxIntToInt32(v_0.AuxInt)
3205 x := v_0.Args[0]
3206 v.reset(OpAMD64ANDQconst)
3207 v.AuxInt = int32ToAuxInt(c & d)
3208 v.AddArg(x)
3209 return true
3210 }
3211 // match: (ANDQconst [c] (BTRQconst [d] x))
3212 // cond: is32Bit(int64(c) &^ (1<<uint32(d)))
3213 // result: (ANDQconst [c &^ (1<<uint32(d))] x)
3214 for {
3215 c := auxIntToInt32(v.AuxInt)
3216 if v_0.Op != OpAMD64BTRQconst {
3217 break
3218 }
3219 d := auxIntToInt8(v_0.AuxInt)
3220 x := v_0.Args[0]
3221 if !(is32Bit(int64(c) &^ (1 << uint32(d)))) {
3222 break
3223 }
3224 v.reset(OpAMD64ANDQconst)
3225 v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
3226 v.AddArg(x)
3227 return true
3228 }
3229 // match: (ANDQconst [0xFF] x)
3230 // result: (MOVBQZX x)
3231 for {
3232 if auxIntToInt32(v.AuxInt) != 0xFF {
3233 break
3234 }
3235 x := v_0
3236 v.reset(OpAMD64MOVBQZX)
3237 v.AddArg(x)
3238 return true
3239 }
3240 // match: (ANDQconst [0xFFFF] x)
3241 // result: (MOVWQZX x)
3242 for {
3243 if auxIntToInt32(v.AuxInt) != 0xFFFF {
3244 break
3245 }
3246 x := v_0
3247 v.reset(OpAMD64MOVWQZX)
3248 v.AddArg(x)
3249 return true
3250 }
3251 // match: (ANDQconst [0] _)
3252 // result: (MOVQconst [0])
3253 for {
3254 if auxIntToInt32(v.AuxInt) != 0 {
3255 break
3256 }
3257 v.reset(OpAMD64MOVQconst)
3258 v.AuxInt = int64ToAuxInt(0)
3259 return true
3260 }
3261 // match: (ANDQconst [-1] x)
3262 // result: x
3263 for {
3264 if auxIntToInt32(v.AuxInt) != -1 {
3265 break
3266 }
3267 x := v_0
3268 v.copyOf(x)
3269 return true
3270 }
3271 // match: (ANDQconst [c] (MOVQconst [d]))
3272 // result: (MOVQconst [int64(c)&d])
3273 for {
3274 c := auxIntToInt32(v.AuxInt)
3275 if v_0.Op != OpAMD64MOVQconst {
3276 break
3277 }
3278 d := auxIntToInt64(v_0.AuxInt)
3279 v.reset(OpAMD64MOVQconst)
3280 v.AuxInt = int64ToAuxInt(int64(c) & d)
3281 return true
3282 }
3283 return false
3284 }
3285 func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool {
3286 v_1 := v.Args[1]
3287 v_0 := v.Args[0]
3288 // match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
3289 // cond: ValAndOff(valoff1).canAdd32(off2)
3290 // result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
3291 for {
3292 valoff1 := auxIntToValAndOff(v.AuxInt)
3293 sym := auxToSym(v.Aux)
3294 if v_0.Op != OpAMD64ADDQconst {
3295 break
3296 }
3297 off2 := auxIntToInt32(v_0.AuxInt)
3298 base := v_0.Args[0]
3299 mem := v_1
3300 if !(ValAndOff(valoff1).canAdd32(off2)) {
3301 break
3302 }
3303 v.reset(OpAMD64ANDQconstmodify)
3304 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
3305 v.Aux = symToAux(sym)
3306 v.AddArg2(base, mem)
3307 return true
3308 }
3309 // match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
3310 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
3311 // result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
3312 for {
3313 valoff1 := auxIntToValAndOff(v.AuxInt)
3314 sym1 := auxToSym(v.Aux)
3315 if v_0.Op != OpAMD64LEAQ {
3316 break
3317 }
3318 off2 := auxIntToInt32(v_0.AuxInt)
3319 sym2 := auxToSym(v_0.Aux)
3320 base := v_0.Args[0]
3321 mem := v_1
3322 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
3323 break
3324 }
3325 v.reset(OpAMD64ANDQconstmodify)
3326 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
3327 v.Aux = symToAux(mergeSym(sym1, sym2))
3328 v.AddArg2(base, mem)
3329 return true
3330 }
3331 return false
3332 }
3333 func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool {
3334 v_2 := v.Args[2]
3335 v_1 := v.Args[1]
3336 v_0 := v.Args[0]
3337 b := v.Block
3338 typ := &b.Func.Config.Types
3339 // match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem)
3340 // cond: is32Bit(int64(off1)+int64(off2))
3341 // result: (ANDQload [off1+off2] {sym} val base mem)
3342 for {
3343 off1 := auxIntToInt32(v.AuxInt)
3344 sym := auxToSym(v.Aux)
3345 val := v_0
3346 if v_1.Op != OpAMD64ADDQconst {
3347 break
3348 }
3349 off2 := auxIntToInt32(v_1.AuxInt)
3350 base := v_1.Args[0]
3351 mem := v_2
3352 if !(is32Bit(int64(off1) + int64(off2))) {
3353 break
3354 }
3355 v.reset(OpAMD64ANDQload)
3356 v.AuxInt = int32ToAuxInt(off1 + off2)
3357 v.Aux = symToAux(sym)
3358 v.AddArg3(val, base, mem)
3359 return true
3360 }
3361 // match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
3362 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
3363 // result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
3364 for {
3365 off1 := auxIntToInt32(v.AuxInt)
3366 sym1 := auxToSym(v.Aux)
3367 val := v_0
3368 if v_1.Op != OpAMD64LEAQ {
3369 break
3370 }
3371 off2 := auxIntToInt32(v_1.AuxInt)
3372 sym2 := auxToSym(v_1.Aux)
3373 base := v_1.Args[0]
3374 mem := v_2
3375 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3376 break
3377 }
3378 v.reset(OpAMD64ANDQload)
3379 v.AuxInt = int32ToAuxInt(off1 + off2)
3380 v.Aux = symToAux(mergeSym(sym1, sym2))
3381 v.AddArg3(val, base, mem)
3382 return true
3383 }
3384 // match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
3385 // result: (ANDQ x (MOVQf2i y))
3386 for {
3387 off := auxIntToInt32(v.AuxInt)
3388 sym := auxToSym(v.Aux)
3389 x := v_0
3390 ptr := v_1
3391 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
3392 break
3393 }
3394 y := v_2.Args[1]
3395 if ptr != v_2.Args[0] {
3396 break
3397 }
3398 v.reset(OpAMD64ANDQ)
3399 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
3400 v0.AddArg(y)
3401 v.AddArg2(x, v0)
3402 return true
3403 }
3404 return false
3405 }
3406 func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool {
3407 v_2 := v.Args[2]
3408 v_1 := v.Args[1]
3409 v_0 := v.Args[0]
3410 b := v.Block
3411 // match: (ANDQmodify [off] {sym} ptr (NOTQ s:(SHLQ <t> (MOVQconst [1]) x)) mem)
3412 // result: (BTRQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
3413 for {
3414 off := auxIntToInt32(v.AuxInt)
3415 sym := auxToSym(v.Aux)
3416 ptr := v_0
3417 if v_1.Op != OpAMD64NOTQ {
3418 break
3419 }
3420 s := v_1.Args[0]
3421 if s.Op != OpAMD64SHLQ {
3422 break
3423 }
3424 t := s.Type
3425 x := s.Args[1]
3426 s_0 := s.Args[0]
3427 if s_0.Op != OpAMD64MOVQconst || auxIntToInt64(s_0.AuxInt) != 1 {
3428 break
3429 }
3430 mem := v_2
3431 v.reset(OpAMD64BTRQmodify)
3432 v.AuxInt = int32ToAuxInt(off)
3433 v.Aux = symToAux(sym)
3434 v0 := b.NewValue0(v.Pos, OpAMD64ANDQconst, t)
3435 v0.AuxInt = int32ToAuxInt(63)
3436 v0.AddArg(x)
3437 v.AddArg3(ptr, v0, mem)
3438 return true
3439 }
3440
3441 // match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
3442 // cond: is32Bit(int64(off1)+int64(off2))
3443 // result: (ANDQmodify [off1+off2] {sym} base val mem)
3444 off1 := auxIntToInt32(v.AuxInt)
3445 sym := auxToSym(v.Aux)
3446 if v_0.Op != OpAMD64ADDQconst {
3447 break
3448 }
3449 off2 := auxIntToInt32(v_0.AuxInt)
3450 base := v_0.Args[0]
3451 val := v_1
3452 mem := v_2
3453 if !(is32Bit(int64(off1) + int64(off2))) {
3454 break
3455 }
3456 v.reset(OpAMD64ANDQmodify)
3457 v.AuxInt = int32ToAuxInt(off1 + off2)
3458 v.Aux = symToAux(sym)
3459 v.AddArg3(base, val, mem)
3460 return true
3461 }
3462
3463 // match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
3464 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
3465 // result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
3466 off1 := auxIntToInt32(v.AuxInt)
3467 sym1 := auxToSym(v.Aux)
3468 if v_0.Op != OpAMD64LEAQ {
3469 break
3470 }
3471 off2 := auxIntToInt32(v_0.AuxInt)
3472 sym2 := auxToSym(v_0.Aux)
3473 base := v_0.Args[0]
3474 val := v_1
3475 mem := v_2
3476 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3477 break
3478 }
3479 v.reset(OpAMD64ANDQmodify)
3480 v.AuxInt = int32ToAuxInt(off1 + off2)
3481 v.Aux = symToAux(mergeSym(sym1, sym2))
3482 v.AddArg3(base, val, mem)
3483 return true
3484 }
3485 return false
3486 }
3487 func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool {
3488 v_0 := v.Args[0]
3489 b := v.Block
3490 // match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
3491 // result: (BSFQ (ORQconst <t> [1<<8] x))
3492 for {
3493 if v_0.Op != OpAMD64ORQconst {
3494 break
3495 }
3496 t := v_0.Type
3497 if auxIntToInt32(v_0.AuxInt) != 1<<8 {
3498 break
3499 }
3500 v_0_0 := v_0.Args[0]
3501 if v_0_0.Op != OpAMD64MOVBQZX {
3502 break
3503 }
3504 x := v_0_0.Args[0]
3505 v.reset(OpAMD64BSFQ)
3506 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
3507 v0.AuxInt = int32ToAuxInt(1 << 8)
3508 v0.AddArg(x)
3509 v.AddArg(v0)
3510 return true
3511 }
3512 // match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
3513 // result: (BSFQ (ORQconst <t> [1<<16] x))
3514 for {
3515 if v_0.Op != OpAMD64ORQconst {
3516 break
3517 }
3518 t := v_0.Type
3519 if auxIntToInt32(v_0.AuxInt) != 1<<16 {
3520 break
3521 }
3522 v_0_0 := v_0.Args[0]
3523 if v_0_0.Op != OpAMD64MOVWQZX {
3524 break
3525 }
3526 x := v_0_0.Args[0]
3527 v.reset(OpAMD64BSFQ)
3528 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
3529 v0.AuxInt = int32ToAuxInt(1 << 16)
3530 v0.AddArg(x)
3531 v.AddArg(v0)
3532 return true
3533 }
3534 return false
3535 }
3536 func rewriteValueAMD64_OpAMD64BTCLconst(v *Value) bool {
3537 v_0 := v.Args[0]
3538 // match: (BTCLconst [c] (XORLconst [d] x))
3539 // result: (XORLconst [d ^ 1<<uint32(c)] x)
3540 for {
3541 c := auxIntToInt8(v.AuxInt)
3542 if v_0.Op != OpAMD64XORLconst {
3543 break
3544 }
3545 d := auxIntToInt32(v_0.AuxInt)
3546 x := v_0.Args[0]
3547 v.reset(OpAMD64XORLconst)
3548 v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
3549 v.AddArg(x)
3550 return true
3551 }
3552 // match: (BTCLconst [c] (BTCLconst [d] x))
3553 // result: (XORLconst [1<<uint32(c) | 1<<uint32(d)] x)
3554 for {
3555 c := auxIntToInt8(v.AuxInt)
3556 if v_0.Op != OpAMD64BTCLconst {
3557 break
3558 }
3559 d := auxIntToInt8(v_0.AuxInt)
3560 x := v_0.Args[0]
3561 v.reset(OpAMD64XORLconst)
3562 v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
3563 v.AddArg(x)
3564 return true
3565 }
3566 // match: (BTCLconst [c] (MOVLconst [d]))
3567 // result: (MOVLconst [d^(1<<uint32(c))])
3568 for {
3569 c := auxIntToInt8(v.AuxInt)
3570 if v_0.Op != OpAMD64MOVLconst {
3571 break
3572 }
3573 d := auxIntToInt32(v_0.AuxInt)
3574 v.reset(OpAMD64MOVLconst)
3575 v.AuxInt = int32ToAuxInt(d ^ (1 << uint32(c)))
3576 return true
3577 }
3578 return false
3579 }
3580 func rewriteValueAMD64_OpAMD64BTCLconstmodify(v *Value) bool {
3581 v_1 := v.Args[1]
3582 v_0 := v.Args[0]
3583 // match: (BTCLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
3584 // cond: ValAndOff(valoff1).canAdd32(off2)
3585 // result: (BTCLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
3586 for {
3587 valoff1 := auxIntToValAndOff(v.AuxInt)
3588 sym := auxToSym(v.Aux)
3589 if v_0.Op != OpAMD64ADDQconst {
3590 break
3591 }
3592 off2 := auxIntToInt32(v_0.AuxInt)
3593 base := v_0.Args[0]
3594 mem := v_1
3595 if !(ValAndOff(valoff1).canAdd32(off2)) {
3596 break
3597 }
3598 v.reset(OpAMD64BTCLconstmodify)
3599 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
3600 v.Aux = symToAux(sym)
3601 v.AddArg2(base, mem)
3602 return true
3603 }
3604 // match: (BTCLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
3605 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
3606 // result: (BTCLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
3607 for {
3608 valoff1 := auxIntToValAndOff(v.AuxInt)
3609 sym1 := auxToSym(v.Aux)
3610 if v_0.Op != OpAMD64LEAQ {
3611 break
3612 }
3613 off2 := auxIntToInt32(v_0.AuxInt)
3614 sym2 := auxToSym(v_0.Aux)
3615 base := v_0.Args[0]
3616 mem := v_1
3617 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
3618 break
3619 }
3620 v.reset(OpAMD64BTCLconstmodify)
3621 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
3622 v.Aux = symToAux(mergeSym(sym1, sym2))
3623 v.AddArg2(base, mem)
3624 return true
3625 }
3626 return false
3627 }
3628 func rewriteValueAMD64_OpAMD64BTCLmodify(v *Value) bool {
3629 v_2 := v.Args[2]
3630 v_1 := v.Args[1]
3631 v_0 := v.Args[0]
3632 // match: (BTCLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
3633 // cond: is32Bit(int64(off1)+int64(off2))
3634 // result: (BTCLmodify [off1+off2] {sym} base val mem)
3635 for {
3636 off1 := auxIntToInt32(v.AuxInt)
3637 sym := auxToSym(v.Aux)
3638 if v_0.Op != OpAMD64ADDQconst {
3639 break
3640 }
3641 off2 := auxIntToInt32(v_0.AuxInt)
3642 base := v_0.Args[0]
3643 val := v_1
3644 mem := v_2
3645 if !(is32Bit(int64(off1) + int64(off2))) {
3646 break
3647 }
3648 v.reset(OpAMD64BTCLmodify)
3649 v.AuxInt = int32ToAuxInt(off1 + off2)
3650 v.Aux = symToAux(sym)
3651 v.AddArg3(base, val, mem)
3652 return true
3653 }
3654 // match: (BTCLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
3655 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
3656 // result: (BTCLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
3657 for {
3658 off1 := auxIntToInt32(v.AuxInt)
3659 sym1 := auxToSym(v.Aux)
3660 if v_0.Op != OpAMD64LEAQ {
3661 break
3662 }
3663 off2 := auxIntToInt32(v_0.AuxInt)
3664 sym2 := auxToSym(v_0.Aux)
3665 base := v_0.Args[0]
3666 val := v_1
3667 mem := v_2
3668 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3669 break
3670 }
3671 v.reset(OpAMD64BTCLmodify)
3672 v.AuxInt = int32ToAuxInt(off1 + off2)
3673 v.Aux = symToAux(mergeSym(sym1, sym2))
3674 v.AddArg3(base, val, mem)
3675 return true
3676 }
3677 return false
3678 }
3679 func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool {
3680 v_0 := v.Args[0]
3681 // match: (BTCQconst [c] (XORQconst [d] x))
3682 // cond: is32Bit(int64(d) ^ 1<<uint32(c))
3683 // result: (XORQconst [d ^ 1<<uint32(c)] x)
3684 for {
3685 c := auxIntToInt8(v.AuxInt)
3686 if v_0.Op != OpAMD64XORQconst {
3687 break
3688 }
3689 d := auxIntToInt32(v_0.AuxInt)
3690 x := v_0.Args[0]
3691 if !(is32Bit(int64(d) ^ 1<<uint32(c))) {
3692 break
3693 }
3694 v.reset(OpAMD64XORQconst)
3695 v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
3696 v.AddArg(x)
3697 return true
3698 }
3699 // match: (BTCQconst [c] (BTCQconst [d] x))
3700 // cond: is32Bit(1<<uint32(c) ^ 1<<uint32(d))
3701 // result: (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
3702 for {
3703 c := auxIntToInt8(v.AuxInt)
3704 if v_0.Op != OpAMD64BTCQconst {
3705 break
3706 }
3707 d := auxIntToInt8(v_0.AuxInt)
3708 x := v_0.Args[0]
3709 if !(is32Bit(1<<uint32(c) ^ 1<<uint32(d))) {
3710 break
3711 }
3712 v.reset(OpAMD64XORQconst)
3713 v.AuxInt = int32ToAuxInt(1<<uint32(c) ^ 1<<uint32(d))
3714 v.AddArg(x)
3715 return true
3716 }
3717 // match: (BTCQconst [c] (MOVQconst [d]))
3718 // result: (MOVQconst [d^(1<<uint32(c))])
3719 for {
3720 c := auxIntToInt8(v.AuxInt)
3721 if v_0.Op != OpAMD64MOVQconst {
3722 break
3723 }
3724 d := auxIntToInt64(v_0.AuxInt)
3725 v.reset(OpAMD64MOVQconst)
3726 v.AuxInt = int64ToAuxInt(d ^ (1 << uint32(c)))
3727 return true
3728 }
3729 return false
3730 }
3731 func rewriteValueAMD64_OpAMD64BTCQconstmodify(v *Value) bool {
3732 v_1 := v.Args[1]
3733 v_0 := v.Args[0]
3734 // match: (BTCQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
3735 // cond: ValAndOff(valoff1).canAdd32(off2)
3736 // result: (BTCQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
3737 for {
3738 valoff1 := auxIntToValAndOff(v.AuxInt)
3739 sym := auxToSym(v.Aux)
3740 if v_0.Op != OpAMD64ADDQconst {
3741 break
3742 }
3743 off2 := auxIntToInt32(v_0.AuxInt)
3744 base := v_0.Args[0]
3745 mem := v_1
3746 if !(ValAndOff(valoff1).canAdd32(off2)) {
3747 break
3748 }
3749 v.reset(OpAMD64BTCQconstmodify)
3750 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
3751 v.Aux = symToAux(sym)
3752 v.AddArg2(base, mem)
3753 return true
3754 }
3755 // match: (BTCQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
3756 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
3757 // result: (BTCQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
3758 for {
3759 valoff1 := auxIntToValAndOff(v.AuxInt)
3760 sym1 := auxToSym(v.Aux)
3761 if v_0.Op != OpAMD64LEAQ {
3762 break
3763 }
3764 off2 := auxIntToInt32(v_0.AuxInt)
3765 sym2 := auxToSym(v_0.Aux)
3766 base := v_0.Args[0]
3767 mem := v_1
3768 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
3769 break
3770 }
3771 v.reset(OpAMD64BTCQconstmodify)
3772 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
3773 v.Aux = symToAux(mergeSym(sym1, sym2))
3774 v.AddArg2(base, mem)
3775 return true
3776 }
3777 return false
3778 }
3779 func rewriteValueAMD64_OpAMD64BTCQmodify(v *Value) bool {
3780 v_2 := v.Args[2]
3781 v_1 := v.Args[1]
3782 v_0 := v.Args[0]
3783 // match: (BTCQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
3784 // cond: is32Bit(int64(off1)+int64(off2))
3785 // result: (BTCQmodify [off1+off2] {sym} base val mem)
3786 for {
3787 off1 := auxIntToInt32(v.AuxInt)
3788 sym := auxToSym(v.Aux)
3789 if v_0.Op != OpAMD64ADDQconst {
3790 break
3791 }
3792 off2 := auxIntToInt32(v_0.AuxInt)
3793 base := v_0.Args[0]
3794 val := v_1
3795 mem := v_2
3796 if !(is32Bit(int64(off1) + int64(off2))) {
3797 break
3798 }
3799 v.reset(OpAMD64BTCQmodify)
3800 v.AuxInt = int32ToAuxInt(off1 + off2)
3801 v.Aux = symToAux(sym)
3802 v.AddArg3(base, val, mem)
3803 return true
3804 }
3805 // match: (BTCQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
3806 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
3807 // result: (BTCQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
3808 for {
3809 off1 := auxIntToInt32(v.AuxInt)
3810 sym1 := auxToSym(v.Aux)
3811 if v_0.Op != OpAMD64LEAQ {
3812 break
3813 }
3814 off2 := auxIntToInt32(v_0.AuxInt)
3815 sym2 := auxToSym(v_0.Aux)
3816 base := v_0.Args[0]
3817 val := v_1
3818 mem := v_2
3819 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3820 break
3821 }
3822 v.reset(OpAMD64BTCQmodify)
3823 v.AuxInt = int32ToAuxInt(off1 + off2)
3824 v.Aux = symToAux(mergeSym(sym1, sym2))
3825 v.AddArg3(base, val, mem)
3826 return true
3827 }
3828 return false
3829 }
3830 func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool {
3831 v_0 := v.Args[0]
3832 // match: (BTLconst [c] (SHRQconst [d] x))
3833 // cond: (c+d) < 64
3834 // result: (BTQconst [c+d] x)
3835 for {
3836 c := auxIntToInt8(v.AuxInt)
3837 if v_0.Op != OpAMD64SHRQconst {
3838 break
3839 }
3840 d := auxIntToInt8(v_0.AuxInt)
3841 x := v_0.Args[0]
3842 if !((c + d) < 64) {
3843 break
3844 }
3845 v.reset(OpAMD64BTQconst)
3846 v.AuxInt = int8ToAuxInt(c + d)
3847 v.AddArg(x)
3848 return true
3849 }
3850 // match: (BTLconst [c] (SHLQconst [d] x))
3851 // cond: c > d
3852 // result: (BTLconst [c-d] x)
3853 for {
3854 c := auxIntToInt8(v.AuxInt)
3855 if v_0.Op != OpAMD64SHLQconst {
3856 break
3857 }
3858 d := auxIntToInt8(v_0.AuxInt)
3859 x := v_0.Args[0]
3860 if !(c > d) {
3861 break
3862 }
3863 v.reset(OpAMD64BTLconst)
3864 v.AuxInt = int8ToAuxInt(c - d)
3865 v.AddArg(x)
3866 return true
3867 }
3868 // match: (BTLconst [0] s:(SHRQ x y))
3869 // result: (BTQ y x)
3870 for {
3871 if auxIntToInt8(v.AuxInt) != 0 {
3872 break
3873 }
3874 s := v_0
3875 if s.Op != OpAMD64SHRQ {
3876 break
3877 }
3878 y := s.Args[1]
3879 x := s.Args[0]
3880 v.reset(OpAMD64BTQ)
3881 v.AddArg2(y, x)
3882 return true
3883 }
3884 // match: (BTLconst [c] (SHRLconst [d] x))
3885 // cond: (c+d) < 32
3886 // result: (BTLconst [c+d] x)
3887 for {
3888 c := auxIntToInt8(v.AuxInt)
3889 if v_0.Op != OpAMD64SHRLconst {
3890 break
3891 }
3892 d := auxIntToInt8(v_0.AuxInt)
3893 x := v_0.Args[0]
3894 if !((c + d) < 32) {
3895 break
3896 }
3897 v.reset(OpAMD64BTLconst)
3898 v.AuxInt = int8ToAuxInt(c + d)
3899 v.AddArg(x)
3900 return true
3901 }
3902 // match: (BTLconst [c] (SHLLconst [d] x))
3903 // cond: c > d
3904 // result: (BTLconst [c-d] x)
3905 for {
3906 c := auxIntToInt8(v.AuxInt)
3907 if v_0.Op != OpAMD64SHLLconst {
3908 break
3909 }
3910 d := auxIntToInt8(v_0.AuxInt)
3911 x := v_0.Args[0]
3912 if !(c > d) {
3913 break
3914 }
3915 v.reset(OpAMD64BTLconst)
3916 v.AuxInt = int8ToAuxInt(c - d)
3917 v.AddArg(x)
3918 return true
3919 }
3920 // match: (BTLconst [0] s:(SHRL x y))
3921 // result: (BTL y x)
3922 for {
3923 if auxIntToInt8(v.AuxInt) != 0 {
3924 break
3925 }
3926 s := v_0
3927 if s.Op != OpAMD64SHRL {
3928 break
3929 }
3930 y := s.Args[1]
3931 x := s.Args[0]
3932 v.reset(OpAMD64BTL)
3933 v.AddArg2(y, x)
3934 return true
3935 }
3936 return false
3937 }
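// The BTLconst rules above move shifts into the tested bit index: bit c of
// (x >> d) is bit c+d of x, and bit c of (x << d) is bit c-d of x when c > d.
// The (c+d) < 64 and (c+d) < 32 guards keep the new index within the operand
// width. Sketch of the first rule (syntax approximate):
//
//	(BTLconst [c] (SHRQconst [d] x)) && (c+d) < 64 => (BTQconst [c+d] x)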
3938 func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
3939 v_0 := v.Args[0]
3940 // match: (BTQconst [c] (SHRQconst [d] x))
3941 // cond: (c+d) < 64
3942 // result: (BTQconst [c+d] x)
3943 for {
3944 c := auxIntToInt8(v.AuxInt)
3945 if v_0.Op != OpAMD64SHRQconst {
3946 break
3947 }
3948 d := auxIntToInt8(v_0.AuxInt)
3949 x := v_0.Args[0]
3950 if !((c + d) < 64) {
3951 break
3952 }
3953 v.reset(OpAMD64BTQconst)
3954 v.AuxInt = int8ToAuxInt(c + d)
3955 v.AddArg(x)
3956 return true
3957 }
3958 // match: (BTQconst [c] (SHLQconst [d] x))
3959 // cond: c > d
3960 // result: (BTQconst [c-d] x)
3961 for {
3962 c := auxIntToInt8(v.AuxInt)
3963 if v_0.Op != OpAMD64SHLQconst {
3964 break
3965 }
3966 d := auxIntToInt8(v_0.AuxInt)
3967 x := v_0.Args[0]
3968 if !(c > d) {
3969 break
3970 }
3971 v.reset(OpAMD64BTQconst)
3972 v.AuxInt = int8ToAuxInt(c - d)
3973 v.AddArg(x)
3974 return true
3975 }
3976 // match: (BTQconst [0] s:(SHRQ x y))
3977 // result: (BTQ y x)
3978 for {
3979 if auxIntToInt8(v.AuxInt) != 0 {
3980 break
3981 }
3982 s := v_0
3983 if s.Op != OpAMD64SHRQ {
3984 break
3985 }
3986 y := s.Args[1]
3987 x := s.Args[0]
3988 v.reset(OpAMD64BTQ)
3989 v.AddArg2(y, x)
3990 return true
3991 }
3992 return false
3993 }
3994 func rewriteValueAMD64_OpAMD64BTRLconst(v *Value) bool {
3995 v_0 := v.Args[0]
3996 // match: (BTRLconst [c] (BTSLconst [c] x))
3997 // result: (BTRLconst [c] x)
3998 for {
3999 c := auxIntToInt8(v.AuxInt)
4000 if v_0.Op != OpAMD64BTSLconst || auxIntToInt8(v_0.AuxInt) != c {
4001 break
4002 }
4003 x := v_0.Args[0]
4004 v.reset(OpAMD64BTRLconst)
4005 v.AuxInt = int8ToAuxInt(c)
4006 v.AddArg(x)
4007 return true
4008 }
4009 // match: (BTRLconst [c] (BTCLconst [c] x))
4010 // result: (BTRLconst [c] x)
4011 for {
4012 c := auxIntToInt8(v.AuxInt)
4013 if v_0.Op != OpAMD64BTCLconst || auxIntToInt8(v_0.AuxInt) != c {
4014 break
4015 }
4016 x := v_0.Args[0]
4017 v.reset(OpAMD64BTRLconst)
4018 v.AuxInt = int8ToAuxInt(c)
4019 v.AddArg(x)
4020 return true
4021 }
4022 // match: (BTRLconst [c] (ANDLconst [d] x))
4023 // result: (ANDLconst [d &^ (1<<uint32(c))] x)
4024 for {
4025 c := auxIntToInt8(v.AuxInt)
4026 if v_0.Op != OpAMD64ANDLconst {
4027 break
4028 }
4029 d := auxIntToInt32(v_0.AuxInt)
4030 x := v_0.Args[0]
4031 v.reset(OpAMD64ANDLconst)
4032 v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
4033 v.AddArg(x)
4034 return true
4035 }
4036 // match: (BTRLconst [c] (BTRLconst [d] x))
4037 // result: (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x)
4038 for {
4039 c := auxIntToInt8(v.AuxInt)
4040 if v_0.Op != OpAMD64BTRLconst {
4041 break
4042 }
4043 d := auxIntToInt8(v_0.AuxInt)
4044 x := v_0.Args[0]
4045 v.reset(OpAMD64ANDLconst)
4046 v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
4047 v.AddArg(x)
4048 return true
4049 }
4050 // match: (BTRLconst [c] (MOVLconst [d]))
4051 // result: (MOVLconst [d&^(1<<uint32(c))])
4052 for {
4053 c := auxIntToInt8(v.AuxInt)
4054 if v_0.Op != OpAMD64MOVLconst {
4055 break
4056 }
4057 d := auxIntToInt32(v_0.AuxInt)
4058 v.reset(OpAMD64MOVLconst)
4059 v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
4060 return true
4061 }
4062 return false
4063 }
4064 func rewriteValueAMD64_OpAMD64BTRLconstmodify(v *Value) bool {
4065 v_1 := v.Args[1]
4066 v_0 := v.Args[0]
4067 // match: (BTRLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
4068 // cond: ValAndOff(valoff1).canAdd32(off2)
4069 // result: (BTRLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
4070 for {
4071 valoff1 := auxIntToValAndOff(v.AuxInt)
4072 sym := auxToSym(v.Aux)
4073 if v_0.Op != OpAMD64ADDQconst {
4074 break
4075 }
4076 off2 := auxIntToInt32(v_0.AuxInt)
4077 base := v_0.Args[0]
4078 mem := v_1
4079 if !(ValAndOff(valoff1).canAdd32(off2)) {
4080 break
4081 }
4082 v.reset(OpAMD64BTRLconstmodify)
4083 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
4084 v.Aux = symToAux(sym)
4085 v.AddArg2(base, mem)
4086 return true
4087 }
4088 // match: (BTRLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
4089 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
4090 // result: (BTRLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
4091 for {
4092 valoff1 := auxIntToValAndOff(v.AuxInt)
4093 sym1 := auxToSym(v.Aux)
4094 if v_0.Op != OpAMD64LEAQ {
4095 break
4096 }
4097 off2 := auxIntToInt32(v_0.AuxInt)
4098 sym2 := auxToSym(v_0.Aux)
4099 base := v_0.Args[0]
4100 mem := v_1
4101 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
4102 break
4103 }
4104 v.reset(OpAMD64BTRLconstmodify)
4105 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
4106 v.Aux = symToAux(mergeSym(sym1, sym2))
4107 v.AddArg2(base, mem)
4108 return true
4109 }
4110 return false
4111 }
4112 func rewriteValueAMD64_OpAMD64BTRLmodify(v *Value) bool {
4113 v_2 := v.Args[2]
4114 v_1 := v.Args[1]
4115 v_0 := v.Args[0]
4116 // match: (BTRLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
4117 // cond: is32Bit(int64(off1)+int64(off2))
4118 // result: (BTRLmodify [off1+off2] {sym} base val mem)
4119 for {
4120 off1 := auxIntToInt32(v.AuxInt)
4121 sym := auxToSym(v.Aux)
4122 if v_0.Op != OpAMD64ADDQconst {
4123 break
4124 }
4125 off2 := auxIntToInt32(v_0.AuxInt)
4126 base := v_0.Args[0]
4127 val := v_1
4128 mem := v_2
4129 if !(is32Bit(int64(off1) + int64(off2))) {
4130 break
4131 }
4132 v.reset(OpAMD64BTRLmodify)
4133 v.AuxInt = int32ToAuxInt(off1 + off2)
4134 v.Aux = symToAux(sym)
4135 v.AddArg3(base, val, mem)
4136 return true
4137 }
4138 // match: (BTRLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
4139 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
4140 // result: (BTRLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
4141 for {
4142 off1 := auxIntToInt32(v.AuxInt)
4143 sym1 := auxToSym(v.Aux)
4144 if v_0.Op != OpAMD64LEAQ {
4145 break
4146 }
4147 off2 := auxIntToInt32(v_0.AuxInt)
4148 sym2 := auxToSym(v_0.Aux)
4149 base := v_0.Args[0]
4150 val := v_1
4151 mem := v_2
4152 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
4153 break
4154 }
4155 v.reset(OpAMD64BTRLmodify)
4156 v.AuxInt = int32ToAuxInt(off1 + off2)
4157 v.Aux = symToAux(mergeSym(sym1, sym2))
4158 v.AddArg3(base, val, mem)
4159 return true
4160 }
4161 return false
4162 }
4163 func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool {
4164 v_0 := v.Args[0]
4165 // match: (BTRQconst [c] (BTSQconst [c] x))
4166 // result: (BTRQconst [c] x)
4167 for {
4168 c := auxIntToInt8(v.AuxInt)
4169 if v_0.Op != OpAMD64BTSQconst || auxIntToInt8(v_0.AuxInt) != c {
4170 break
4171 }
4172 x := v_0.Args[0]
4173 v.reset(OpAMD64BTRQconst)
4174 v.AuxInt = int8ToAuxInt(c)
4175 v.AddArg(x)
4176 return true
4177 }
4178 // match: (BTRQconst [c] (BTCQconst [c] x))
4179 // result: (BTRQconst [c] x)
4180 for {
4181 c := auxIntToInt8(v.AuxInt)
4182 if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
4183 break
4184 }
4185 x := v_0.Args[0]
4186 v.reset(OpAMD64BTRQconst)
4187 v.AuxInt = int8ToAuxInt(c)
4188 v.AddArg(x)
4189 return true
4190 }
4191 // match: (BTRQconst [c] (ANDQconst [d] x))
4192 // cond: is32Bit(int64(d) &^ (1<<uint32(c)))
4193 // result: (ANDQconst [d &^ (1<<uint32(c))] x)
4194 for {
4195 c := auxIntToInt8(v.AuxInt)
4196 if v_0.Op != OpAMD64ANDQconst {
4197 break
4198 }
4199 d := auxIntToInt32(v_0.AuxInt)
4200 x := v_0.Args[0]
4201 if !(is32Bit(int64(d) &^ (1 << uint32(c)))) {
4202 break
4203 }
4204 v.reset(OpAMD64ANDQconst)
4205 v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
4206 v.AddArg(x)
4207 return true
4208 }
4209 // match: (BTRQconst [c] (BTRQconst [d] x))
4210 // cond: is32Bit(^(1<<uint32(c) | 1<<uint32(d)))
4211 // result: (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x)
4212 for {
4213 c := auxIntToInt8(v.AuxInt)
4214 if v_0.Op != OpAMD64BTRQconst {
4215 break
4216 }
4217 d := auxIntToInt8(v_0.AuxInt)
4218 x := v_0.Args[0]
4219 if !(is32Bit(^(1<<uint32(c) | 1<<uint32(d)))) {
4220 break
4221 }
4222 v.reset(OpAMD64ANDQconst)
4223 v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
4224 v.AddArg(x)
4225 return true
4226 }
4227 // match: (BTRQconst [c] (MOVQconst [d]))
4228 // result: (MOVQconst [d&^(1<<uint32(c))])
4229 for {
4230 c := auxIntToInt8(v.AuxInt)
4231 if v_0.Op != OpAMD64MOVQconst {
4232 break
4233 }
4234 d := auxIntToInt64(v_0.AuxInt)
4235 v.reset(OpAMD64MOVQconst)
4236 v.AuxInt = int64ToAuxInt(d &^ (1 << uint32(c)))
4237 return true
4238 }
4239 return false
4240 }
4241 func rewriteValueAMD64_OpAMD64BTRQconstmodify(v *Value) bool {
4242 v_1 := v.Args[1]
4243 v_0 := v.Args[0]
4244 // match: (BTRQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
4245 // cond: ValAndOff(valoff1).canAdd32(off2)
4246 // result: (BTRQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
4247 for {
4248 valoff1 := auxIntToValAndOff(v.AuxInt)
4249 sym := auxToSym(v.Aux)
4250 if v_0.Op != OpAMD64ADDQconst {
4251 break
4252 }
4253 off2 := auxIntToInt32(v_0.AuxInt)
4254 base := v_0.Args[0]
4255 mem := v_1
4256 if !(ValAndOff(valoff1).canAdd32(off2)) {
4257 break
4258 }
4259 v.reset(OpAMD64BTRQconstmodify)
4260 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
4261 v.Aux = symToAux(sym)
4262 v.AddArg2(base, mem)
4263 return true
4264 }
4265 // match: (BTRQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
4266 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
4267 // result: (BTRQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
4268 for {
4269 valoff1 := auxIntToValAndOff(v.AuxInt)
4270 sym1 := auxToSym(v.Aux)
4271 if v_0.Op != OpAMD64LEAQ {
4272 break
4273 }
4274 off2 := auxIntToInt32(v_0.AuxInt)
4275 sym2 := auxToSym(v_0.Aux)
4276 base := v_0.Args[0]
4277 mem := v_1
4278 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
4279 break
4280 }
4281 v.reset(OpAMD64BTRQconstmodify)
4282 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
4283 v.Aux = symToAux(mergeSym(sym1, sym2))
4284 v.AddArg2(base, mem)
4285 return true
4286 }
4287 return false
4288 }
4289 func rewriteValueAMD64_OpAMD64BTRQmodify(v *Value) bool {
4290 v_2 := v.Args[2]
4291 v_1 := v.Args[1]
4292 v_0 := v.Args[0]
4293 // match: (BTRQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
4294 // cond: is32Bit(int64(off1)+int64(off2))
4295 // result: (BTRQmodify [off1+off2] {sym} base val mem)
4296 for {
4297 off1 := auxIntToInt32(v.AuxInt)
4298 sym := auxToSym(v.Aux)
4299 if v_0.Op != OpAMD64ADDQconst {
4300 break
4301 }
4302 off2 := auxIntToInt32(v_0.AuxInt)
4303 base := v_0.Args[0]
4304 val := v_1
4305 mem := v_2
4306 if !(is32Bit(int64(off1) + int64(off2))) {
4307 break
4308 }
4309 v.reset(OpAMD64BTRQmodify)
4310 v.AuxInt = int32ToAuxInt(off1 + off2)
4311 v.Aux = symToAux(sym)
4312 v.AddArg3(base, val, mem)
4313 return true
4314 }
4315 // match: (BTRQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
4316 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
4317 // result: (BTRQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
4318 for {
4319 off1 := auxIntToInt32(v.AuxInt)
4320 sym1 := auxToSym(v.Aux)
4321 if v_0.Op != OpAMD64LEAQ {
4322 break
4323 }
4324 off2 := auxIntToInt32(v_0.AuxInt)
4325 sym2 := auxToSym(v_0.Aux)
4326 base := v_0.Args[0]
4327 val := v_1
4328 mem := v_2
4329 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
4330 break
4331 }
4332 v.reset(OpAMD64BTRQmodify)
4333 v.AuxInt = int32ToAuxInt(off1 + off2)
4334 v.Aux = symToAux(mergeSym(sym1, sym2))
4335 v.AddArg3(base, val, mem)
4336 return true
4337 }
4338 return false
4339 }
4340 func rewriteValueAMD64_OpAMD64BTSLconst(v *Value) bool {
4341 v_0 := v.Args[0]
4342 // match: (BTSLconst [c] (BTRLconst [c] x))
4343 // result: (BTSLconst [c] x)
4344 for {
4345 c := auxIntToInt8(v.AuxInt)
4346 if v_0.Op != OpAMD64BTRLconst || auxIntToInt8(v_0.AuxInt) != c {
4347 break
4348 }
4349 x := v_0.Args[0]
4350 v.reset(OpAMD64BTSLconst)
4351 v.AuxInt = int8ToAuxInt(c)
4352 v.AddArg(x)
4353 return true
4354 }
4355 // match: (BTSLconst [c] (BTCLconst [c] x))
4356 // result: (BTSLconst [c] x)
4357 for {
4358 c := auxIntToInt8(v.AuxInt)
4359 if v_0.Op != OpAMD64BTCLconst || auxIntToInt8(v_0.AuxInt) != c {
4360 break
4361 }
4362 x := v_0.Args[0]
4363 v.reset(OpAMD64BTSLconst)
4364 v.AuxInt = int8ToAuxInt(c)
4365 v.AddArg(x)
4366 return true
4367 }
4368 // match: (BTSLconst [c] (ORLconst [d] x))
4369 // result: (ORLconst [d | 1<<uint32(c)] x)
4370 for {
4371 c := auxIntToInt8(v.AuxInt)
4372 if v_0.Op != OpAMD64ORLconst {
4373 break
4374 }
4375 d := auxIntToInt32(v_0.AuxInt)
4376 x := v_0.Args[0]
4377 v.reset(OpAMD64ORLconst)
4378 v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
4379 v.AddArg(x)
4380 return true
4381 }
4382 // match: (BTSLconst [c] (BTSLconst [d] x))
4383 // result: (ORLconst [1<<uint32(c) | 1<<uint32(d)] x)
4384 for {
4385 c := auxIntToInt8(v.AuxInt)
4386 if v_0.Op != OpAMD64BTSLconst {
4387 break
4388 }
4389 d := auxIntToInt8(v_0.AuxInt)
4390 x := v_0.Args[0]
4391 v.reset(OpAMD64ORLconst)
4392 v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
4393 v.AddArg(x)
4394 return true
4395 }
4396 // match: (BTSLconst [c] (MOVLconst [d]))
4397 // result: (MOVLconst [d|(1<<uint32(c))])
4398 for {
4399 c := auxIntToInt8(v.AuxInt)
4400 if v_0.Op != OpAMD64MOVLconst {
4401 break
4402 }
4403 d := auxIntToInt32(v_0.AuxInt)
4404 v.reset(OpAMD64MOVLconst)
4405 v.AuxInt = int32ToAuxInt(d | (1 << uint32(c)))
4406 return true
4407 }
4408 return false
4409 }
4410 func rewriteValueAMD64_OpAMD64BTSLconstmodify(v *Value) bool {
4411 v_1 := v.Args[1]
4412 v_0 := v.Args[0]
4413 // match: (BTSLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
4414 // cond: ValAndOff(valoff1).canAdd32(off2)
4415 // result: (BTSLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
4416 for {
4417 valoff1 := auxIntToValAndOff(v.AuxInt)
4418 sym := auxToSym(v.Aux)
4419 if v_0.Op != OpAMD64ADDQconst {
4420 break
4421 }
4422 off2 := auxIntToInt32(v_0.AuxInt)
4423 base := v_0.Args[0]
4424 mem := v_1
4425 if !(ValAndOff(valoff1).canAdd32(off2)) {
4426 break
4427 }
4428 v.reset(OpAMD64BTSLconstmodify)
4429 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
4430 v.Aux = symToAux(sym)
4431 v.AddArg2(base, mem)
4432 return true
4433 }
4434 // match: (BTSLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
4435 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
4436 // result: (BTSLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
4437 for {
4438 valoff1 := auxIntToValAndOff(v.AuxInt)
4439 sym1 := auxToSym(v.Aux)
4440 if v_0.Op != OpAMD64LEAQ {
4441 break
4442 }
4443 off2 := auxIntToInt32(v_0.AuxInt)
4444 sym2 := auxToSym(v_0.Aux)
4445 base := v_0.Args[0]
4446 mem := v_1
4447 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
4448 break
4449 }
4450 v.reset(OpAMD64BTSLconstmodify)
4451 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
4452 v.Aux = symToAux(mergeSym(sym1, sym2))
4453 v.AddArg2(base, mem)
4454 return true
4455 }
4456 return false
4457 }
4458 func rewriteValueAMD64_OpAMD64BTSLmodify(v *Value) bool {
4459 v_2 := v.Args[2]
4460 v_1 := v.Args[1]
4461 v_0 := v.Args[0]
4462 // match: (BTSLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
4463 // cond: is32Bit(int64(off1)+int64(off2))
4464 // result: (BTSLmodify [off1+off2] {sym} base val mem)
4465 for {
4466 off1 := auxIntToInt32(v.AuxInt)
4467 sym := auxToSym(v.Aux)
4468 if v_0.Op != OpAMD64ADDQconst {
4469 break
4470 }
4471 off2 := auxIntToInt32(v_0.AuxInt)
4472 base := v_0.Args[0]
4473 val := v_1
4474 mem := v_2
4475 if !(is32Bit(int64(off1) + int64(off2))) {
4476 break
4477 }
4478 v.reset(OpAMD64BTSLmodify)
4479 v.AuxInt = int32ToAuxInt(off1 + off2)
4480 v.Aux = symToAux(sym)
4481 v.AddArg3(base, val, mem)
4482 return true
4483 }
4484 // match: (BTSLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
4485 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
4486 // result: (BTSLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
4487 for {
4488 off1 := auxIntToInt32(v.AuxInt)
4489 sym1 := auxToSym(v.Aux)
4490 if v_0.Op != OpAMD64LEAQ {
4491 break
4492 }
4493 off2 := auxIntToInt32(v_0.AuxInt)
4494 sym2 := auxToSym(v_0.Aux)
4495 base := v_0.Args[0]
4496 val := v_1
4497 mem := v_2
4498 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
4499 break
4500 }
4501 v.reset(OpAMD64BTSLmodify)
4502 v.AuxInt = int32ToAuxInt(off1 + off2)
4503 v.Aux = symToAux(mergeSym(sym1, sym2))
4504 v.AddArg3(base, val, mem)
4505 return true
4506 }
4507 return false
4508 }
4509 func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool {
4510 v_0 := v.Args[0]
4511 // match: (BTSQconst [c] (BTRQconst [c] x))
4512 // result: (BTSQconst [c] x)
4513 for {
4514 c := auxIntToInt8(v.AuxInt)
4515 if v_0.Op != OpAMD64BTRQconst || auxIntToInt8(v_0.AuxInt) != c {
4516 break
4517 }
4518 x := v_0.Args[0]
4519 v.reset(OpAMD64BTSQconst)
4520 v.AuxInt = int8ToAuxInt(c)
4521 v.AddArg(x)
4522 return true
4523 }
4524 // match: (BTSQconst [c] (BTCQconst [c] x))
4525 // result: (BTSQconst [c] x)
4526 for {
4527 c := auxIntToInt8(v.AuxInt)
4528 if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
4529 break
4530 }
4531 x := v_0.Args[0]
4532 v.reset(OpAMD64BTSQconst)
4533 v.AuxInt = int8ToAuxInt(c)
4534 v.AddArg(x)
4535 return true
4536 }
4537 // match: (BTSQconst [c] (ORQconst [d] x))
4538 // cond: is32Bit(int64(d) | 1<<uint32(c))
4539 // result: (ORQconst [d | 1<<uint32(c)] x)
4540 for {
4541 c := auxIntToInt8(v.AuxInt)
4542 if v_0.Op != OpAMD64ORQconst {
4543 break
4544 }
4545 d := auxIntToInt32(v_0.AuxInt)
4546 x := v_0.Args[0]
4547 if !(is32Bit(int64(d) | 1<<uint32(c))) {
4548 break
4549 }
4550 v.reset(OpAMD64ORQconst)
4551 v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
4552 v.AddArg(x)
4553 return true
4554 }
4555 // match: (BTSQconst [c] (BTSQconst [d] x))
4556 // cond: is32Bit(1<<uint32(c) | 1<<uint32(d))
4557 // result: (ORQconst [1<<uint32(c) | 1<<uint32(d)] x)
4558 for {
4559 c := auxIntToInt8(v.AuxInt)
4560 if v_0.Op != OpAMD64BTSQconst {
4561 break
4562 }
4563 d := auxIntToInt8(v_0.AuxInt)
4564 x := v_0.Args[0]
4565 if !(is32Bit(1<<uint32(c) | 1<<uint32(d))) {
4566 break
4567 }
4568 v.reset(OpAMD64ORQconst)
4569 v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
4570 v.AddArg(x)
4571 return true
4572 }
4573 // match: (BTSQconst [c] (MOVQconst [d]))
4574 // result: (MOVQconst [d|(1<<uint32(c))])
4575 for {
4576 c := auxIntToInt8(v.AuxInt)
4577 if v_0.Op != OpAMD64MOVQconst {
4578 break
4579 }
4580 d := auxIntToInt64(v_0.AuxInt)
4581 v.reset(OpAMD64MOVQconst)
4582 v.AuxInt = int64ToAuxInt(d | (1 << uint32(c)))
4583 return true
4584 }
4585 return false
4586 }
4587 func rewriteValueAMD64_OpAMD64BTSQconstmodify(v *Value) bool {
4588 v_1 := v.Args[1]
4589 v_0 := v.Args[0]
4590 // match: (BTSQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
4591 // cond: ValAndOff(valoff1).canAdd32(off2)
4592 // result: (BTSQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
4593 for {
4594 valoff1 := auxIntToValAndOff(v.AuxInt)
4595 sym := auxToSym(v.Aux)
4596 if v_0.Op != OpAMD64ADDQconst {
4597 break
4598 }
4599 off2 := auxIntToInt32(v_0.AuxInt)
4600 base := v_0.Args[0]
4601 mem := v_1
4602 if !(ValAndOff(valoff1).canAdd32(off2)) {
4603 break
4604 }
4605 v.reset(OpAMD64BTSQconstmodify)
4606 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
4607 v.Aux = symToAux(sym)
4608 v.AddArg2(base, mem)
4609 return true
4610 }
4611 // match: (BTSQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
4612 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
4613 // result: (BTSQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
4614 for {
4615 valoff1 := auxIntToValAndOff(v.AuxInt)
4616 sym1 := auxToSym(v.Aux)
4617 if v_0.Op != OpAMD64LEAQ {
4618 break
4619 }
4620 off2 := auxIntToInt32(v_0.AuxInt)
4621 sym2 := auxToSym(v_0.Aux)
4622 base := v_0.Args[0]
4623 mem := v_1
4624 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
4625 break
4626 }
4627 v.reset(OpAMD64BTSQconstmodify)
4628 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
4629 v.Aux = symToAux(mergeSym(sym1, sym2))
4630 v.AddArg2(base, mem)
4631 return true
4632 }
4633 return false
4634 }
4635 func rewriteValueAMD64_OpAMD64BTSQmodify(v *Value) bool {
4636 v_2 := v.Args[2]
4637 v_1 := v.Args[1]
4638 v_0 := v.Args[0]
4639 // match: (BTSQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
4640 // cond: is32Bit(int64(off1)+int64(off2))
4641 // result: (BTSQmodify [off1+off2] {sym} base val mem)
4642 for {
4643 off1 := auxIntToInt32(v.AuxInt)
4644 sym := auxToSym(v.Aux)
4645 if v_0.Op != OpAMD64ADDQconst {
4646 break
4647 }
4648 off2 := auxIntToInt32(v_0.AuxInt)
4649 base := v_0.Args[0]
4650 val := v_1
4651 mem := v_2
4652 if !(is32Bit(int64(off1) + int64(off2))) {
4653 break
4654 }
4655 v.reset(OpAMD64BTSQmodify)
4656 v.AuxInt = int32ToAuxInt(off1 + off2)
4657 v.Aux = symToAux(sym)
4658 v.AddArg3(base, val, mem)
4659 return true
4660 }
4661 // match: (BTSQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
4662 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
4663 // result: (BTSQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
4664 for {
4665 off1 := auxIntToInt32(v.AuxInt)
4666 sym1 := auxToSym(v.Aux)
4667 if v_0.Op != OpAMD64LEAQ {
4668 break
4669 }
4670 off2 := auxIntToInt32(v_0.AuxInt)
4671 sym2 := auxToSym(v_0.Aux)
4672 base := v_0.Args[0]
4673 val := v_1
4674 mem := v_2
4675 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
4676 break
4677 }
4678 v.reset(OpAMD64BTSQmodify)
4679 v.AuxInt = int32ToAuxInt(off1 + off2)
4680 v.Aux = symToAux(mergeSym(sym1, sym2))
4681 v.AddArg3(base, val, mem)
4682 return true
4683 }
4684 return false
4685 }
4686 func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
4687 v_2 := v.Args[2]
4688 v_1 := v.Args[1]
4689 v_0 := v.Args[0]
4690 // match: (CMOVLCC x y (InvertFlags cond))
4691 // result: (CMOVLLS x y cond)
4692 for {
4693 x := v_0
4694 y := v_1
4695 if v_2.Op != OpAMD64InvertFlags {
4696 break
4697 }
4698 cond := v_2.Args[0]
4699 v.reset(OpAMD64CMOVLLS)
4700 v.AddArg3(x, y, cond)
4701 return true
4702 }
4703 // match: (CMOVLCC _ x (FlagEQ))
4704 // result: x
4705 for {
4706 x := v_1
4707 if v_2.Op != OpAMD64FlagEQ {
4708 break
4709 }
4710 v.copyOf(x)
4711 return true
4712 }
4713 // match: (CMOVLCC _ x (FlagGT_UGT))
4714 // result: x
4715 for {
4716 x := v_1
4717 if v_2.Op != OpAMD64FlagGT_UGT {
4718 break
4719 }
4720 v.copyOf(x)
4721 return true
4722 }
4723 // match: (CMOVLCC y _ (FlagGT_ULT))
4724 // result: y
4725 for {
4726 y := v_0
4727 if v_2.Op != OpAMD64FlagGT_ULT {
4728 break
4729 }
4730 v.copyOf(y)
4731 return true
4732 }
4733 // match: (CMOVLCC y _ (FlagLT_ULT))
4734 // result: y
4735 for {
4736 y := v_0
4737 if v_2.Op != OpAMD64FlagLT_ULT {
4738 break
4739 }
4740 v.copyOf(y)
4741 return true
4742 }
4743 // match: (CMOVLCC _ x (FlagLT_UGT))
4744 // result: x
4745 for {
4746 x := v_1
4747 if v_2.Op != OpAMD64FlagLT_UGT {
4748 break
4749 }
4750 v.copyOf(x)
4751 return true
4752 }
4753 return false
4754 }
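// The CMOVL* functions here all follow the same two-part scheme: an
// InvertFlags argument (flags produced by a compare with swapped operands)
// replaces the condition with its operand-reversed dual, CC (unsigned >=)
// becoming LS (unsigned <=) above, and a statically known flag value
// (FlagEQ, FlagGT_UGT, ...) resolves the conditional move to one operand
// outright. Sketch of the inversion rule (syntax approximate):
//
//	(CMOVLCC x y (InvertFlags cond)) => (CMOVLLS x y cond)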
4755 func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
4756 v_2 := v.Args[2]
4757 v_1 := v.Args[1]
4758 v_0 := v.Args[0]
4759 // match: (CMOVLCS x y (InvertFlags cond))
4760 // result: (CMOVLHI x y cond)
4761 for {
4762 x := v_0
4763 y := v_1
4764 if v_2.Op != OpAMD64InvertFlags {
4765 break
4766 }
4767 cond := v_2.Args[0]
4768 v.reset(OpAMD64CMOVLHI)
4769 v.AddArg3(x, y, cond)
4770 return true
4771 }
4772 // match: (CMOVLCS y _ (FlagEQ))
4773 // result: y
4774 for {
4775 y := v_0
4776 if v_2.Op != OpAMD64FlagEQ {
4777 break
4778 }
4779 v.copyOf(y)
4780 return true
4781 }
4782 // match: (CMOVLCS y _ (FlagGT_UGT))
4783 // result: y
4784 for {
4785 y := v_0
4786 if v_2.Op != OpAMD64FlagGT_UGT {
4787 break
4788 }
4789 v.copyOf(y)
4790 return true
4791 }
4792 // match: (CMOVLCS _ x (FlagGT_ULT))
4793 // result: x
4794 for {
4795 x := v_1
4796 if v_2.Op != OpAMD64FlagGT_ULT {
4797 break
4798 }
4799 v.copyOf(x)
4800 return true
4801 }
4802 // match: (CMOVLCS _ x (FlagLT_ULT))
4803 // result: x
4804 for {
4805 x := v_1
4806 if v_2.Op != OpAMD64FlagLT_ULT {
4807 break
4808 }
4809 v.copyOf(x)
4810 return true
4811 }
4812 // match: (CMOVLCS y _ (FlagLT_UGT))
4813 // result: y
4814 for {
4815 y := v_0
4816 if v_2.Op != OpAMD64FlagLT_UGT {
4817 break
4818 }
4819 v.copyOf(y)
4820 return true
4821 }
4822 return false
4823 }
4824 func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool {
4825 v_2 := v.Args[2]
4826 v_1 := v.Args[1]
4827 v_0 := v.Args[0]
4828 // match: (CMOVLEQ x y (InvertFlags cond))
4829 // result: (CMOVLEQ x y cond)
4830 for {
4831 x := v_0
4832 y := v_1
4833 if v_2.Op != OpAMD64InvertFlags {
4834 break
4835 }
4836 cond := v_2.Args[0]
4837 v.reset(OpAMD64CMOVLEQ)
4838 v.AddArg3(x, y, cond)
4839 return true
4840 }
4841 // match: (CMOVLEQ _ x (FlagEQ))
4842 // result: x
4843 for {
4844 x := v_1
4845 if v_2.Op != OpAMD64FlagEQ {
4846 break
4847 }
4848 v.copyOf(x)
4849 return true
4850 }
4851 // match: (CMOVLEQ y _ (FlagGT_UGT))
4852 // result: y
4853 for {
4854 y := v_0
4855 if v_2.Op != OpAMD64FlagGT_UGT {
4856 break
4857 }
4858 v.copyOf(y)
4859 return true
4860 }
4861 // match: (CMOVLEQ y _ (FlagGT_ULT))
4862 // result: y
4863 for {
4864 y := v_0
4865 if v_2.Op != OpAMD64FlagGT_ULT {
4866 break
4867 }
4868 v.copyOf(y)
4869 return true
4870 }
4871 // match: (CMOVLEQ y _ (FlagLT_ULT))
4872 // result: y
4873 for {
4874 y := v_0
4875 if v_2.Op != OpAMD64FlagLT_ULT {
4876 break
4877 }
4878 v.copyOf(y)
4879 return true
4880 }
4881 // match: (CMOVLEQ y _ (FlagLT_UGT))
4882 // result: y
4883 for {
4884 y := v_0
4885 if v_2.Op != OpAMD64FlagLT_UGT {
4886 break
4887 }
4888 v.copyOf(y)
4889 return true
4890 }
4891 return false
4892 }
4893 func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool {
4894 v_2 := v.Args[2]
4895 v_1 := v.Args[1]
4896 v_0 := v.Args[0]
4897 // match: (CMOVLGE x y (InvertFlags cond))
4898 // result: (CMOVLLE x y cond)
4899 for {
4900 x := v_0
4901 y := v_1
4902 if v_2.Op != OpAMD64InvertFlags {
4903 break
4904 }
4905 cond := v_2.Args[0]
4906 v.reset(OpAMD64CMOVLLE)
4907 v.AddArg3(x, y, cond)
4908 return true
4909 }
4910 // match: (CMOVLGE _ x (FlagEQ))
4911 // result: x
4912 for {
4913 x := v_1
4914 if v_2.Op != OpAMD64FlagEQ {
4915 break
4916 }
4917 v.copyOf(x)
4918 return true
4919 }
4920 // match: (CMOVLGE _ x (FlagGT_UGT))
4921 // result: x
4922 for {
4923 x := v_1
4924 if v_2.Op != OpAMD64FlagGT_UGT {
4925 break
4926 }
4927 v.copyOf(x)
4928 return true
4929 }
4930 // match: (CMOVLGE _ x (FlagGT_ULT))
4931 // result: x
4932 for {
4933 x := v_1
4934 if v_2.Op != OpAMD64FlagGT_ULT {
4935 break
4936 }
4937 v.copyOf(x)
4938 return true
4939 }
4940 // match: (CMOVLGE y _ (FlagLT_ULT))
4941 // result: y
4942 for {
4943 y := v_0
4944 if v_2.Op != OpAMD64FlagLT_ULT {
4945 break
4946 }
4947 v.copyOf(y)
4948 return true
4949 }
4950 // match: (CMOVLGE y _ (FlagLT_UGT))
4951 // result: y
4952 for {
4953 y := v_0
4954 if v_2.Op != OpAMD64FlagLT_UGT {
4955 break
4956 }
4957 v.copyOf(y)
4958 return true
4959 }
4960 return false
4961 }
4962 func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
4963 v_2 := v.Args[2]
4964 v_1 := v.Args[1]
4965 v_0 := v.Args[0]
4966 // match: (CMOVLGT x y (InvertFlags cond))
4967 // result: (CMOVLLT x y cond)
4968 for {
4969 x := v_0
4970 y := v_1
4971 if v_2.Op != OpAMD64InvertFlags {
4972 break
4973 }
4974 cond := v_2.Args[0]
4975 v.reset(OpAMD64CMOVLLT)
4976 v.AddArg3(x, y, cond)
4977 return true
4978 }
4979 // match: (CMOVLGT y _ (FlagEQ))
4980 // result: y
4981 for {
4982 y := v_0
4983 if v_2.Op != OpAMD64FlagEQ {
4984 break
4985 }
4986 v.copyOf(y)
4987 return true
4988 }
4989 // match: (CMOVLGT _ x (FlagGT_UGT))
4990 // result: x
4991 for {
4992 x := v_1
4993 if v_2.Op != OpAMD64FlagGT_UGT {
4994 break
4995 }
4996 v.copyOf(x)
4997 return true
4998 }
4999 // match: (CMOVLGT _ x (FlagGT_ULT))
5000 // result: x
5001 for {
5002 x := v_1
5003 if v_2.Op != OpAMD64FlagGT_ULT {
5004 break
5005 }
5006 v.copyOf(x)
5007 return true
5008 }
5009 // match: (CMOVLGT y _ (FlagLT_ULT))
5010 // result: y
5011 for {
5012 y := v_0
5013 if v_2.Op != OpAMD64FlagLT_ULT {
5014 break
5015 }
5016 v.copyOf(y)
5017 return true
5018 }
5019 // match: (CMOVLGT y _ (FlagLT_UGT))
5020 // result: y
5021 for {
5022 y := v_0
5023 if v_2.Op != OpAMD64FlagLT_UGT {
5024 break
5025 }
5026 v.copyOf(y)
5027 return true
5028 }
5029 return false
5030 }
5031 func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool {
5032 v_2 := v.Args[2]
5033 v_1 := v.Args[1]
5034 v_0 := v.Args[0]
5035 // match: (CMOVLHI x y (InvertFlags cond))
5036 // result: (CMOVLCS x y cond)
5037 for {
5038 x := v_0
5039 y := v_1
5040 if v_2.Op != OpAMD64InvertFlags {
5041 break
5042 }
5043 cond := v_2.Args[0]
5044 v.reset(OpAMD64CMOVLCS)
5045 v.AddArg3(x, y, cond)
5046 return true
5047 }
5048 // match: (CMOVLHI y _ (FlagEQ))
5049 // result: y
5050 for {
5051 y := v_0
5052 if v_2.Op != OpAMD64FlagEQ {
5053 break
5054 }
5055 v.copyOf(y)
5056 return true
5057 }
5058 // match: (CMOVLHI _ x (FlagGT_UGT))
5059 // result: x
5060 for {
5061 x := v_1
5062 if v_2.Op != OpAMD64FlagGT_UGT {
5063 break
5064 }
5065 v.copyOf(x)
5066 return true
5067 }
5068 // match: (CMOVLHI y _ (FlagGT_ULT))
5069 // result: y
5070 for {
5071 y := v_0
5072 if v_2.Op != OpAMD64FlagGT_ULT {
5073 break
5074 }
5075 v.copyOf(y)
5076 return true
5077 }
5078 // match: (CMOVLHI y _ (FlagLT_ULT))
5079 // result: y
5080 for {
5081 y := v_0
5082 if v_2.Op != OpAMD64FlagLT_ULT {
5083 break
5084 }
5085 v.copyOf(y)
5086 return true
5087 }
5088 // match: (CMOVLHI _ x (FlagLT_UGT))
5089 // result: x
5090 for {
5091 x := v_1
5092 if v_2.Op != OpAMD64FlagLT_UGT {
5093 break
5094 }
5095 v.copyOf(x)
5096 return true
5097 }
5098 return false
5099 }
5100 func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool {
5101 v_2 := v.Args[2]
5102 v_1 := v.Args[1]
5103 v_0 := v.Args[0]
5104 // match: (CMOVLLE x y (InvertFlags cond))
5105 // result: (CMOVLGE x y cond)
5106 for {
5107 x := v_0
5108 y := v_1
5109 if v_2.Op != OpAMD64InvertFlags {
5110 break
5111 }
5112 cond := v_2.Args[0]
5113 v.reset(OpAMD64CMOVLGE)
5114 v.AddArg3(x, y, cond)
5115 return true
5116 }
5117 // match: (CMOVLLE _ x (FlagEQ))
5118 // result: x
5119 for {
5120 x := v_1
5121 if v_2.Op != OpAMD64FlagEQ {
5122 break
5123 }
5124 v.copyOf(x)
5125 return true
5126 }
5127 // match: (CMOVLLE y _ (FlagGT_UGT))
5128 // result: y
5129 for {
5130 y := v_0
5131 if v_2.Op != OpAMD64FlagGT_UGT {
5132 break
5133 }
5134 v.copyOf(y)
5135 return true
5136 }
5137 // match: (CMOVLLE y _ (FlagGT_ULT))
5138 // result: y
5139 for {
5140 y := v_0
5141 if v_2.Op != OpAMD64FlagGT_ULT {
5142 break
5143 }
5144 v.copyOf(y)
5145 return true
5146 }
5147 // match: (CMOVLLE _ x (FlagLT_ULT))
5148 // result: x
5149 for {
5150 x := v_1
5151 if v_2.Op != OpAMD64FlagLT_ULT {
5152 break
5153 }
5154 v.copyOf(x)
5155 return true
5156 }
5157 // match: (CMOVLLE _ x (FlagLT_UGT))
5158 // result: x
5159 for {
5160 x := v_1
5161 if v_2.Op != OpAMD64FlagLT_UGT {
5162 break
5163 }
5164 v.copyOf(x)
5165 return true
5166 }
5167 return false
5168 }
5169 func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool {
5170 v_2 := v.Args[2]
5171 v_1 := v.Args[1]
5172 v_0 := v.Args[0]
5173 // match: (CMOVLLS x y (InvertFlags cond))
5174 // result: (CMOVLCC x y cond)
5175 for {
5176 x := v_0
5177 y := v_1
5178 if v_2.Op != OpAMD64InvertFlags {
5179 break
5180 }
5181 cond := v_2.Args[0]
5182 v.reset(OpAMD64CMOVLCC)
5183 v.AddArg3(x, y, cond)
5184 return true
5185 }
5186 // match: (CMOVLLS _ x (FlagEQ))
5187 // result: x
5188 for {
5189 x := v_1
5190 if v_2.Op != OpAMD64FlagEQ {
5191 break
5192 }
5193 v.copyOf(x)
5194 return true
5195 }
5196 // match: (CMOVLLS y _ (FlagGT_UGT))
5197 // result: y
5198 for {
5199 y := v_0
5200 if v_2.Op != OpAMD64FlagGT_UGT {
5201 break
5202 }
5203 v.copyOf(y)
5204 return true
5205 }
5206 // match: (CMOVLLS _ x (FlagGT_ULT))
5207 // result: x
5208 for {
5209 x := v_1
5210 if v_2.Op != OpAMD64FlagGT_ULT {
5211 break
5212 }
5213 v.copyOf(x)
5214 return true
5215 }
5216 // match: (CMOVLLS _ x (FlagLT_ULT))
5217 // result: x
5218 for {
5219 x := v_1
5220 if v_2.Op != OpAMD64FlagLT_ULT {
5221 break
5222 }
5223 v.copyOf(x)
5224 return true
5225 }
5226 // match: (CMOVLLS y _ (FlagLT_UGT))
5227 // result: y
5228 for {
5229 y := v_0
5230 if v_2.Op != OpAMD64FlagLT_UGT {
5231 break
5232 }
5233 v.copyOf(y)
5234 return true
5235 }
5236 return false
5237 }
5238 func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool {
5239 v_2 := v.Args[2]
5240 v_1 := v.Args[1]
5241 v_0 := v.Args[0]
5242 // match: (CMOVLLT x y (InvertFlags cond))
5243 // result: (CMOVLGT x y cond)
5244 for {
5245 x := v_0
5246 y := v_1
5247 if v_2.Op != OpAMD64InvertFlags {
5248 break
5249 }
5250 cond := v_2.Args[0]
5251 v.reset(OpAMD64CMOVLGT)
5252 v.AddArg3(x, y, cond)
5253 return true
5254 }
5255 // match: (CMOVLLT y _ (FlagEQ))
5256 // result: y
5257 for {
5258 y := v_0
5259 if v_2.Op != OpAMD64FlagEQ {
5260 break
5261 }
5262 v.copyOf(y)
5263 return true
5264 }
5265 // match: (CMOVLLT y _ (FlagGT_UGT))
5266 // result: y
5267 for {
5268 y := v_0
5269 if v_2.Op != OpAMD64FlagGT_UGT {
5270 break
5271 }
5272 v.copyOf(y)
5273 return true
5274 }
5275 // match: (CMOVLLT y _ (FlagGT_ULT))
5276 // result: y
5277 for {
5278 y := v_0
5279 if v_2.Op != OpAMD64FlagGT_ULT {
5280 break
5281 }
5282 v.copyOf(y)
5283 return true
5284 }
5285 // match: (CMOVLLT _ x (FlagLT_ULT))
5286 // result: x
5287 for {
5288 x := v_1
5289 if v_2.Op != OpAMD64FlagLT_ULT {
5290 break
5291 }
5292 v.copyOf(x)
5293 return true
5294 }
5295 // match: (CMOVLLT _ x (FlagLT_UGT))
5296 // result: x
5297 for {
5298 x := v_1
5299 if v_2.Op != OpAMD64FlagLT_UGT {
5300 break
5301 }
5302 v.copyOf(x)
5303 return true
5304 }
5305 return false
5306 }
5307 func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool {
5308 v_2 := v.Args[2]
5309 v_1 := v.Args[1]
5310 v_0 := v.Args[0]
5311 // match: (CMOVLNE x y (InvertFlags cond))
5312 // result: (CMOVLNE x y cond)
5313 for {
5314 x := v_0
5315 y := v_1
5316 if v_2.Op != OpAMD64InvertFlags {
5317 break
5318 }
5319 cond := v_2.Args[0]
5320 v.reset(OpAMD64CMOVLNE)
5321 v.AddArg3(x, y, cond)
5322 return true
5323 }
5324 // match: (CMOVLNE y _ (FlagEQ))
5325 // result: y
5326 for {
5327 y := v_0
5328 if v_2.Op != OpAMD64FlagEQ {
5329 break
5330 }
5331 v.copyOf(y)
5332 return true
5333 }
5334 // match: (CMOVLNE _ x (FlagGT_UGT))
5335 // result: x
5336 for {
5337 x := v_1
5338 if v_2.Op != OpAMD64FlagGT_UGT {
5339 break
5340 }
5341 v.copyOf(x)
5342 return true
5343 }
5344 // match: (CMOVLNE _ x (FlagGT_ULT))
5345 // result: x
5346 for {
5347 x := v_1
5348 if v_2.Op != OpAMD64FlagGT_ULT {
5349 break
5350 }
5351 v.copyOf(x)
5352 return true
5353 }
5354 // match: (CMOVLNE _ x (FlagLT_ULT))
5355 // result: x
5356 for {
5357 x := v_1
5358 if v_2.Op != OpAMD64FlagLT_ULT {
5359 break
5360 }
5361 v.copyOf(x)
5362 return true
5363 }
5364 // match: (CMOVLNE _ x (FlagLT_UGT))
5365 // result: x
5366 for {
5367 x := v_1
5368 if v_2.Op != OpAMD64FlagLT_UGT {
5369 break
5370 }
5371 v.copyOf(x)
5372 return true
5373 }
5374 return false
5375 }
5376 func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool {
5377 v_2 := v.Args[2]
5378 v_1 := v.Args[1]
5379 v_0 := v.Args[0]
5380 // match: (CMOVQCC x y (InvertFlags cond))
5381 // result: (CMOVQLS x y cond)
5382 for {
5383 x := v_0
5384 y := v_1
5385 if v_2.Op != OpAMD64InvertFlags {
5386 break
5387 }
5388 cond := v_2.Args[0]
5389 v.reset(OpAMD64CMOVQLS)
5390 v.AddArg3(x, y, cond)
5391 return true
5392 }
5393 // match: (CMOVQCC _ x (FlagEQ))
5394 // result: x
5395 for {
5396 x := v_1
5397 if v_2.Op != OpAMD64FlagEQ {
5398 break
5399 }
5400 v.copyOf(x)
5401 return true
5402 }
5403 // match: (CMOVQCC _ x (FlagGT_UGT))
5404 // result: x
5405 for {
5406 x := v_1
5407 if v_2.Op != OpAMD64FlagGT_UGT {
5408 break
5409 }
5410 v.copyOf(x)
5411 return true
5412 }
5413 // match: (CMOVQCC y _ (FlagGT_ULT))
5414 // result: y
5415 for {
5416 y := v_0
5417 if v_2.Op != OpAMD64FlagGT_ULT {
5418 break
5419 }
5420 v.copyOf(y)
5421 return true
5422 }
5423 // match: (CMOVQCC y _ (FlagLT_ULT))
5424 // result: y
5425 for {
5426 y := v_0
5427 if v_2.Op != OpAMD64FlagLT_ULT {
5428 break
5429 }
5430 v.copyOf(y)
5431 return true
5432 }
5433 // match: (CMOVQCC _ x (FlagLT_UGT))
5434 // result: x
5435 for {
5436 x := v_1
5437 if v_2.Op != OpAMD64FlagLT_UGT {
5438 break
5439 }
5440 v.copyOf(x)
5441 return true
5442 }
5443 return false
5444 }
5445 func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool {
5446 v_2 := v.Args[2]
5447 v_1 := v.Args[1]
5448 v_0 := v.Args[0]
5449 // match: (CMOVQCS x y (InvertFlags cond))
5450 // result: (CMOVQHI x y cond)
5451 for {
5452 x := v_0
5453 y := v_1
5454 if v_2.Op != OpAMD64InvertFlags {
5455 break
5456 }
5457 cond := v_2.Args[0]
5458 v.reset(OpAMD64CMOVQHI)
5459 v.AddArg3(x, y, cond)
5460 return true
5461 }
5462 // match: (CMOVQCS y _ (FlagEQ))
5463 // result: y
5464 for {
5465 y := v_0
5466 if v_2.Op != OpAMD64FlagEQ {
5467 break
5468 }
5469 v.copyOf(y)
5470 return true
5471 }
5472 // match: (CMOVQCS y _ (FlagGT_UGT))
5473 // result: y
5474 for {
5475 y := v_0
5476 if v_2.Op != OpAMD64FlagGT_UGT {
5477 break
5478 }
5479 v.copyOf(y)
5480 return true
5481 }
5482 // match: (CMOVQCS _ x (FlagGT_ULT))
5483 // result: x
5484 for {
5485 x := v_1
5486 if v_2.Op != OpAMD64FlagGT_ULT {
5487 break
5488 }
5489 v.copyOf(x)
5490 return true
5491 }
5492 // match: (CMOVQCS _ x (FlagLT_ULT))
5493 // result: x
5494 for {
5495 x := v_1
5496 if v_2.Op != OpAMD64FlagLT_ULT {
5497 break
5498 }
5499 v.copyOf(x)
5500 return true
5501 }
5502 // match: (CMOVQCS y _ (FlagLT_UGT))
5503 // result: y
5504 for {
5505 y := v_0
5506 if v_2.Op != OpAMD64FlagLT_UGT {
5507 break
5508 }
5509 v.copyOf(y)
5510 return true
5511 }
5512 return false
5513 }
5514 func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
5515 v_2 := v.Args[2]
5516 v_1 := v.Args[1]
5517 v_0 := v.Args[0]
5518 // match: (CMOVQEQ x y (InvertFlags cond))
5519 // result: (CMOVQEQ x y cond)
5520 for {
5521 x := v_0
5522 y := v_1
5523 if v_2.Op != OpAMD64InvertFlags {
5524 break
5525 }
5526 cond := v_2.Args[0]
5527 v.reset(OpAMD64CMOVQEQ)
5528 v.AddArg3(x, y, cond)
5529 return true
5530 }
5531 // match: (CMOVQEQ _ x (FlagEQ))
5532 // result: x
5533 for {
5534 x := v_1
5535 if v_2.Op != OpAMD64FlagEQ {
5536 break
5537 }
5538 v.copyOf(x)
5539 return true
5540 }
5541 // match: (CMOVQEQ y _ (FlagGT_UGT))
5542 // result: y
5543 for {
5544 y := v_0
5545 if v_2.Op != OpAMD64FlagGT_UGT {
5546 break
5547 }
5548 v.copyOf(y)
5549 return true
5550 }
5551 // match: (CMOVQEQ y _ (FlagGT_ULT))
5552 // result: y
5553 for {
5554 y := v_0
5555 if v_2.Op != OpAMD64FlagGT_ULT {
5556 break
5557 }
5558 v.copyOf(y)
5559 return true
5560 }
5561 // match: (CMOVQEQ y _ (FlagLT_ULT))
5562 // result: y
5563 for {
5564 y := v_0
5565 if v_2.Op != OpAMD64FlagLT_ULT {
5566 break
5567 }
5568 v.copyOf(y)
5569 return true
5570 }
5571 // match: (CMOVQEQ y _ (FlagLT_UGT))
5572 // result: y
5573 for {
5574 y := v_0
5575 if v_2.Op != OpAMD64FlagLT_UGT {
5576 break
5577 }
5578 v.copyOf(y)
5579 return true
5580 }
5581 // match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
5582 // cond: c != 0
5583 // result: x
5584 for {
5585 x := v_0
5586 if v_2.Op != OpSelect1 {
5587 break
5588 }
5589 v_2_0 := v_2.Args[0]
5590 if v_2_0.Op != OpAMD64BSFQ {
5591 break
5592 }
5593 v_2_0_0 := v_2_0.Args[0]
5594 if v_2_0_0.Op != OpAMD64ORQconst {
5595 break
5596 }
5597 c := auxIntToInt32(v_2_0_0.AuxInt)
5598 if !(c != 0) {
5599 break
5600 }
5601 v.copyOf(x)
5602 return true
5603 }
5604 return false
5605 }
5606 func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool {
5607 v_2 := v.Args[2]
5608 v_1 := v.Args[1]
5609 v_0 := v.Args[0]
5610 // match: (CMOVQGE x y (InvertFlags cond))
5611 // result: (CMOVQLE x y cond)
5612 for {
5613 x := v_0
5614 y := v_1
5615 if v_2.Op != OpAMD64InvertFlags {
5616 break
5617 }
5618 cond := v_2.Args[0]
5619 v.reset(OpAMD64CMOVQLE)
5620 v.AddArg3(x, y, cond)
5621 return true
5622 }
5623 // match: (CMOVQGE _ x (FlagEQ))
5624 // result: x
5625 for {
5626 x := v_1
5627 if v_2.Op != OpAMD64FlagEQ {
5628 break
5629 }
5630 v.copyOf(x)
5631 return true
5632 }
5633 // match: (CMOVQGE _ x (FlagGT_UGT))
5634 // result: x
5635 for {
5636 x := v_1
5637 if v_2.Op != OpAMD64FlagGT_UGT {
5638 break
5639 }
5640 v.copyOf(x)
5641 return true
5642 }
5643 // match: (CMOVQGE _ x (FlagGT_ULT))
5644 // result: x
5645 for {
5646 x := v_1
5647 if v_2.Op != OpAMD64FlagGT_ULT {
5648 break
5649 }
5650 v.copyOf(x)
5651 return true
5652 }
5653 // match: (CMOVQGE y _ (FlagLT_ULT))
5654 // result: y
5655 for {
5656 y := v_0
5657 if v_2.Op != OpAMD64FlagLT_ULT {
5658 break
5659 }
5660 v.copyOf(y)
5661 return true
5662 }
5663 // match: (CMOVQGE y _ (FlagLT_UGT))
5664 // result: y
5665 for {
5666 y := v_0
5667 if v_2.Op != OpAMD64FlagLT_UGT {
5668 break
5669 }
5670 v.copyOf(y)
5671 return true
5672 }
5673 return false
5674 }
5675 func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool {
5676 v_2 := v.Args[2]
5677 v_1 := v.Args[1]
5678 v_0 := v.Args[0]
5679 // match: (CMOVQGT x y (InvertFlags cond))
5680 // result: (CMOVQLT x y cond)
5681 for {
5682 x := v_0
5683 y := v_1
5684 if v_2.Op != OpAMD64InvertFlags {
5685 break
5686 }
5687 cond := v_2.Args[0]
5688 v.reset(OpAMD64CMOVQLT)
5689 v.AddArg3(x, y, cond)
5690 return true
5691 }
5692 // match: (CMOVQGT y _ (FlagEQ))
5693 // result: y
5694 for {
5695 y := v_0
5696 if v_2.Op != OpAMD64FlagEQ {
5697 break
5698 }
5699 v.copyOf(y)
5700 return true
5701 }
5702 // match: (CMOVQGT _ x (FlagGT_UGT))
5703 // result: x
5704 for {
5705 x := v_1
5706 if v_2.Op != OpAMD64FlagGT_UGT {
5707 break
5708 }
5709 v.copyOf(x)
5710 return true
5711 }
5712 // match: (CMOVQGT _ x (FlagGT_ULT))
5713 // result: x
5714 for {
5715 x := v_1
5716 if v_2.Op != OpAMD64FlagGT_ULT {
5717 break
5718 }
5719 v.copyOf(x)
5720 return true
5721 }
5722 // match: (CMOVQGT y _ (FlagLT_ULT))
5723 // result: y
5724 for {
5725 y := v_0
5726 if v_2.Op != OpAMD64FlagLT_ULT {
5727 break
5728 }
5729 v.copyOf(y)
5730 return true
5731 }
5732 // match: (CMOVQGT y _ (FlagLT_UGT))
5733 // result: y
5734 for {
5735 y := v_0
5736 if v_2.Op != OpAMD64FlagLT_UGT {
5737 break
5738 }
5739 v.copyOf(y)
5740 return true
5741 }
5742 return false
5743 }
5744 func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool {
5745 v_2 := v.Args[2]
5746 v_1 := v.Args[1]
5747 v_0 := v.Args[0]
5748 // match: (CMOVQHI x y (InvertFlags cond))
5749 // result: (CMOVQCS x y cond)
5750 for {
5751 x := v_0
5752 y := v_1
5753 if v_2.Op != OpAMD64InvertFlags {
5754 break
5755 }
5756 cond := v_2.Args[0]
5757 v.reset(OpAMD64CMOVQCS)
5758 v.AddArg3(x, y, cond)
5759 return true
5760 }
5761 // match: (CMOVQHI y _ (FlagEQ))
5762 // result: y
5763 for {
5764 y := v_0
5765 if v_2.Op != OpAMD64FlagEQ {
5766 break
5767 }
5768 v.copyOf(y)
5769 return true
5770 }
5771 // match: (CMOVQHI _ x (FlagGT_UGT))
5772 // result: x
5773 for {
5774 x := v_1
5775 if v_2.Op != OpAMD64FlagGT_UGT {
5776 break
5777 }
5778 v.copyOf(x)
5779 return true
5780 }
5781 // match: (CMOVQHI y _ (FlagGT_ULT))
5782 // result: y
5783 for {
5784 y := v_0
5785 if v_2.Op != OpAMD64FlagGT_ULT {
5786 break
5787 }
5788 v.copyOf(y)
5789 return true
5790 }
5791 // match: (CMOVQHI y _ (FlagLT_ULT))
5792 // result: y
5793 for {
5794 y := v_0
5795 if v_2.Op != OpAMD64FlagLT_ULT {
5796 break
5797 }
5798 v.copyOf(y)
5799 return true
5800 }
5801 // match: (CMOVQHI _ x (FlagLT_UGT))
5802 // result: x
5803 for {
5804 x := v_1
5805 if v_2.Op != OpAMD64FlagLT_UGT {
5806 break
5807 }
5808 v.copyOf(x)
5809 return true
5810 }
5811 return false
5812 }
5813 func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool {
5814 v_2 := v.Args[2]
5815 v_1 := v.Args[1]
5816 v_0 := v.Args[0]
5817 // match: (CMOVQLE x y (InvertFlags cond))
5818 // result: (CMOVQGE x y cond)
5819 for {
5820 x := v_0
5821 y := v_1
5822 if v_2.Op != OpAMD64InvertFlags {
5823 break
5824 }
5825 cond := v_2.Args[0]
5826 v.reset(OpAMD64CMOVQGE)
5827 v.AddArg3(x, y, cond)
5828 return true
5829 }
5830 // match: (CMOVQLE _ x (FlagEQ))
5831 // result: x
5832 for {
5833 x := v_1
5834 if v_2.Op != OpAMD64FlagEQ {
5835 break
5836 }
5837 v.copyOf(x)
5838 return true
5839 }
5840 // match: (CMOVQLE y _ (FlagGT_UGT))
5841 // result: y
5842 for {
5843 y := v_0
5844 if v_2.Op != OpAMD64FlagGT_UGT {
5845 break
5846 }
5847 v.copyOf(y)
5848 return true
5849 }
5850 // match: (CMOVQLE y _ (FlagGT_ULT))
5851 // result: y
5852 for {
5853 y := v_0
5854 if v_2.Op != OpAMD64FlagGT_ULT {
5855 break
5856 }
5857 v.copyOf(y)
5858 return true
5859 }
5860 // match: (CMOVQLE _ x (FlagLT_ULT))
5861 // result: x
5862 for {
5863 x := v_1
5864 if v_2.Op != OpAMD64FlagLT_ULT {
5865 break
5866 }
5867 v.copyOf(x)
5868 return true
5869 }
5870 // match: (CMOVQLE _ x (FlagLT_UGT))
5871 // result: x
5872 for {
5873 x := v_1
5874 if v_2.Op != OpAMD64FlagLT_UGT {
5875 break
5876 }
5877 v.copyOf(x)
5878 return true
5879 }
5880 return false
5881 }
5882 func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool {
5883 v_2 := v.Args[2]
5884 v_1 := v.Args[1]
5885 v_0 := v.Args[0]
5886 // match: (CMOVQLS x y (InvertFlags cond))
5887 // result: (CMOVQCC x y cond)
5888 for {
5889 x := v_0
5890 y := v_1
5891 if v_2.Op != OpAMD64InvertFlags {
5892 break
5893 }
5894 cond := v_2.Args[0]
5895 v.reset(OpAMD64CMOVQCC)
5896 v.AddArg3(x, y, cond)
5897 return true
5898 }
5899 // match: (CMOVQLS _ x (FlagEQ))
5900 // result: x
5901 for {
5902 x := v_1
5903 if v_2.Op != OpAMD64FlagEQ {
5904 break
5905 }
5906 v.copyOf(x)
5907 return true
5908 }
5909 // match: (CMOVQLS y _ (FlagGT_UGT))
5910 // result: y
5911 for {
5912 y := v_0
5913 if v_2.Op != OpAMD64FlagGT_UGT {
5914 break
5915 }
5916 v.copyOf(y)
5917 return true
5918 }
5919 // match: (CMOVQLS _ x (FlagGT_ULT))
5920 // result: x
5921 for {
5922 x := v_1
5923 if v_2.Op != OpAMD64FlagGT_ULT {
5924 break
5925 }
5926 v.copyOf(x)
5927 return true
5928 }
5929 // match: (CMOVQLS _ x (FlagLT_ULT))
5930 // result: x
5931 for {
5932 x := v_1
5933 if v_2.Op != OpAMD64FlagLT_ULT {
5934 break
5935 }
5936 v.copyOf(x)
5937 return true
5938 }
5939 // match: (CMOVQLS y _ (FlagLT_UGT))
5940 // result: y
5941 for {
5942 y := v_0
5943 if v_2.Op != OpAMD64FlagLT_UGT {
5944 break
5945 }
5946 v.copyOf(y)
5947 return true
5948 }
5949 return false
5950 }
5951 func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool {
5952 v_2 := v.Args[2]
5953 v_1 := v.Args[1]
5954 v_0 := v.Args[0]
5955 // match: (CMOVQLT x y (InvertFlags cond))
5956 // result: (CMOVQGT x y cond)
5957 for {
5958 x := v_0
5959 y := v_1
5960 if v_2.Op != OpAMD64InvertFlags {
5961 break
5962 }
5963 cond := v_2.Args[0]
5964 v.reset(OpAMD64CMOVQGT)
5965 v.AddArg3(x, y, cond)
5966 return true
5967 }
5968 // match: (CMOVQLT y _ (FlagEQ))
5969 // result: y
5970 for {
5971 y := v_0
5972 if v_2.Op != OpAMD64FlagEQ {
5973 break
5974 }
5975 v.copyOf(y)
5976 return true
5977 }
5978 // match: (CMOVQLT y _ (FlagGT_UGT))
5979 // result: y
5980 for {
5981 y := v_0
5982 if v_2.Op != OpAMD64FlagGT_UGT {
5983 break
5984 }
5985 v.copyOf(y)
5986 return true
5987 }
5988 // match: (CMOVQLT y _ (FlagGT_ULT))
5989 // result: y
5990 for {
5991 y := v_0
5992 if v_2.Op != OpAMD64FlagGT_ULT {
5993 break
5994 }
5995 v.copyOf(y)
5996 return true
5997 }
5998 // match: (CMOVQLT _ x (FlagLT_ULT))
5999 // result: x
6000 for {
6001 x := v_1
6002 if v_2.Op != OpAMD64FlagLT_ULT {
6003 break
6004 }
6005 v.copyOf(x)
6006 return true
6007 }
6008 // match: (CMOVQLT _ x (FlagLT_UGT))
6009 // result: x
6010 for {
6011 x := v_1
6012 if v_2.Op != OpAMD64FlagLT_UGT {
6013 break
6014 }
6015 v.copyOf(x)
6016 return true
6017 }
6018 return false
6019 }
6020 func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool {
6021 v_2 := v.Args[2]
6022 v_1 := v.Args[1]
6023 v_0 := v.Args[0]
6024 // match: (CMOVQNE x y (InvertFlags cond))
6025 // result: (CMOVQNE x y cond)
6026 for {
6027 x := v_0
6028 y := v_1
6029 if v_2.Op != OpAMD64InvertFlags {
6030 break
6031 }
6032 cond := v_2.Args[0]
6033 v.reset(OpAMD64CMOVQNE)
6034 v.AddArg3(x, y, cond)
6035 return true
6036 }
6037 // match: (CMOVQNE y _ (FlagEQ))
6038 // result: y
6039 for {
6040 y := v_0
6041 if v_2.Op != OpAMD64FlagEQ {
6042 break
6043 }
6044 v.copyOf(y)
6045 return true
6046 }
6047 // match: (CMOVQNE _ x (FlagGT_UGT))
6048 // result: x
6049 for {
6050 x := v_1
6051 if v_2.Op != OpAMD64FlagGT_UGT {
6052 break
6053 }
6054 v.copyOf(x)
6055 return true
6056 }
6057 // match: (CMOVQNE _ x (FlagGT_ULT))
6058 // result: x
6059 for {
6060 x := v_1
6061 if v_2.Op != OpAMD64FlagGT_ULT {
6062 break
6063 }
6064 v.copyOf(x)
6065 return true
6066 }
6067 // match: (CMOVQNE _ x (FlagLT_ULT))
6068 // result: x
6069 for {
6070 x := v_1
6071 if v_2.Op != OpAMD64FlagLT_ULT {
6072 break
6073 }
6074 v.copyOf(x)
6075 return true
6076 }
6077 // match: (CMOVQNE _ x (FlagLT_UGT))
6078 // result: x
6079 for {
6080 x := v_1
6081 if v_2.Op != OpAMD64FlagLT_UGT {
6082 break
6083 }
6084 v.copyOf(x)
6085 return true
6086 }
6087 return false
6088 }
6089 func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool {
6090 v_2 := v.Args[2]
6091 v_1 := v.Args[1]
6092 v_0 := v.Args[0]
6093 // match: (CMOVWCC x y (InvertFlags cond))
6094 // result: (CMOVWLS x y cond)
6095 for {
6096 x := v_0
6097 y := v_1
6098 if v_2.Op != OpAMD64InvertFlags {
6099 break
6100 }
6101 cond := v_2.Args[0]
6102 v.reset(OpAMD64CMOVWLS)
6103 v.AddArg3(x, y, cond)
6104 return true
6105 }
6106 // match: (CMOVWCC _ x (FlagEQ))
6107 // result: x
6108 for {
6109 x := v_1
6110 if v_2.Op != OpAMD64FlagEQ {
6111 break
6112 }
6113 v.copyOf(x)
6114 return true
6115 }
6116 // match: (CMOVWCC _ x (FlagGT_UGT))
6117 // result: x
6118 for {
6119 x := v_1
6120 if v_2.Op != OpAMD64FlagGT_UGT {
6121 break
6122 }
6123 v.copyOf(x)
6124 return true
6125 }
6126 // match: (CMOVWCC y _ (FlagGT_ULT))
6127 // result: y
6128 for {
6129 y := v_0
6130 if v_2.Op != OpAMD64FlagGT_ULT {
6131 break
6132 }
6133 v.copyOf(y)
6134 return true
6135 }
6136 // match: (CMOVWCC y _ (FlagLT_ULT))
6137 // result: y
6138 for {
6139 y := v_0
6140 if v_2.Op != OpAMD64FlagLT_ULT {
6141 break
6142 }
6143 v.copyOf(y)
6144 return true
6145 }
6146 // match: (CMOVWCC _ x (FlagLT_UGT))
6147 // result: x
6148 for {
6149 x := v_1
6150 if v_2.Op != OpAMD64FlagLT_UGT {
6151 break
6152 }
6153 v.copyOf(x)
6154 return true
6155 }
6156 return false
6157 }
6158 func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool {
6159 v_2 := v.Args[2]
6160 v_1 := v.Args[1]
6161 v_0 := v.Args[0]
6162 // match: (CMOVWCS x y (InvertFlags cond))
6163 // result: (CMOVWHI x y cond)
6164 for {
6165 x := v_0
6166 y := v_1
6167 if v_2.Op != OpAMD64InvertFlags {
6168 break
6169 }
6170 cond := v_2.Args[0]
6171 v.reset(OpAMD64CMOVWHI)
6172 v.AddArg3(x, y, cond)
6173 return true
6174 }
6175 // match: (CMOVWCS y _ (FlagEQ))
6176 // result: y
6177 for {
6178 y := v_0
6179 if v_2.Op != OpAMD64FlagEQ {
6180 break
6181 }
6182 v.copyOf(y)
6183 return true
6184 }
6185 // match: (CMOVWCS y _ (FlagGT_UGT))
6186 // result: y
6187 for {
6188 y := v_0
6189 if v_2.Op != OpAMD64FlagGT_UGT {
6190 break
6191 }
6192 v.copyOf(y)
6193 return true
6194 }
6195 // match: (CMOVWCS _ x (FlagGT_ULT))
6196 // result: x
6197 for {
6198 x := v_1
6199 if v_2.Op != OpAMD64FlagGT_ULT {
6200 break
6201 }
6202 v.copyOf(x)
6203 return true
6204 }
6205 // match: (CMOVWCS _ x (FlagLT_ULT))
6206 // result: x
6207 for {
6208 x := v_1
6209 if v_2.Op != OpAMD64FlagLT_ULT {
6210 break
6211 }
6212 v.copyOf(x)
6213 return true
6214 }
6215 // match: (CMOVWCS y _ (FlagLT_UGT))
6216 // result: y
6217 for {
6218 y := v_0
6219 if v_2.Op != OpAMD64FlagLT_UGT {
6220 break
6221 }
6222 v.copyOf(y)
6223 return true
6224 }
6225 return false
6226 }
6227 func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool {
6228 v_2 := v.Args[2]
6229 v_1 := v.Args[1]
6230 v_0 := v.Args[0]
6231 // match: (CMOVWEQ x y (InvertFlags cond))
6232 // result: (CMOVWEQ x y cond)
6233 for {
6234 x := v_0
6235 y := v_1
6236 if v_2.Op != OpAMD64InvertFlags {
6237 break
6238 }
6239 cond := v_2.Args[0]
6240 v.reset(OpAMD64CMOVWEQ)
6241 v.AddArg3(x, y, cond)
6242 return true
6243 }
6244 // match: (CMOVWEQ _ x (FlagEQ))
6245 // result: x
6246 for {
6247 x := v_1
6248 if v_2.Op != OpAMD64FlagEQ {
6249 break
6250 }
6251 v.copyOf(x)
6252 return true
6253 }
6254 // match: (CMOVWEQ y _ (FlagGT_UGT))
6255 // result: y
6256 for {
6257 y := v_0
6258 if v_2.Op != OpAMD64FlagGT_UGT {
6259 break
6260 }
6261 v.copyOf(y)
6262 return true
6263 }
6264 // match: (CMOVWEQ y _ (FlagGT_ULT))
6265 // result: y
6266 for {
6267 y := v_0
6268 if v_2.Op != OpAMD64FlagGT_ULT {
6269 break
6270 }
6271 v.copyOf(y)
6272 return true
6273 }
6274 // match: (CMOVWEQ y _ (FlagLT_ULT))
6275 // result: y
6276 for {
6277 y := v_0
6278 if v_2.Op != OpAMD64FlagLT_ULT {
6279 break
6280 }
6281 v.copyOf(y)
6282 return true
6283 }
6284 // match: (CMOVWEQ y _ (FlagLT_UGT))
6285 // result: y
6286 for {
6287 y := v_0
6288 if v_2.Op != OpAMD64FlagLT_UGT {
6289 break
6290 }
6291 v.copyOf(y)
6292 return true
6293 }
6294 return false
6295 }
6296 func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool {
6297 v_2 := v.Args[2]
6298 v_1 := v.Args[1]
6299 v_0 := v.Args[0]
6300 // match: (CMOVWGE x y (InvertFlags cond))
6301 // result: (CMOVWLE x y cond)
6302 for {
6303 x := v_0
6304 y := v_1
6305 if v_2.Op != OpAMD64InvertFlags {
6306 break
6307 }
6308 cond := v_2.Args[0]
6309 v.reset(OpAMD64CMOVWLE)
6310 v.AddArg3(x, y, cond)
6311 return true
6312 }
6313 // match: (CMOVWGE _ x (FlagEQ))
6314 // result: x
6315 for {
6316 x := v_1
6317 if v_2.Op != OpAMD64FlagEQ {
6318 break
6319 }
6320 v.copyOf(x)
6321 return true
6322 }
6323 // match: (CMOVWGE _ x (FlagGT_UGT))
6324 // result: x
6325 for {
6326 x := v_1
6327 if v_2.Op != OpAMD64FlagGT_UGT {
6328 break
6329 }
6330 v.copyOf(x)
6331 return true
6332 }
6333 // match: (CMOVWGE _ x (FlagGT_ULT))
6334 // result: x
6335 for {
6336 x := v_1
6337 if v_2.Op != OpAMD64FlagGT_ULT {
6338 break
6339 }
6340 v.copyOf(x)
6341 return true
6342 }
6343 // match: (CMOVWGE y _ (FlagLT_ULT))
6344 // result: y
6345 for {
6346 y := v_0
6347 if v_2.Op != OpAMD64FlagLT_ULT {
6348 break
6349 }
6350 v.copyOf(y)
6351 return true
6352 }
6353 // match: (CMOVWGE y _ (FlagLT_UGT))
6354 // result: y
6355 for {
6356 y := v_0
6357 if v_2.Op != OpAMD64FlagLT_UGT {
6358 break
6359 }
6360 v.copyOf(y)
6361 return true
6362 }
6363 return false
6364 }
6365 func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool {
6366 v_2 := v.Args[2]
6367 v_1 := v.Args[1]
6368 v_0 := v.Args[0]
6369 // match: (CMOVWGT x y (InvertFlags cond))
6370 // result: (CMOVWLT x y cond)
6371 for {
6372 x := v_0
6373 y := v_1
6374 if v_2.Op != OpAMD64InvertFlags {
6375 break
6376 }
6377 cond := v_2.Args[0]
6378 v.reset(OpAMD64CMOVWLT)
6379 v.AddArg3(x, y, cond)
6380 return true
6381 }
6382 // match: (CMOVWGT y _ (FlagEQ))
6383 // result: y
6384 for {
6385 y := v_0
6386 if v_2.Op != OpAMD64FlagEQ {
6387 break
6388 }
6389 v.copyOf(y)
6390 return true
6391 }
6392 // match: (CMOVWGT _ x (FlagGT_UGT))
6393 // result: x
6394 for {
6395 x := v_1
6396 if v_2.Op != OpAMD64FlagGT_UGT {
6397 break
6398 }
6399 v.copyOf(x)
6400 return true
6401 }
6402 // match: (CMOVWGT _ x (FlagGT_ULT))
6403 // result: x
6404 for {
6405 x := v_1
6406 if v_2.Op != OpAMD64FlagGT_ULT {
6407 break
6408 }
6409 v.copyOf(x)
6410 return true
6411 }
6412 // match: (CMOVWGT y _ (FlagLT_ULT))
6413 // result: y
6414 for {
6415 y := v_0
6416 if v_2.Op != OpAMD64FlagLT_ULT {
6417 break
6418 }
6419 v.copyOf(y)
6420 return true
6421 }
6422 // match: (CMOVWGT y _ (FlagLT_UGT))
6423 // result: y
6424 for {
6425 y := v_0
6426 if v_2.Op != OpAMD64FlagLT_UGT {
6427 break
6428 }
6429 v.copyOf(y)
6430 return true
6431 }
6432 return false
6433 }
6434 func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool {
6435 v_2 := v.Args[2]
6436 v_1 := v.Args[1]
6437 v_0 := v.Args[0]
6438 // match: (CMOVWHI x y (InvertFlags cond))
6439 // result: (CMOVWCS x y cond)
6440 for {
6441 x := v_0
6442 y := v_1
6443 if v_2.Op != OpAMD64InvertFlags {
6444 break
6445 }
6446 cond := v_2.Args[0]
6447 v.reset(OpAMD64CMOVWCS)
6448 v.AddArg3(x, y, cond)
6449 return true
6450 }
6451 // match: (CMOVWHI y _ (FlagEQ))
6452 // result: y
6453 for {
6454 y := v_0
6455 if v_2.Op != OpAMD64FlagEQ {
6456 break
6457 }
6458 v.copyOf(y)
6459 return true
6460 }
6461 // match: (CMOVWHI _ x (FlagGT_UGT))
6462 // result: x
6463 for {
6464 x := v_1
6465 if v_2.Op != OpAMD64FlagGT_UGT {
6466 break
6467 }
6468 v.copyOf(x)
6469 return true
6470 }
6471 // match: (CMOVWHI y _ (FlagGT_ULT))
6472 // result: y
6473 for {
6474 y := v_0
6475 if v_2.Op != OpAMD64FlagGT_ULT {
6476 break
6477 }
6478 v.copyOf(y)
6479 return true
6480 }
6481 // match: (CMOVWHI y _ (FlagLT_ULT))
6482 // result: y
6483 for {
6484 y := v_0
6485 if v_2.Op != OpAMD64FlagLT_ULT {
6486 break
6487 }
6488 v.copyOf(y)
6489 return true
6490 }
6491 // match: (CMOVWHI _ x (FlagLT_UGT))
6492 // result: x
6493 for {
6494 x := v_1
6495 if v_2.Op != OpAMD64FlagLT_UGT {
6496 break
6497 }
6498 v.copyOf(x)
6499 return true
6500 }
6501 return false
6502 }
6503 func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool {
6504 v_2 := v.Args[2]
6505 v_1 := v.Args[1]
6506 v_0 := v.Args[0]
6507 // match: (CMOVWLE x y (InvertFlags cond))
6508 // result: (CMOVWGE x y cond)
6509 for {
6510 x := v_0
6511 y := v_1
6512 if v_2.Op != OpAMD64InvertFlags {
6513 break
6514 }
6515 cond := v_2.Args[0]
6516 v.reset(OpAMD64CMOVWGE)
6517 v.AddArg3(x, y, cond)
6518 return true
6519 }
6520 // match: (CMOVWLE _ x (FlagEQ))
6521 // result: x
6522 for {
6523 x := v_1
6524 if v_2.Op != OpAMD64FlagEQ {
6525 break
6526 }
6527 v.copyOf(x)
6528 return true
6529 }
6530 // match: (CMOVWLE y _ (FlagGT_UGT))
6531 // result: y
6532 for {
6533 y := v_0
6534 if v_2.Op != OpAMD64FlagGT_UGT {
6535 break
6536 }
6537 v.copyOf(y)
6538 return true
6539 }
6540 // match: (CMOVWLE y _ (FlagGT_ULT))
6541 // result: y
6542 for {
6543 y := v_0
6544 if v_2.Op != OpAMD64FlagGT_ULT {
6545 break
6546 }
6547 v.copyOf(y)
6548 return true
6549 }
6550 // match: (CMOVWLE _ x (FlagLT_ULT))
6551 // result: x
6552 for {
6553 x := v_1
6554 if v_2.Op != OpAMD64FlagLT_ULT {
6555 break
6556 }
6557 v.copyOf(x)
6558 return true
6559 }
6560 // match: (CMOVWLE _ x (FlagLT_UGT))
6561 // result: x
6562 for {
6563 x := v_1
6564 if v_2.Op != OpAMD64FlagLT_UGT {
6565 break
6566 }
6567 v.copyOf(x)
6568 return true
6569 }
6570 return false
6571 }
6572 func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool {
6573 v_2 := v.Args[2]
6574 v_1 := v.Args[1]
6575 v_0 := v.Args[0]
6576 // match: (CMOVWLS x y (InvertFlags cond))
6577 // result: (CMOVWCC x y cond)
6578 for {
6579 x := v_0
6580 y := v_1
6581 if v_2.Op != OpAMD64InvertFlags {
6582 break
6583 }
6584 cond := v_2.Args[0]
6585 v.reset(OpAMD64CMOVWCC)
6586 v.AddArg3(x, y, cond)
6587 return true
6588 }
6589 // match: (CMOVWLS _ x (FlagEQ))
6590 // result: x
6591 for {
6592 x := v_1
6593 if v_2.Op != OpAMD64FlagEQ {
6594 break
6595 }
6596 v.copyOf(x)
6597 return true
6598 }
6599 // match: (CMOVWLS y _ (FlagGT_UGT))
6600 // result: y
6601 for {
6602 y := v_0
6603 if v_2.Op != OpAMD64FlagGT_UGT {
6604 break
6605 }
6606 v.copyOf(y)
6607 return true
6608 }
6609 // match: (CMOVWLS _ x (FlagGT_ULT))
6610 // result: x
6611 for {
6612 x := v_1
6613 if v_2.Op != OpAMD64FlagGT_ULT {
6614 break
6615 }
6616 v.copyOf(x)
6617 return true
6618 }
6619 // match: (CMOVWLS _ x (FlagLT_ULT))
6620 // result: x
6621 for {
6622 x := v_1
6623 if v_2.Op != OpAMD64FlagLT_ULT {
6624 break
6625 }
6626 v.copyOf(x)
6627 return true
6628 }
6629 // match: (CMOVWLS y _ (FlagLT_UGT))
6630 // result: y
6631 for {
6632 y := v_0
6633 if v_2.Op != OpAMD64FlagLT_UGT {
6634 break
6635 }
6636 v.copyOf(y)
6637 return true
6638 }
6639 return false
6640 }
6641 func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool {
6642 v_2 := v.Args[2]
6643 v_1 := v.Args[1]
6644 v_0 := v.Args[0]
6645 // match: (CMOVWLT x y (InvertFlags cond))
6646 // result: (CMOVWGT x y cond)
6647 for {
6648 x := v_0
6649 y := v_1
6650 if v_2.Op != OpAMD64InvertFlags {
6651 break
6652 }
6653 cond := v_2.Args[0]
6654 v.reset(OpAMD64CMOVWGT)
6655 v.AddArg3(x, y, cond)
6656 return true
6657 }
6658 // match: (CMOVWLT y _ (FlagEQ))
6659 // result: y
6660 for {
6661 y := v_0
6662 if v_2.Op != OpAMD64FlagEQ {
6663 break
6664 }
6665 v.copyOf(y)
6666 return true
6667 }
6668 // match: (CMOVWLT y _ (FlagGT_UGT))
6669 // result: y
6670 for {
6671 y := v_0
6672 if v_2.Op != OpAMD64FlagGT_UGT {
6673 break
6674 }
6675 v.copyOf(y)
6676 return true
6677 }
6678 // match: (CMOVWLT y _ (FlagGT_ULT))
6679 // result: y
6680 for {
6681 y := v_0
6682 if v_2.Op != OpAMD64FlagGT_ULT {
6683 break
6684 }
6685 v.copyOf(y)
6686 return true
6687 }
6688 // match: (CMOVWLT _ x (FlagLT_ULT))
6689 // result: x
6690 for {
6691 x := v_1
6692 if v_2.Op != OpAMD64FlagLT_ULT {
6693 break
6694 }
6695 v.copyOf(x)
6696 return true
6697 }
6698 // match: (CMOVWLT _ x (FlagLT_UGT))
6699 // result: x
6700 for {
6701 x := v_1
6702 if v_2.Op != OpAMD64FlagLT_UGT {
6703 break
6704 }
6705 v.copyOf(x)
6706 return true
6707 }
6708 return false
6709 }
6710 func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool {
6711 v_2 := v.Args[2]
6712 v_1 := v.Args[1]
6713 v_0 := v.Args[0]
6714 // match: (CMOVWNE x y (InvertFlags cond))
6715 // result: (CMOVWNE x y cond)
6716 for {
6717 x := v_0
6718 y := v_1
6719 if v_2.Op != OpAMD64InvertFlags {
6720 break
6721 }
6722 cond := v_2.Args[0]
6723 v.reset(OpAMD64CMOVWNE)
6724 v.AddArg3(x, y, cond)
6725 return true
6726 }
6727 // match: (CMOVWNE y _ (FlagEQ))
6728 // result: y
6729 for {
6730 y := v_0
6731 if v_2.Op != OpAMD64FlagEQ {
6732 break
6733 }
6734 v.copyOf(y)
6735 return true
6736 }
6737 // match: (CMOVWNE _ x (FlagGT_UGT))
6738 // result: x
6739 for {
6740 x := v_1
6741 if v_2.Op != OpAMD64FlagGT_UGT {
6742 break
6743 }
6744 v.copyOf(x)
6745 return true
6746 }
6747 // match: (CMOVWNE _ x (FlagGT_ULT))
6748 // result: x
6749 for {
6750 x := v_1
6751 if v_2.Op != OpAMD64FlagGT_ULT {
6752 break
6753 }
6754 v.copyOf(x)
6755 return true
6756 }
6757 // match: (CMOVWNE _ x (FlagLT_ULT))
6758 // result: x
6759 for {
6760 x := v_1
6761 if v_2.Op != OpAMD64FlagLT_ULT {
6762 break
6763 }
6764 v.copyOf(x)
6765 return true
6766 }
6767 // match: (CMOVWNE _ x (FlagLT_UGT))
6768 // result: x
6769 for {
6770 x := v_1
6771 if v_2.Op != OpAMD64FlagLT_UGT {
6772 break
6773 }
6774 v.copyOf(x)
6775 return true
6776 }
6777 return false
6778 }
6779 func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
6780 v_1 := v.Args[1]
6781 v_0 := v.Args[0]
6782 b := v.Block
6783 // match: (CMPB x (MOVLconst [c]))
6784 // result: (CMPBconst x [int8(c)])
6785 for {
6786 x := v_0
6787 if v_1.Op != OpAMD64MOVLconst {
6788 break
6789 }
6790 c := auxIntToInt32(v_1.AuxInt)
6791 v.reset(OpAMD64CMPBconst)
6792 v.AuxInt = int8ToAuxInt(int8(c))
6793 v.AddArg(x)
6794 return true
6795 }
6796 // match: (CMPB (MOVLconst [c]) x)
6797 // result: (InvertFlags (CMPBconst x [int8(c)]))
6798 for {
6799 if v_0.Op != OpAMD64MOVLconst {
6800 break
6801 }
6802 c := auxIntToInt32(v_0.AuxInt)
6803 x := v_1
6804 v.reset(OpAMD64InvertFlags)
6805 v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
6806 v0.AuxInt = int8ToAuxInt(int8(c))
6807 v0.AddArg(x)
6808 v.AddArg(v0)
6809 return true
6810 }
6811 // match: (CMPB x y)
6812 // cond: x.ID > y.ID
6813 // result: (InvertFlags (CMPB y x))
6814 for {
6815 x := v_0
6816 y := v_1
6817 if !(x.ID > y.ID) {
6818 break
6819 }
6820 v.reset(OpAMD64InvertFlags)
6821 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
6822 v0.AddArg2(y, x)
6823 v.AddArg(v0)
6824 return true
6825 }
6826 // match: (CMPB l:(MOVBload {sym} [off] ptr mem) x)
6827 // cond: canMergeLoad(v, l) && clobber(l)
6828 // result: (CMPBload {sym} [off] ptr x mem)
6829 for {
6830 l := v_0
6831 if l.Op != OpAMD64MOVBload {
6832 break
6833 }
6834 off := auxIntToInt32(l.AuxInt)
6835 sym := auxToSym(l.Aux)
6836 mem := l.Args[1]
6837 ptr := l.Args[0]
6838 x := v_1
6839 if !(canMergeLoad(v, l) && clobber(l)) {
6840 break
6841 }
6842 v.reset(OpAMD64CMPBload)
6843 v.AuxInt = int32ToAuxInt(off)
6844 v.Aux = symToAux(sym)
6845 v.AddArg3(ptr, x, mem)
6846 return true
6847 }
6848 // match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
6849 // cond: canMergeLoad(v, l) && clobber(l)
6850 // result: (InvertFlags (CMPBload {sym} [off] ptr x mem))
6851 for {
6852 x := v_0
6853 l := v_1
6854 if l.Op != OpAMD64MOVBload {
6855 break
6856 }
6857 off := auxIntToInt32(l.AuxInt)
6858 sym := auxToSym(l.Aux)
6859 mem := l.Args[1]
6860 ptr := l.Args[0]
6861 if !(canMergeLoad(v, l) && clobber(l)) {
6862 break
6863 }
6864 v.reset(OpAMD64InvertFlags)
6865 v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags)
6866 v0.AuxInt = int32ToAuxInt(off)
6867 v0.Aux = symToAux(sym)
6868 v0.AddArg3(ptr, x, mem)
6869 v.AddArg(v0)
6870 return true
6871 }
6872 return false
6873 }
6874 func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
6875 v_0 := v.Args[0]
6876 b := v.Block
6877 // match: (CMPBconst (MOVLconst [x]) [y])
6878 // cond: int8(x) == y
6879 // result: (FlagEQ)
6880 for {
6881 y := auxIntToInt8(v.AuxInt)
6882 if v_0.Op != OpAMD64MOVLconst {
6883 break
6884 }
6885 x := auxIntToInt32(v_0.AuxInt)
6886 if !(int8(x) == y) {
6887 break
6888 }
6889 v.reset(OpAMD64FlagEQ)
6890 return true
6891 }
6892 // match: (CMPBconst (MOVLconst [x]) [y])
6893 // cond: int8(x) < y && uint8(x) < uint8(y)
6894 // result: (FlagLT_ULT)
6895 for {
6896 y := auxIntToInt8(v.AuxInt)
6897 if v_0.Op != OpAMD64MOVLconst {
6898 break
6899 }
6900 x := auxIntToInt32(v_0.AuxInt)
6901 if !(int8(x) < y && uint8(x) < uint8(y)) {
6902 break
6903 }
6904 v.reset(OpAMD64FlagLT_ULT)
6905 return true
6906 }
6907 // match: (CMPBconst (MOVLconst [x]) [y])
6908 // cond: int8(x) < y && uint8(x) > uint8(y)
6909 // result: (FlagLT_UGT)
6910 for {
6911 y := auxIntToInt8(v.AuxInt)
6912 if v_0.Op != OpAMD64MOVLconst {
6913 break
6914 }
6915 x := auxIntToInt32(v_0.AuxInt)
6916 if !(int8(x) < y && uint8(x) > uint8(y)) {
6917 break
6918 }
6919 v.reset(OpAMD64FlagLT_UGT)
6920 return true
6921 }
6922 // match: (CMPBconst (MOVLconst [x]) [y])
6923 // cond: int8(x) > y && uint8(x) < uint8(y)
6924 // result: (FlagGT_ULT)
6925 for {
6926 y := auxIntToInt8(v.AuxInt)
6927 if v_0.Op != OpAMD64MOVLconst {
6928 break
6929 }
6930 x := auxIntToInt32(v_0.AuxInt)
6931 if !(int8(x) > y && uint8(x) < uint8(y)) {
6932 break
6933 }
6934 v.reset(OpAMD64FlagGT_ULT)
6935 return true
6936 }
6937 // match: (CMPBconst (MOVLconst [x]) [y])
6938 // cond: int8(x) > y && uint8(x) > uint8(y)
6939 // result: (FlagGT_UGT)
6940 for {
6941 y := auxIntToInt8(v.AuxInt)
6942 if v_0.Op != OpAMD64MOVLconst {
6943 break
6944 }
6945 x := auxIntToInt32(v_0.AuxInt)
6946 if !(int8(x) > y && uint8(x) > uint8(y)) {
6947 break
6948 }
6949 v.reset(OpAMD64FlagGT_UGT)
6950 return true
6951 }
6952 // match: (CMPBconst (ANDLconst _ [m]) [n])
6953 // cond: 0 <= int8(m) && int8(m) < n
6954 // result: (FlagLT_ULT)
6955 for {
6956 n := auxIntToInt8(v.AuxInt)
6957 if v_0.Op != OpAMD64ANDLconst {
6958 break
6959 }
6960 m := auxIntToInt32(v_0.AuxInt)
6961 if !(0 <= int8(m) && int8(m) < n) {
6962 break
6963 }
6964 v.reset(OpAMD64FlagLT_ULT)
6965 return true
6966 }
6967 // match: (CMPBconst a:(ANDL x y) [0])
6968 // cond: a.Uses == 1
6969 // result: (TESTB x y)
6970 for {
6971 if auxIntToInt8(v.AuxInt) != 0 {
6972 break
6973 }
6974 a := v_0
6975 if a.Op != OpAMD64ANDL {
6976 break
6977 }
6978 y := a.Args[1]
6979 x := a.Args[0]
6980 if !(a.Uses == 1) {
6981 break
6982 }
6983 v.reset(OpAMD64TESTB)
6984 v.AddArg2(x, y)
6985 return true
6986 }
6987 // match: (CMPBconst a:(ANDLconst [c] x) [0])
6988 // cond: a.Uses == 1
6989 // result: (TESTBconst [int8(c)] x)
6990 for {
6991 if auxIntToInt8(v.AuxInt) != 0 {
6992 break
6993 }
6994 a := v_0
6995 if a.Op != OpAMD64ANDLconst {
6996 break
6997 }
6998 c := auxIntToInt32(a.AuxInt)
6999 x := a.Args[0]
7000 if !(a.Uses == 1) {
7001 break
7002 }
7003 v.reset(OpAMD64TESTBconst)
7004 v.AuxInt = int8ToAuxInt(int8(c))
7005 v.AddArg(x)
7006 return true
7007 }
7008 // match: (CMPBconst x [0])
7009 // result: (TESTB x x)
7010 for {
7011 if auxIntToInt8(v.AuxInt) != 0 {
7012 break
7013 }
7014 x := v_0
7015 v.reset(OpAMD64TESTB)
7016 v.AddArg2(x, x)
7017 return true
7018 }
7019 // match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
7020 // cond: l.Uses == 1 && clobber(l)
7021 // result: @l.Block (CMPBconstload {sym} [makeValAndOff32(int32(c),off)] ptr mem)
7022 for {
7023 c := auxIntToInt8(v.AuxInt)
7024 l := v_0
7025 if l.Op != OpAMD64MOVBload {
7026 break
7027 }
7028 off := auxIntToInt32(l.AuxInt)
7029 sym := auxToSym(l.Aux)
7030 mem := l.Args[1]
7031 ptr := l.Args[0]
7032 if !(l.Uses == 1 && clobber(l)) {
7033 break
7034 }
7035 b = l.Block
7036 v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
7037 v.copyOf(v0)
7038 v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
7039 v0.Aux = symToAux(sym)
7040 v0.AddArg2(ptr, mem)
7041 return true
7042 }
7043 return false
7044 }
7045 func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool {
7046 v_1 := v.Args[1]
7047 v_0 := v.Args[0]
7048 // match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
7049 // cond: ValAndOff(valoff1).canAdd32(off2)
7050 // result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
7051 for {
7052 valoff1 := auxIntToValAndOff(v.AuxInt)
7053 sym := auxToSym(v.Aux)
7054 if v_0.Op != OpAMD64ADDQconst {
7055 break
7056 }
7057 off2 := auxIntToInt32(v_0.AuxInt)
7058 base := v_0.Args[0]
7059 mem := v_1
7060 if !(ValAndOff(valoff1).canAdd32(off2)) {
7061 break
7062 }
7063 v.reset(OpAMD64CMPBconstload)
7064 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7065 v.Aux = symToAux(sym)
7066 v.AddArg2(base, mem)
7067 return true
7068 }
7069 // match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
7070 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
7071 // result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1, sym2)} base mem)
7072 for {
7073 valoff1 := auxIntToValAndOff(v.AuxInt)
7074 sym1 := auxToSym(v.Aux)
7075 if v_0.Op != OpAMD64LEAQ {
7076 break
7077 }
7078 off2 := auxIntToInt32(v_0.AuxInt)
7079 sym2 := auxToSym(v_0.Aux)
7080 base := v_0.Args[0]
7081 mem := v_1
7082 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
7083 break
7084 }
7085 v.reset(OpAMD64CMPBconstload)
7086 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7087 v.Aux = symToAux(mergeSym(sym1, sym2))
7088 v.AddArg2(base, mem)
7089 return true
7090 }
7091 return false
7092 }
7093 func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool {
7094 v_2 := v.Args[2]
7095 v_1 := v.Args[1]
7096 v_0 := v.Args[0]
7097 // match: (CMPBload [off1] {sym} (ADDQconst [off2] base) val mem)
7098 // cond: is32Bit(int64(off1)+int64(off2))
7099 // result: (CMPBload [off1+off2] {sym} base val mem)
7100 for {
7101 off1 := auxIntToInt32(v.AuxInt)
7102 sym := auxToSym(v.Aux)
7103 if v_0.Op != OpAMD64ADDQconst {
7104 break
7105 }
7106 off2 := auxIntToInt32(v_0.AuxInt)
7107 base := v_0.Args[0]
7108 val := v_1
7109 mem := v_2
7110 if !(is32Bit(int64(off1) + int64(off2))) {
7111 break
7112 }
7113 v.reset(OpAMD64CMPBload)
7114 v.AuxInt = int32ToAuxInt(off1 + off2)
7115 v.Aux = symToAux(sym)
7116 v.AddArg3(base, val, mem)
7117 return true
7118 }
7119 // match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
7120 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
7121 // result: (CMPBload [off1+off2] {mergeSym(sym1, sym2)} base val mem)
7122 for {
7123 off1 := auxIntToInt32(v.AuxInt)
7124 sym1 := auxToSym(v.Aux)
7125 if v_0.Op != OpAMD64LEAQ {
7126 break
7127 }
7128 off2 := auxIntToInt32(v_0.AuxInt)
7129 sym2 := auxToSym(v_0.Aux)
7130 base := v_0.Args[0]
7131 val := v_1
7132 mem := v_2
7133 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
7134 break
7135 }
7136 v.reset(OpAMD64CMPBload)
7137 v.AuxInt = int32ToAuxInt(off1 + off2)
7138 v.Aux = symToAux(mergeSym(sym1, sym2))
7139 v.AddArg3(base, val, mem)
7140 return true
7141 }
7142 // match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem)
7143 // cond: validValAndOff(int64(int8(c)), int64(off))
7144 // result: (CMPBconstload {sym} [makeValAndOff32(int32(int8(c)),off)] ptr mem)
7145 for {
7146 off := auxIntToInt32(v.AuxInt)
7147 sym := auxToSym(v.Aux)
7148 ptr := v_0
7149 if v_1.Op != OpAMD64MOVLconst {
7150 break
7151 }
7152 c := auxIntToInt32(v_1.AuxInt)
7153 mem := v_2
7154 if !(validValAndOff(int64(int8(c)), int64(off))) {
7155 break
7156 }
7157 v.reset(OpAMD64CMPBconstload)
7158 v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int8(c)), off))
7159 v.Aux = symToAux(sym)
7160 v.AddArg2(ptr, mem)
7161 return true
7162 }
7163 return false
7164 }
7165 func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
7166 v_1 := v.Args[1]
7167 v_0 := v.Args[0]
7168 b := v.Block
7169 // match: (CMPL x (MOVLconst [c]))
7170 // result: (CMPLconst x [c])
7171 for {
7172 x := v_0
7173 if v_1.Op != OpAMD64MOVLconst {
7174 break
7175 }
7176 c := auxIntToInt32(v_1.AuxInt)
7177 v.reset(OpAMD64CMPLconst)
7178 v.AuxInt = int32ToAuxInt(c)
7179 v.AddArg(x)
7180 return true
7181 }
7182 // match: (CMPL (MOVLconst [c]) x)
7183 // result: (InvertFlags (CMPLconst x [c]))
7184 for {
7185 if v_0.Op != OpAMD64MOVLconst {
7186 break
7187 }
7188 c := auxIntToInt32(v_0.AuxInt)
7189 x := v_1
7190 v.reset(OpAMD64InvertFlags)
7191 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
7192 v0.AuxInt = int32ToAuxInt(c)
7193 v0.AddArg(x)
7194 v.AddArg(v0)
7195 return true
7196 }
7197 // match: (CMPL x y)
7198 // cond: x.ID > y.ID
7199 // result: (InvertFlags (CMPL y x))
7200 for {
7201 x := v_0
7202 y := v_1
7203 if !(x.ID > y.ID) {
7204 break
7205 }
7206 v.reset(OpAMD64InvertFlags)
7207 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
7208 v0.AddArg2(y, x)
7209 v.AddArg(v0)
7210 return true
7211 }
7212 // match: (CMPL l:(MOVLload {sym} [off] ptr mem) x)
7213 // cond: canMergeLoad(v, l) && clobber(l)
7214 // result: (CMPLload {sym} [off] ptr x mem)
7215 for {
7216 l := v_0
7217 if l.Op != OpAMD64MOVLload {
7218 break
7219 }
7220 off := auxIntToInt32(l.AuxInt)
7221 sym := auxToSym(l.Aux)
7222 mem := l.Args[1]
7223 ptr := l.Args[0]
7224 x := v_1
7225 if !(canMergeLoad(v, l) && clobber(l)) {
7226 break
7227 }
7228 v.reset(OpAMD64CMPLload)
7229 v.AuxInt = int32ToAuxInt(off)
7230 v.Aux = symToAux(sym)
7231 v.AddArg3(ptr, x, mem)
7232 return true
7233 }
7234 // match: (CMPL x l:(MOVLload {sym} [off] ptr mem))
7235 // cond: canMergeLoad(v, l) && clobber(l)
7236 // result: (InvertFlags (CMPLload {sym} [off] ptr x mem))
7237 for {
7238 x := v_0
7239 l := v_1
7240 if l.Op != OpAMD64MOVLload {
7241 break
7242 }
7243 off := auxIntToInt32(l.AuxInt)
7244 sym := auxToSym(l.Aux)
7245 mem := l.Args[1]
7246 ptr := l.Args[0]
7247 if !(canMergeLoad(v, l) && clobber(l)) {
7248 break
7249 }
7250 v.reset(OpAMD64InvertFlags)
7251 v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags)
7252 v0.AuxInt = int32ToAuxInt(off)
7253 v0.Aux = symToAux(sym)
7254 v0.AddArg3(ptr, x, mem)
7255 v.AddArg(v0)
7256 return true
7257 }
7258 return false
7259 }
7260 func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool {
7261 v_0 := v.Args[0]
7262 b := v.Block
7263 // match: (CMPLconst (MOVLconst [x]) [y])
7264 // cond: x == y
7265 // result: (FlagEQ)
7266 for {
7267 y := auxIntToInt32(v.AuxInt)
7268 if v_0.Op != OpAMD64MOVLconst {
7269 break
7270 }
7271 x := auxIntToInt32(v_0.AuxInt)
7272 if !(x == y) {
7273 break
7274 }
7275 v.reset(OpAMD64FlagEQ)
7276 return true
7277 }
7278 // match: (CMPLconst (MOVLconst [x]) [y])
7279 // cond: x < y && uint32(x) < uint32(y)
7280 // result: (FlagLT_ULT)
7281 for {
7282 y := auxIntToInt32(v.AuxInt)
7283 if v_0.Op != OpAMD64MOVLconst {
7284 break
7285 }
7286 x := auxIntToInt32(v_0.AuxInt)
7287 if !(x < y && uint32(x) < uint32(y)) {
7288 break
7289 }
7290 v.reset(OpAMD64FlagLT_ULT)
7291 return true
7292 }
7293 // match: (CMPLconst (MOVLconst [x]) [y])
7294 // cond: x < y && uint32(x) > uint32(y)
7295 // result: (FlagLT_UGT)
7296 for {
7297 y := auxIntToInt32(v.AuxInt)
7298 if v_0.Op != OpAMD64MOVLconst {
7299 break
7300 }
7301 x := auxIntToInt32(v_0.AuxInt)
7302 if !(x < y && uint32(x) > uint32(y)) {
7303 break
7304 }
7305 v.reset(OpAMD64FlagLT_UGT)
7306 return true
7307 }
7308 // match: (CMPLconst (MOVLconst [x]) [y])
7309 // cond: x > y && uint32(x) < uint32(y)
7310 // result: (FlagGT_ULT)
7311 for {
7312 y := auxIntToInt32(v.AuxInt)
7313 if v_0.Op != OpAMD64MOVLconst {
7314 break
7315 }
7316 x := auxIntToInt32(v_0.AuxInt)
7317 if !(x > y && uint32(x) < uint32(y)) {
7318 break
7319 }
7320 v.reset(OpAMD64FlagGT_ULT)
7321 return true
7322 }
7323 // match: (CMPLconst (MOVLconst [x]) [y])
7324 // cond: x > y && uint32(x) > uint32(y)
7325 // result: (FlagGT_UGT)
7326 for {
7327 y := auxIntToInt32(v.AuxInt)
7328 if v_0.Op != OpAMD64MOVLconst {
7329 break
7330 }
7331 x := auxIntToInt32(v_0.AuxInt)
7332 if !(x > y && uint32(x) > uint32(y)) {
7333 break
7334 }
7335 v.reset(OpAMD64FlagGT_UGT)
7336 return true
7337 }
7338 // match: (CMPLconst (SHRLconst _ [c]) [n])
7339 // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
7340 // result: (FlagLT_ULT)
7341 for {
7342 n := auxIntToInt32(v.AuxInt)
7343 if v_0.Op != OpAMD64SHRLconst {
7344 break
7345 }
7346 c := auxIntToInt8(v_0.AuxInt)
7347 if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
7348 break
7349 }
7350 v.reset(OpAMD64FlagLT_ULT)
7351 return true
7352 }
7353 // match: (CMPLconst (ANDLconst _ [m]) [n])
7354 // cond: 0 <= m && m < n
7355 // result: (FlagLT_ULT)
7356 for {
7357 n := auxIntToInt32(v.AuxInt)
7358 if v_0.Op != OpAMD64ANDLconst {
7359 break
7360 }
7361 m := auxIntToInt32(v_0.AuxInt)
7362 if !(0 <= m && m < n) {
7363 break
7364 }
7365 v.reset(OpAMD64FlagLT_ULT)
7366 return true
7367 }
7368 // match: (CMPLconst a:(ANDL x y) [0])
7369 // cond: a.Uses == 1
7370 // result: (TESTL x y)
7371 for {
7372 if auxIntToInt32(v.AuxInt) != 0 {
7373 break
7374 }
7375 a := v_0
7376 if a.Op != OpAMD64ANDL {
7377 break
7378 }
7379 y := a.Args[1]
7380 x := a.Args[0]
7381 if !(a.Uses == 1) {
7382 break
7383 }
7384 v.reset(OpAMD64TESTL)
7385 v.AddArg2(x, y)
7386 return true
7387 }
7388 // match: (CMPLconst a:(ANDLconst [c] x) [0])
7389 // cond: a.Uses == 1
7390 // result: (TESTLconst [c] x)
7391 for {
7392 if auxIntToInt32(v.AuxInt) != 0 {
7393 break
7394 }
7395 a := v_0
7396 if a.Op != OpAMD64ANDLconst {
7397 break
7398 }
7399 c := auxIntToInt32(a.AuxInt)
7400 x := a.Args[0]
7401 if !(a.Uses == 1) {
7402 break
7403 }
7404 v.reset(OpAMD64TESTLconst)
7405 v.AuxInt = int32ToAuxInt(c)
7406 v.AddArg(x)
7407 return true
7408 }
7409 // match: (CMPLconst x [0])
7410 // result: (TESTL x x)
7411 for {
7412 if auxIntToInt32(v.AuxInt) != 0 {
7413 break
7414 }
7415 x := v_0
7416 v.reset(OpAMD64TESTL)
7417 v.AddArg2(x, x)
7418 return true
7419 }
7420 // match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c])
7421 // cond: l.Uses == 1 && clobber(l)
7422 // result: @l.Block (CMPLconstload {sym} [makeValAndOff32(c,off)] ptr mem)
7423 for {
7424 c := auxIntToInt32(v.AuxInt)
7425 l := v_0
7426 if l.Op != OpAMD64MOVLload {
7427 break
7428 }
7429 off := auxIntToInt32(l.AuxInt)
7430 sym := auxToSym(l.Aux)
7431 mem := l.Args[1]
7432 ptr := l.Args[0]
7433 if !(l.Uses == 1 && clobber(l)) {
7434 break
7435 }
7436 b = l.Block
7437 v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
7438 v.copyOf(v0)
7439 v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
7440 v0.Aux = symToAux(sym)
7441 v0.AddArg2(ptr, mem)
7442 return true
7443 }
7444 return false
7445 }
7446 func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool {
7447 v_1 := v.Args[1]
7448 v_0 := v.Args[0]
7449 // match: (CMPLconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
7450 // cond: ValAndOff(valoff1).canAdd32(off2)
7451 // result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
7452 for {
7453 valoff1 := auxIntToValAndOff(v.AuxInt)
7454 sym := auxToSym(v.Aux)
7455 if v_0.Op != OpAMD64ADDQconst {
7456 break
7457 }
7458 off2 := auxIntToInt32(v_0.AuxInt)
7459 base := v_0.Args[0]
7460 mem := v_1
7461 if !(ValAndOff(valoff1).canAdd32(off2)) {
7462 break
7463 }
7464 v.reset(OpAMD64CMPLconstload)
7465 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7466 v.Aux = symToAux(sym)
7467 v.AddArg2(base, mem)
7468 return true
7469 }
7470 // match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
7471 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
7472 // result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1, sym2)} base mem)
7473 for {
7474 valoff1 := auxIntToValAndOff(v.AuxInt)
7475 sym1 := auxToSym(v.Aux)
7476 if v_0.Op != OpAMD64LEAQ {
7477 break
7478 }
7479 off2 := auxIntToInt32(v_0.AuxInt)
7480 sym2 := auxToSym(v_0.Aux)
7481 base := v_0.Args[0]
7482 mem := v_1
7483 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
7484 break
7485 }
7486 v.reset(OpAMD64CMPLconstload)
7487 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7488 v.Aux = symToAux(mergeSym(sym1, sym2))
7489 v.AddArg2(base, mem)
7490 return true
7491 }
7492 return false
7493 }
7494 func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool {
7495 v_2 := v.Args[2]
7496 v_1 := v.Args[1]
7497 v_0 := v.Args[0]
7498 // match: (CMPLload [off1] {sym} (ADDQconst [off2] base) val mem)
7499 // cond: is32Bit(int64(off1)+int64(off2))
7500 // result: (CMPLload [off1+off2] {sym} base val mem)
7501 for {
7502 off1 := auxIntToInt32(v.AuxInt)
7503 sym := auxToSym(v.Aux)
7504 if v_0.Op != OpAMD64ADDQconst {
7505 break
7506 }
7507 off2 := auxIntToInt32(v_0.AuxInt)
7508 base := v_0.Args[0]
7509 val := v_1
7510 mem := v_2
7511 if !(is32Bit(int64(off1) + int64(off2))) {
7512 break
7513 }
7514 v.reset(OpAMD64CMPLload)
7515 v.AuxInt = int32ToAuxInt(off1 + off2)
7516 v.Aux = symToAux(sym)
7517 v.AddArg3(base, val, mem)
7518 return true
7519 }
7520 // match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
7521 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
7522 // result: (CMPLload [off1+off2] {mergeSym(sym1, sym2)} base val mem)
7523 for {
7524 off1 := auxIntToInt32(v.AuxInt)
7525 sym1 := auxToSym(v.Aux)
7526 if v_0.Op != OpAMD64LEAQ {
7527 break
7528 }
7529 off2 := auxIntToInt32(v_0.AuxInt)
7530 sym2 := auxToSym(v_0.Aux)
7531 base := v_0.Args[0]
7532 val := v_1
7533 mem := v_2
7534 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
7535 break
7536 }
7537 v.reset(OpAMD64CMPLload)
7538 v.AuxInt = int32ToAuxInt(off1 + off2)
7539 v.Aux = symToAux(mergeSym(sym1, sym2))
7540 v.AddArg3(base, val, mem)
7541 return true
7542 }
7543 // match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem)
7544 // cond: validValAndOff(int64(c), int64(off))
7545 // result: (CMPLconstload {sym} [makeValAndOff32(c,off)] ptr mem)
7546 for {
7547 off := auxIntToInt32(v.AuxInt)
7548 sym := auxToSym(v.Aux)
7549 ptr := v_0
7550 if v_1.Op != OpAMD64MOVLconst {
7551 break
7552 }
7553 c := auxIntToInt32(v_1.AuxInt)
7554 mem := v_2
7555 if !(validValAndOff(int64(c), int64(off))) {
7556 break
7557 }
7558 v.reset(OpAMD64CMPLconstload)
7559 v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
7560 v.Aux = symToAux(sym)
7561 v.AddArg2(ptr, mem)
7562 return true
7563 }
7564 return false
7565 }
7566 func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
7567 v_1 := v.Args[1]
7568 v_0 := v.Args[0]
7569 b := v.Block
7570 // match: (CMPQ x (MOVQconst [c]))
7571 // cond: is32Bit(c)
7572 // result: (CMPQconst x [int32(c)])
7573 for {
7574 x := v_0
7575 if v_1.Op != OpAMD64MOVQconst {
7576 break
7577 }
7578 c := auxIntToInt64(v_1.AuxInt)
7579 if !(is32Bit(c)) {
7580 break
7581 }
7582 v.reset(OpAMD64CMPQconst)
7583 v.AuxInt = int32ToAuxInt(int32(c))
7584 v.AddArg(x)
7585 return true
7586 }
7587 // match: (CMPQ (MOVQconst [c]) x)
7588 // cond: is32Bit(c)
7589 // result: (InvertFlags (CMPQconst x [int32(c)]))
7590 for {
7591 if v_0.Op != OpAMD64MOVQconst {
7592 break
7593 }
7594 c := auxIntToInt64(v_0.AuxInt)
7595 x := v_1
7596 if !(is32Bit(c)) {
7597 break
7598 }
7599 v.reset(OpAMD64InvertFlags)
7600 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
7601 v0.AuxInt = int32ToAuxInt(int32(c))
7602 v0.AddArg(x)
7603 v.AddArg(v0)
7604 return true
7605 }
7606 // match: (CMPQ x y)
7607 // cond: x.ID > y.ID
7608 // result: (InvertFlags (CMPQ y x))
7609 for {
7610 x := v_0
7611 y := v_1
7612 if !(x.ID > y.ID) {
7613 break
7614 }
7615 v.reset(OpAMD64InvertFlags)
7616 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
7617 v0.AddArg2(y, x)
7618 v.AddArg(v0)
7619 return true
7620 }
7621 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
7622 // cond: x == y
7623 // result: (FlagEQ)
7624 for {
7625 if v_0.Op != OpAMD64MOVQconst {
7626 break
7627 }
7628 x := auxIntToInt64(v_0.AuxInt)
7629 if v_1.Op != OpAMD64MOVQconst {
7630 break
7631 }
7632 y := auxIntToInt64(v_1.AuxInt)
7633 if !(x == y) {
7634 break
7635 }
7636 v.reset(OpAMD64FlagEQ)
7637 return true
7638 }
7639 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
7640 // cond: x < y && uint64(x) < uint64(y)
7641 // result: (FlagLT_ULT)
7642 for {
7643 if v_0.Op != OpAMD64MOVQconst {
7644 break
7645 }
7646 x := auxIntToInt64(v_0.AuxInt)
7647 if v_1.Op != OpAMD64MOVQconst {
7648 break
7649 }
7650 y := auxIntToInt64(v_1.AuxInt)
7651 if !(x < y && uint64(x) < uint64(y)) {
7652 break
7653 }
7654 v.reset(OpAMD64FlagLT_ULT)
7655 return true
7656 }
7657 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
7658 // cond: x < y && uint64(x) > uint64(y)
7659 // result: (FlagLT_UGT)
7660 for {
7661 if v_0.Op != OpAMD64MOVQconst {
7662 break
7663 }
7664 x := auxIntToInt64(v_0.AuxInt)
7665 if v_1.Op != OpAMD64MOVQconst {
7666 break
7667 }
7668 y := auxIntToInt64(v_1.AuxInt)
7669 if !(x < y && uint64(x) > uint64(y)) {
7670 break
7671 }
7672 v.reset(OpAMD64FlagLT_UGT)
7673 return true
7674 }
7675 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
7676 // cond: x > y && uint64(x) < uint64(y)
7677 // result: (FlagGT_ULT)
7678 for {
7679 if v_0.Op != OpAMD64MOVQconst {
7680 break
7681 }
7682 x := auxIntToInt64(v_0.AuxInt)
7683 if v_1.Op != OpAMD64MOVQconst {
7684 break
7685 }
7686 y := auxIntToInt64(v_1.AuxInt)
7687 if !(x > y && uint64(x) < uint64(y)) {
7688 break
7689 }
7690 v.reset(OpAMD64FlagGT_ULT)
7691 return true
7692 }
7693 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
7694 // cond: x > y && uint64(x) > uint64(y)
7695 // result: (FlagGT_UGT)
7696 for {
7697 if v_0.Op != OpAMD64MOVQconst {
7698 break
7699 }
7700 x := auxIntToInt64(v_0.AuxInt)
7701 if v_1.Op != OpAMD64MOVQconst {
7702 break
7703 }
7704 y := auxIntToInt64(v_1.AuxInt)
7705 if !(x > y && uint64(x) > uint64(y)) {
7706 break
7707 }
7708 v.reset(OpAMD64FlagGT_UGT)
7709 return true
7710 }
7711 // match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x)
7712 // cond: canMergeLoad(v, l) && clobber(l)
7713 // result: (CMPQload {sym} [off] ptr x mem)
7714 for {
7715 l := v_0
7716 if l.Op != OpAMD64MOVQload {
7717 break
7718 }
7719 off := auxIntToInt32(l.AuxInt)
7720 sym := auxToSym(l.Aux)
7721 mem := l.Args[1]
7722 ptr := l.Args[0]
7723 x := v_1
7724 if !(canMergeLoad(v, l) && clobber(l)) {
7725 break
7726 }
7727 v.reset(OpAMD64CMPQload)
7728 v.AuxInt = int32ToAuxInt(off)
7729 v.Aux = symToAux(sym)
7730 v.AddArg3(ptr, x, mem)
7731 return true
7732 }
7733 // match: (CMPQ x l:(MOVQload {sym} [off] ptr mem))
7734 // cond: canMergeLoad(v, l) && clobber(l)
7735 // result: (InvertFlags (CMPQload {sym} [off] ptr x mem))
7736 for {
7737 x := v_0
7738 l := v_1
7739 if l.Op != OpAMD64MOVQload {
7740 break
7741 }
7742 off := auxIntToInt32(l.AuxInt)
7743 sym := auxToSym(l.Aux)
7744 mem := l.Args[1]
7745 ptr := l.Args[0]
7746 if !(canMergeLoad(v, l) && clobber(l)) {
7747 break
7748 }
7749 v.reset(OpAMD64InvertFlags)
7750 v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags)
7751 v0.AuxInt = int32ToAuxInt(off)
7752 v0.Aux = symToAux(sym)
7753 v0.AddArg3(ptr, x, mem)
7754 v.AddArg(v0)
7755 return true
7756 }
7757 return false
7758 }
7759 func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
7760 v_0 := v.Args[0]
7761 b := v.Block
7762 // match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32])
7763 // result: (FlagLT_ULT)
7764 for {
7765 if auxIntToInt32(v.AuxInt) != 32 || v_0.Op != OpAMD64NEGQ {
7766 break
7767 }
7768 v_0_0 := v_0.Args[0]
7769 if v_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_0_0.AuxInt) != -16 {
7770 break
7771 }
7772 v_0_0_0 := v_0_0.Args[0]
7773 if v_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_0_0.AuxInt) != 15 {
7774 break
7775 }
7776 v.reset(OpAMD64FlagLT_ULT)
7777 return true
7778 }
7779 // match: (CMPQconst (NEGQ (ADDQconst [-8] (ANDQconst [7] _))) [32])
7780 // result: (FlagLT_ULT)
7781 for {
7782 if auxIntToInt32(v.AuxInt) != 32 || v_0.Op != OpAMD64NEGQ {
7783 break
7784 }
7785 v_0_0 := v_0.Args[0]
7786 if v_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_0_0.AuxInt) != -8 {
7787 break
7788 }
7789 v_0_0_0 := v_0_0.Args[0]
7790 if v_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_0_0.AuxInt) != 7 {
7791 break
7792 }
7793 v.reset(OpAMD64FlagLT_ULT)
7794 return true
7795 }
7796 // match: (CMPQconst (MOVQconst [x]) [y])
7797 // cond: x == int64(y)
7798 // result: (FlagEQ)
7799 for {
7800 y := auxIntToInt32(v.AuxInt)
7801 if v_0.Op != OpAMD64MOVQconst {
7802 break
7803 }
7804 x := auxIntToInt64(v_0.AuxInt)
7805 if !(x == int64(y)) {
7806 break
7807 }
7808 v.reset(OpAMD64FlagEQ)
7809 return true
7810 }
7811 // match: (CMPQconst (MOVQconst [x]) [y])
7812 // cond: x < int64(y) && uint64(x) < uint64(int64(y))
7813 // result: (FlagLT_ULT)
7814 for {
7815 y := auxIntToInt32(v.AuxInt)
7816 if v_0.Op != OpAMD64MOVQconst {
7817 break
7818 }
7819 x := auxIntToInt64(v_0.AuxInt)
7820 if !(x < int64(y) && uint64(x) < uint64(int64(y))) {
7821 break
7822 }
7823 v.reset(OpAMD64FlagLT_ULT)
7824 return true
7825 }
7826 // match: (CMPQconst (MOVQconst [x]) [y])
7827 // cond: x < int64(y) && uint64(x) > uint64(int64(y))
7828 // result: (FlagLT_UGT)
7829 for {
7830 y := auxIntToInt32(v.AuxInt)
7831 if v_0.Op != OpAMD64MOVQconst {
7832 break
7833 }
7834 x := auxIntToInt64(v_0.AuxInt)
7835 if !(x < int64(y) && uint64(x) > uint64(int64(y))) {
7836 break
7837 }
7838 v.reset(OpAMD64FlagLT_UGT)
7839 return true
7840 }
7841 // match: (CMPQconst (MOVQconst [x]) [y])
7842 // cond: x > int64(y) && uint64(x) < uint64(int64(y))
7843 // result: (FlagGT_ULT)
7844 for {
7845 y := auxIntToInt32(v.AuxInt)
7846 if v_0.Op != OpAMD64MOVQconst {
7847 break
7848 }
7849 x := auxIntToInt64(v_0.AuxInt)
7850 if !(x > int64(y) && uint64(x) < uint64(int64(y))) {
7851 break
7852 }
7853 v.reset(OpAMD64FlagGT_ULT)
7854 return true
7855 }
7856 // match: (CMPQconst (MOVQconst [x]) [y])
7857 // cond: x > int64(y) && uint64(x) > uint64(int64(y))
7858 // result: (FlagGT_UGT)
7859 for {
7860 y := auxIntToInt32(v.AuxInt)
7861 if v_0.Op != OpAMD64MOVQconst {
7862 break
7863 }
7864 x := auxIntToInt64(v_0.AuxInt)
7865 if !(x > int64(y) && uint64(x) > uint64(int64(y))) {
7866 break
7867 }
7868 v.reset(OpAMD64FlagGT_UGT)
7869 return true
7870 }
7871 // match: (CMPQconst (MOVBQZX _) [c])
7872 // cond: 0xFF < c
7873 // result: (FlagLT_ULT)
7874 for {
7875 c := auxIntToInt32(v.AuxInt)
7876 if v_0.Op != OpAMD64MOVBQZX || !(0xFF < c) {
7877 break
7878 }
7879 v.reset(OpAMD64FlagLT_ULT)
7880 return true
7881 }
7882 // match: (CMPQconst (MOVWQZX _) [c])
7883 // cond: 0xFFFF < c
7884 // result: (FlagLT_ULT)
7885 for {
7886 c := auxIntToInt32(v.AuxInt)
7887 if v_0.Op != OpAMD64MOVWQZX || !(0xFFFF < c) {
7888 break
7889 }
7890 v.reset(OpAMD64FlagLT_ULT)
7891 return true
7892 }
7893 // match: (CMPQconst (SHRQconst _ [c]) [n])
7894 // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
7895 // result: (FlagLT_ULT)
7896 for {
7897 n := auxIntToInt32(v.AuxInt)
7898 if v_0.Op != OpAMD64SHRQconst {
7899 break
7900 }
7901 c := auxIntToInt8(v_0.AuxInt)
7902 if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
7903 break
7904 }
7905 v.reset(OpAMD64FlagLT_ULT)
7906 return true
7907 }
7908 // match: (CMPQconst (ANDQconst _ [m]) [n])
7909 // cond: 0 <= m && m < n
7910 // result: (FlagLT_ULT)
7911 for {
7912 n := auxIntToInt32(v.AuxInt)
7913 if v_0.Op != OpAMD64ANDQconst {
7914 break
7915 }
7916 m := auxIntToInt32(v_0.AuxInt)
7917 if !(0 <= m && m < n) {
7918 break
7919 }
7920 v.reset(OpAMD64FlagLT_ULT)
7921 return true
7922 }
7923 // match: (CMPQconst (ANDLconst _ [m]) [n])
7924 // cond: 0 <= m && m < n
7925 // result: (FlagLT_ULT)
7926 for {
7927 n := auxIntToInt32(v.AuxInt)
7928 if v_0.Op != OpAMD64ANDLconst {
7929 break
7930 }
7931 m := auxIntToInt32(v_0.AuxInt)
7932 if !(0 <= m && m < n) {
7933 break
7934 }
7935 v.reset(OpAMD64FlagLT_ULT)
7936 return true
7937 }
7938 // match: (CMPQconst a:(ANDQ x y) [0])
7939 // cond: a.Uses == 1
7940 // result: (TESTQ x y)
7941 for {
7942 if auxIntToInt32(v.AuxInt) != 0 {
7943 break
7944 }
7945 a := v_0
7946 if a.Op != OpAMD64ANDQ {
7947 break
7948 }
7949 y := a.Args[1]
7950 x := a.Args[0]
7951 if !(a.Uses == 1) {
7952 break
7953 }
7954 v.reset(OpAMD64TESTQ)
7955 v.AddArg2(x, y)
7956 return true
7957 }
7958 // match: (CMPQconst a:(ANDQconst [c] x) [0])
7959 // cond: a.Uses == 1
7960 // result: (TESTQconst [c] x)
7961 for {
7962 if auxIntToInt32(v.AuxInt) != 0 {
7963 break
7964 }
7965 a := v_0
7966 if a.Op != OpAMD64ANDQconst {
7967 break
7968 }
7969 c := auxIntToInt32(a.AuxInt)
7970 x := a.Args[0]
7971 if !(a.Uses == 1) {
7972 break
7973 }
7974 v.reset(OpAMD64TESTQconst)
7975 v.AuxInt = int32ToAuxInt(c)
7976 v.AddArg(x)
7977 return true
7978 }
7979 // match: (CMPQconst x [0])
7980 // result: (TESTQ x x)
7981 for {
7982 if auxIntToInt32(v.AuxInt) != 0 {
7983 break
7984 }
7985 x := v_0
7986 v.reset(OpAMD64TESTQ)
7987 v.AddArg2(x, x)
7988 return true
7989 }
7990 // match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c])
7991 // cond: l.Uses == 1 && clobber(l)
7992 // result: @l.Block (CMPQconstload {sym} [makeValAndOff32(c,off)] ptr mem)
7993 for {
7994 c := auxIntToInt32(v.AuxInt)
7995 l := v_0
7996 if l.Op != OpAMD64MOVQload {
7997 break
7998 }
7999 off := auxIntToInt32(l.AuxInt)
8000 sym := auxToSym(l.Aux)
8001 mem := l.Args[1]
8002 ptr := l.Args[0]
8003 if !(l.Uses == 1 && clobber(l)) {
8004 break
8005 }
8006 b = l.Block
8007 v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
8008 v.copyOf(v0)
8009 v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
8010 v0.Aux = symToAux(sym)
8011 v0.AddArg2(ptr, mem)
8012 return true
8013 }
8014 return false
8015 }
8016 func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool {
8017 v_1 := v.Args[1]
8018 v_0 := v.Args[0]
8019 // match: (CMPQconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
8020 // cond: ValAndOff(valoff1).canAdd32(off2)
8021 // result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
8022 for {
8023 valoff1 := auxIntToValAndOff(v.AuxInt)
8024 sym := auxToSym(v.Aux)
8025 if v_0.Op != OpAMD64ADDQconst {
8026 break
8027 }
8028 off2 := auxIntToInt32(v_0.AuxInt)
8029 base := v_0.Args[0]
8030 mem := v_1
8031 if !(ValAndOff(valoff1).canAdd32(off2)) {
8032 break
8033 }
8034 v.reset(OpAMD64CMPQconstload)
8035 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
8036 v.Aux = symToAux(sym)
8037 v.AddArg2(base, mem)
8038 return true
8039 }
8040 // match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
8041 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
8042 // result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1, sym2)} base mem)
8043 for {
8044 valoff1 := auxIntToValAndOff(v.AuxInt)
8045 sym1 := auxToSym(v.Aux)
8046 if v_0.Op != OpAMD64LEAQ {
8047 break
8048 }
8049 off2 := auxIntToInt32(v_0.AuxInt)
8050 sym2 := auxToSym(v_0.Aux)
8051 base := v_0.Args[0]
8052 mem := v_1
8053 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
8054 break
8055 }
8056 v.reset(OpAMD64CMPQconstload)
8057 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
8058 v.Aux = symToAux(mergeSym(sym1, sym2))
8059 v.AddArg2(base, mem)
8060 return true
8061 }
8062 return false
8063 }
8064 func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool {
8065 v_2 := v.Args[2]
8066 v_1 := v.Args[1]
8067 v_0 := v.Args[0]
8068 // match: (CMPQload [off1] {sym} (ADDQconst [off2] base) val mem)
8069 // cond: is32Bit(int64(off1)+int64(off2))
8070 // result: (CMPQload [off1+off2] {sym} base val mem)
8071 for {
8072 off1 := auxIntToInt32(v.AuxInt)
8073 sym := auxToSym(v.Aux)
8074 if v_0.Op != OpAMD64ADDQconst {
8075 break
8076 }
8077 off2 := auxIntToInt32(v_0.AuxInt)
8078 base := v_0.Args[0]
8079 val := v_1
8080 mem := v_2
8081 if !(is32Bit(int64(off1) + int64(off2))) {
8082 break
8083 }
8084 v.reset(OpAMD64CMPQload)
8085 v.AuxInt = int32ToAuxInt(off1 + off2)
8086 v.Aux = symToAux(sym)
8087 v.AddArg3(base, val, mem)
8088 return true
8089 }
8090 // match: (CMPQload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
8091 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
8092 // result: (CMPQload [off1+off2] {mergeSym(sym1, sym2)} base val mem)
8093 for {
8094 off1 := auxIntToInt32(v.AuxInt)
8095 sym1 := auxToSym(v.Aux)
8096 if v_0.Op != OpAMD64LEAQ {
8097 break
8098 }
8099 off2 := auxIntToInt32(v_0.AuxInt)
8100 sym2 := auxToSym(v_0.Aux)
8101 base := v_0.Args[0]
8102 val := v_1
8103 mem := v_2
8104 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8105 break
8106 }
8107 v.reset(OpAMD64CMPQload)
8108 v.AuxInt = int32ToAuxInt(off1 + off2)
8109 v.Aux = symToAux(mergeSym(sym1, sym2))
8110 v.AddArg3(base, val, mem)
8111 return true
8112 }
8113 // match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem)
8114 // cond: validValAndOff(c, int64(off))
8115 // result: (CMPQconstload {sym} [makeValAndOff64(c,int64(off))] ptr mem)
8116 for {
8117 off := auxIntToInt32(v.AuxInt)
8118 sym := auxToSym(v.Aux)
8119 ptr := v_0
8120 if v_1.Op != OpAMD64MOVQconst {
8121 break
8122 }
8123 c := auxIntToInt64(v_1.AuxInt)
8124 mem := v_2
8125 if !(validValAndOff(c, int64(off))) {
8126 break
8127 }
8128 v.reset(OpAMD64CMPQconstload)
8129 v.AuxInt = valAndOffToAuxInt(makeValAndOff64(c, int64(off)))
8130 v.Aux = symToAux(sym)
8131 v.AddArg2(ptr, mem)
8132 return true
8133 }
8134 return false
8135 }
8136 func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
8137 v_1 := v.Args[1]
8138 v_0 := v.Args[0]
8139 b := v.Block
8140 // match: (CMPW x (MOVLconst [c]))
8141 // result: (CMPWconst x [int16(c)])
8142 for {
8143 x := v_0
8144 if v_1.Op != OpAMD64MOVLconst {
8145 break
8146 }
8147 c := auxIntToInt32(v_1.AuxInt)
8148 v.reset(OpAMD64CMPWconst)
8149 v.AuxInt = int16ToAuxInt(int16(c))
8150 v.AddArg(x)
8151 return true
8152 }
8153 // match: (CMPW (MOVLconst [c]) x)
8154 // result: (InvertFlags (CMPWconst x [int16(c)]))
8155 for {
8156 if v_0.Op != OpAMD64MOVLconst {
8157 break
8158 }
8159 c := auxIntToInt32(v_0.AuxInt)
8160 x := v_1
8161 v.reset(OpAMD64InvertFlags)
8162 v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
8163 v0.AuxInt = int16ToAuxInt(int16(c))
8164 v0.AddArg(x)
8165 v.AddArg(v0)
8166 return true
8167 }
8168 // match: (CMPW x y)
8169 // cond: x.ID > y.ID
8170 // result: (InvertFlags (CMPW y x))
8171 for {
8172 x := v_0
8173 y := v_1
8174 if !(x.ID > y.ID) {
8175 break
8176 }
8177 v.reset(OpAMD64InvertFlags)
8178 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
8179 v0.AddArg2(y, x)
8180 v.AddArg(v0)
8181 return true
8182 }
8183 // match: (CMPW l:(MOVWload {sym} [off] ptr mem) x)
8184 // cond: canMergeLoad(v, l) && clobber(l)
8185 // result: (CMPWload {sym} [off] ptr x mem)
8186 for {
8187 l := v_0
8188 if l.Op != OpAMD64MOVWload {
8189 break
8190 }
8191 off := auxIntToInt32(l.AuxInt)
8192 sym := auxToSym(l.Aux)
8193 mem := l.Args[1]
8194 ptr := l.Args[0]
8195 x := v_1
8196 if !(canMergeLoad(v, l) && clobber(l)) {
8197 break
8198 }
8199 v.reset(OpAMD64CMPWload)
8200 v.AuxInt = int32ToAuxInt(off)
8201 v.Aux = symToAux(sym)
8202 v.AddArg3(ptr, x, mem)
8203 return true
8204 }
8205 // match: (CMPW x l:(MOVWload {sym} [off] ptr mem))
8206 // cond: canMergeLoad(v, l) && clobber(l)
8207 // result: (InvertFlags (CMPWload {sym} [off] ptr x mem))
8208 for {
8209 x := v_0
8210 l := v_1
8211 if l.Op != OpAMD64MOVWload {
8212 break
8213 }
8214 off := auxIntToInt32(l.AuxInt)
8215 sym := auxToSym(l.Aux)
8216 mem := l.Args[1]
8217 ptr := l.Args[0]
8218 if !(canMergeLoad(v, l) && clobber(l)) {
8219 break
8220 }
8221 v.reset(OpAMD64InvertFlags)
8222 v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags)
8223 v0.AuxInt = int32ToAuxInt(off)
8224 v0.Aux = symToAux(sym)
8225 v0.AddArg3(ptr, x, mem)
8226 v.AddArg(v0)
8227 return true
8228 }
8229 return false
8230 }
8231 func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
8232 v_0 := v.Args[0]
8233 b := v.Block
8234 // match: (CMPWconst (MOVLconst [x]) [y])
8235 // cond: int16(x) == y
8236 // result: (FlagEQ)
8237 for {
8238 y := auxIntToInt16(v.AuxInt)
8239 if v_0.Op != OpAMD64MOVLconst {
8240 break
8241 }
8242 x := auxIntToInt32(v_0.AuxInt)
8243 if !(int16(x) == y) {
8244 break
8245 }
8246 v.reset(OpAMD64FlagEQ)
8247 return true
8248 }
8249 // match: (CMPWconst (MOVLconst [x]) [y])
8250 // cond: int16(x) < y && uint16(x) < uint16(y)
8251 // result: (FlagLT_ULT)
8252 for {
8253 y := auxIntToInt16(v.AuxInt)
8254 if v_0.Op != OpAMD64MOVLconst {
8255 break
8256 }
8257 x := auxIntToInt32(v_0.AuxInt)
8258 if !(int16(x) < y && uint16(x) < uint16(y)) {
8259 break
8260 }
8261 v.reset(OpAMD64FlagLT_ULT)
8262 return true
8263 }
// match: (CMPWconst (MOVLconst [x]) [y])
// cond: int16(x)<y && uint16(x)>uint16(y)
// result: (FlagLT_UGT)
8267 for {
8268 y := auxIntToInt16(v.AuxInt)
8269 if v_0.Op != OpAMD64MOVLconst {
8270 break
8271 }
8272 x := auxIntToInt32(v_0.AuxInt)
8273 if !(int16(x) < y && uint16(x) > uint16(y)) {
8274 break
8275 }
8276 v.reset(OpAMD64FlagLT_UGT)
8277 return true
8278 }
// match: (CMPWconst (MOVLconst [x]) [y])
// cond: int16(x)>y && uint16(x)<uint16(y)
// result: (FlagGT_ULT)
8282 for {
8283 y := auxIntToInt16(v.AuxInt)
8284 if v_0.Op != OpAMD64MOVLconst {
8285 break
8286 }
8287 x := auxIntToInt32(v_0.AuxInt)
8288 if !(int16(x) > y && uint16(x) < uint16(y)) {
8289 break
8290 }
8291 v.reset(OpAMD64FlagGT_ULT)
8292 return true
8293 }
// match: (CMPWconst (MOVLconst [x]) [y])
// cond: int16(x)>y && uint16(x)>uint16(y)
// result: (FlagGT_UGT)
8297 for {
8298 y := auxIntToInt16(v.AuxInt)
8299 if v_0.Op != OpAMD64MOVLconst {
8300 break
8301 }
8302 x := auxIntToInt32(v_0.AuxInt)
8303 if !(int16(x) > y && uint16(x) > uint16(y)) {
8304 break
8305 }
8306 v.reset(OpAMD64FlagGT_UGT)
8307 return true
8308 }
// match: (CMPWconst (ANDLconst _ [m]) [n])
// cond: 0 <= int16(m) && int16(m) < n
// result: (FlagLT_ULT)
8312 for {
8313 n := auxIntToInt16(v.AuxInt)
8314 if v_0.Op != OpAMD64ANDLconst {
8315 break
8316 }
8317 m := auxIntToInt32(v_0.AuxInt)
8318 if !(0 <= int16(m) && int16(m) < n) {
8319 break
8320 }
8321 v.reset(OpAMD64FlagLT_ULT)
8322 return true
8323 }
// match: (CMPWconst a:(ANDL x y) [0])
// cond: a.Uses == 1
// result: (TESTW x y)
8327 for {
8328 if auxIntToInt16(v.AuxInt) != 0 {
8329 break
8330 }
8331 a := v_0
8332 if a.Op != OpAMD64ANDL {
8333 break
8334 }
8335 y := a.Args[1]
8336 x := a.Args[0]
8337 if !(a.Uses == 1) {
8338 break
8339 }
8340 v.reset(OpAMD64TESTW)
8341 v.AddArg2(x, y)
8342 return true
8343 }
// match: (CMPWconst a:(ANDLconst [c] x) [0])
// cond: a.Uses == 1
// result: (TESTWconst [int16(c)] x)
8347 for {
8348 if auxIntToInt16(v.AuxInt) != 0 {
8349 break
8350 }
8351 a := v_0
8352 if a.Op != OpAMD64ANDLconst {
8353 break
8354 }
8355 c := auxIntToInt32(a.AuxInt)
8356 x := a.Args[0]
8357 if !(a.Uses == 1) {
8358 break
8359 }
8360 v.reset(OpAMD64TESTWconst)
8361 v.AuxInt = int16ToAuxInt(int16(c))
8362 v.AddArg(x)
8363 return true
8364 }
// match: (CMPWconst x [0])
// result: (TESTW x x)
8367 for {
8368 if auxIntToInt16(v.AuxInt) != 0 {
8369 break
8370 }
8371 x := v_0
8372 v.reset(OpAMD64TESTW)
8373 v.AddArg2(x, x)
8374 return true
8375 }
// match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
// cond: l.Uses == 1 && clobber(l)
// result: @l.Block (CMPWconstload {sym} [makeValAndOff32(int32(c),off)] ptr mem)
8379 for {
8380 c := auxIntToInt16(v.AuxInt)
8381 l := v_0
8382 if l.Op != OpAMD64MOVWload {
8383 break
8384 }
8385 off := auxIntToInt32(l.AuxInt)
8386 sym := auxToSym(l.Aux)
8387 mem := l.Args[1]
8388 ptr := l.Args[0]
8389 if !(l.Uses == 1 && clobber(l)) {
8390 break
8391 }
8392 b = l.Block
8393 v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
8394 v.copyOf(v0)
8395 v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
8396 v0.Aux = symToAux(sym)
8397 v0.AddArg2(ptr, mem)
8398 return true
8399 }
8400 return false
8401 }
8402 func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool {
8403 v_1 := v.Args[1]
8404 v_0 := v.Args[0]
// match: (CMPWconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
// cond: ValAndOff(valoff1).canAdd32(off2)
// result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
8408 for {
8409 valoff1 := auxIntToValAndOff(v.AuxInt)
8410 sym := auxToSym(v.Aux)
8411 if v_0.Op != OpAMD64ADDQconst {
8412 break
8413 }
8414 off2 := auxIntToInt32(v_0.AuxInt)
8415 base := v_0.Args[0]
8416 mem := v_1
8417 if !(ValAndOff(valoff1).canAdd32(off2)) {
8418 break
8419 }
8420 v.reset(OpAMD64CMPWconstload)
8421 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
8422 v.Aux = symToAux(sym)
8423 v.AddArg2(base, mem)
8424 return true
8425 }
// match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
// result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
8429 for {
8430 valoff1 := auxIntToValAndOff(v.AuxInt)
8431 sym1 := auxToSym(v.Aux)
8432 if v_0.Op != OpAMD64LEAQ {
8433 break
8434 }
8435 off2 := auxIntToInt32(v_0.AuxInt)
8436 sym2 := auxToSym(v_0.Aux)
8437 base := v_0.Args[0]
8438 mem := v_1
8439 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
8440 break
8441 }
8442 v.reset(OpAMD64CMPWconstload)
8443 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
8444 v.Aux = symToAux(mergeSym(sym1, sym2))
8445 v.AddArg2(base, mem)
8446 return true
8447 }
8448 return false
8449 }
8450 func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool {
8451 v_2 := v.Args[2]
8452 v_1 := v.Args[1]
8453 v_0 := v.Args[0]
// match: (CMPWload [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (CMPWload [off1+off2] {sym} base val mem)
8457 for {
8458 off1 := auxIntToInt32(v.AuxInt)
8459 sym := auxToSym(v.Aux)
8460 if v_0.Op != OpAMD64ADDQconst {
8461 break
8462 }
8463 off2 := auxIntToInt32(v_0.AuxInt)
8464 base := v_0.Args[0]
8465 val := v_1
8466 mem := v_2
8467 if !(is32Bit(int64(off1) + int64(off2))) {
8468 break
8469 }
8470 v.reset(OpAMD64CMPWload)
8471 v.AuxInt = int32ToAuxInt(off1 + off2)
8472 v.Aux = symToAux(sym)
8473 v.AddArg3(base, val, mem)
8474 return true
8475 }
// match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (CMPWload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
8479 for {
8480 off1 := auxIntToInt32(v.AuxInt)
8481 sym1 := auxToSym(v.Aux)
8482 if v_0.Op != OpAMD64LEAQ {
8483 break
8484 }
8485 off2 := auxIntToInt32(v_0.AuxInt)
8486 sym2 := auxToSym(v_0.Aux)
8487 base := v_0.Args[0]
8488 val := v_1
8489 mem := v_2
8490 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8491 break
8492 }
8493 v.reset(OpAMD64CMPWload)
8494 v.AuxInt = int32ToAuxInt(off1 + off2)
8495 v.Aux = symToAux(mergeSym(sym1, sym2))
8496 v.AddArg3(base, val, mem)
8497 return true
8498 }
// match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
// cond: validValAndOff(int64(int16(c)),int64(off))
// result: (CMPWconstload {sym} [makeValAndOff32(int32(int16(c)),off)] ptr mem)
8502 for {
8503 off := auxIntToInt32(v.AuxInt)
8504 sym := auxToSym(v.Aux)
8505 ptr := v_0
8506 if v_1.Op != OpAMD64MOVLconst {
8507 break
8508 }
8509 c := auxIntToInt32(v_1.AuxInt)
8510 mem := v_2
8511 if !(validValAndOff(int64(int16(c)), int64(off))) {
8512 break
8513 }
8514 v.reset(OpAMD64CMPWconstload)
8515 v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int16(c)), off))
8516 v.Aux = symToAux(sym)
8517 v.AddArg2(ptr, mem)
8518 return true
8519 }
8520 return false
8521 }
8522 func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool {
8523 v_3 := v.Args[3]
8524 v_2 := v.Args[2]
8525 v_1 := v.Args[1]
8526 v_0 := v.Args[0]
// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
8530 for {
8531 off1 := auxIntToInt32(v.AuxInt)
8532 sym := auxToSym(v.Aux)
8533 if v_0.Op != OpAMD64ADDQconst {
8534 break
8535 }
8536 off2 := auxIntToInt32(v_0.AuxInt)
8537 ptr := v_0.Args[0]
8538 old := v_1
8539 new_ := v_2
8540 mem := v_3
8541 if !(is32Bit(int64(off1) + int64(off2))) {
8542 break
8543 }
8544 v.reset(OpAMD64CMPXCHGLlock)
8545 v.AuxInt = int32ToAuxInt(off1 + off2)
8546 v.Aux = symToAux(sym)
8547 v.AddArg4(ptr, old, new_, mem)
8548 return true
8549 }
8550 return false
8551 }
8552 func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool {
8553 v_3 := v.Args[3]
8554 v_2 := v.Args[2]
8555 v_1 := v.Args[1]
8556 v_0 := v.Args[0]
// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
8560 for {
8561 off1 := auxIntToInt32(v.AuxInt)
8562 sym := auxToSym(v.Aux)
8563 if v_0.Op != OpAMD64ADDQconst {
8564 break
8565 }
8566 off2 := auxIntToInt32(v_0.AuxInt)
8567 ptr := v_0.Args[0]
8568 old := v_1
8569 new_ := v_2
8570 mem := v_3
8571 if !(is32Bit(int64(off1) + int64(off2))) {
8572 break
8573 }
8574 v.reset(OpAMD64CMPXCHGQlock)
8575 v.AuxInt = int32ToAuxInt(off1 + off2)
8576 v.Aux = symToAux(sym)
8577 v.AddArg4(ptr, old, new_, mem)
8578 return true
8579 }
8580 return false
8581 }
8582 func rewriteValueAMD64_OpAMD64DIVSD(v *Value) bool {
8583 v_1 := v.Args[1]
8584 v_0 := v.Args[0]
// match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
// cond: canMergeLoadClobber(v, l, x) && clobber(l)
// result: (DIVSDload x [off] {sym} ptr mem)
8588 for {
8589 x := v_0
8590 l := v_1
8591 if l.Op != OpAMD64MOVSDload {
8592 break
8593 }
8594 off := auxIntToInt32(l.AuxInt)
8595 sym := auxToSym(l.Aux)
8596 mem := l.Args[1]
8597 ptr := l.Args[0]
8598 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
8599 break
8600 }
8601 v.reset(OpAMD64DIVSDload)
8602 v.AuxInt = int32ToAuxInt(off)
8603 v.Aux = symToAux(sym)
8604 v.AddArg3(x, ptr, mem)
8605 return true
8606 }
8607 return false
8608 }
8609 func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool {
8610 v_2 := v.Args[2]
8611 v_1 := v.Args[1]
8612 v_0 := v.Args[0]
// match: (DIVSDload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (DIVSDload [off1+off2] {sym} val base mem)
8616 for {
8617 off1 := auxIntToInt32(v.AuxInt)
8618 sym := auxToSym(v.Aux)
8619 val := v_0
8620 if v_1.Op != OpAMD64ADDQconst {
8621 break
8622 }
8623 off2 := auxIntToInt32(v_1.AuxInt)
8624 base := v_1.Args[0]
8625 mem := v_2
8626 if !(is32Bit(int64(off1) + int64(off2))) {
8627 break
8628 }
8629 v.reset(OpAMD64DIVSDload)
8630 v.AuxInt = int32ToAuxInt(off1 + off2)
8631 v.Aux = symToAux(sym)
8632 v.AddArg3(val, base, mem)
8633 return true
8634 }
// match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
8638 for {
8639 off1 := auxIntToInt32(v.AuxInt)
8640 sym1 := auxToSym(v.Aux)
8641 val := v_0
8642 if v_1.Op != OpAMD64LEAQ {
8643 break
8644 }
8645 off2 := auxIntToInt32(v_1.AuxInt)
8646 sym2 := auxToSym(v_1.Aux)
8647 base := v_1.Args[0]
8648 mem := v_2
8649 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8650 break
8651 }
8652 v.reset(OpAMD64DIVSDload)
8653 v.AuxInt = int32ToAuxInt(off1 + off2)
8654 v.Aux = symToAux(mergeSym(sym1, sym2))
8655 v.AddArg3(val, base, mem)
8656 return true
8657 }
8658 return false
8659 }
8660 func rewriteValueAMD64_OpAMD64DIVSS(v *Value) bool {
8661 v_1 := v.Args[1]
8662 v_0 := v.Args[0]
// match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
// cond: canMergeLoadClobber(v, l, x) && clobber(l)
// result: (DIVSSload x [off] {sym} ptr mem)
8666 for {
8667 x := v_0
8668 l := v_1
8669 if l.Op != OpAMD64MOVSSload {
8670 break
8671 }
8672 off := auxIntToInt32(l.AuxInt)
8673 sym := auxToSym(l.Aux)
8674 mem := l.Args[1]
8675 ptr := l.Args[0]
8676 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
8677 break
8678 }
8679 v.reset(OpAMD64DIVSSload)
8680 v.AuxInt = int32ToAuxInt(off)
8681 v.Aux = symToAux(sym)
8682 v.AddArg3(x, ptr, mem)
8683 return true
8684 }
8685 return false
8686 }
8687 func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool {
8688 v_2 := v.Args[2]
8689 v_1 := v.Args[1]
8690 v_0 := v.Args[0]
// match: (DIVSSload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (DIVSSload [off1+off2] {sym} val base mem)
8694 for {
8695 off1 := auxIntToInt32(v.AuxInt)
8696 sym := auxToSym(v.Aux)
8697 val := v_0
8698 if v_1.Op != OpAMD64ADDQconst {
8699 break
8700 }
8701 off2 := auxIntToInt32(v_1.AuxInt)
8702 base := v_1.Args[0]
8703 mem := v_2
8704 if !(is32Bit(int64(off1) + int64(off2))) {
8705 break
8706 }
8707 v.reset(OpAMD64DIVSSload)
8708 v.AuxInt = int32ToAuxInt(off1 + off2)
8709 v.Aux = symToAux(sym)
8710 v.AddArg3(val, base, mem)
8711 return true
8712 }
// match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
8716 for {
8717 off1 := auxIntToInt32(v.AuxInt)
8718 sym1 := auxToSym(v.Aux)
8719 val := v_0
8720 if v_1.Op != OpAMD64LEAQ {
8721 break
8722 }
8723 off2 := auxIntToInt32(v_1.AuxInt)
8724 sym2 := auxToSym(v_1.Aux)
8725 base := v_1.Args[0]
8726 mem := v_2
8727 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8728 break
8729 }
8730 v.reset(OpAMD64DIVSSload)
8731 v.AuxInt = int32ToAuxInt(off1 + off2)
8732 v.Aux = symToAux(mergeSym(sym1, sym2))
8733 v.AddArg3(val, base, mem)
8734 return true
8735 }
8736 return false
8737 }
8738 func rewriteValueAMD64_OpAMD64HMULL(v *Value) bool {
8739 v_1 := v.Args[1]
8740 v_0 := v.Args[0]
// match: (HMULL x y)
// cond: !x.rematerializeable() && y.rematerializeable()
// result: (HMULL y x)
8744 for {
8745 x := v_0
8746 y := v_1
8747 if !(!x.rematerializeable() && y.rematerializeable()) {
8748 break
8749 }
8750 v.reset(OpAMD64HMULL)
8751 v.AddArg2(y, x)
8752 return true
8753 }
8754 return false
8755 }
8756 func rewriteValueAMD64_OpAMD64HMULLU(v *Value) bool {
8757 v_1 := v.Args[1]
8758 v_0 := v.Args[0]
// match: (HMULLU x y)
// cond: !x.rematerializeable() && y.rematerializeable()
// result: (HMULLU y x)
8762 for {
8763 x := v_0
8764 y := v_1
8765 if !(!x.rematerializeable() && y.rematerializeable()) {
8766 break
8767 }
8768 v.reset(OpAMD64HMULLU)
8769 v.AddArg2(y, x)
8770 return true
8771 }
8772 return false
8773 }
8774 func rewriteValueAMD64_OpAMD64HMULQ(v *Value) bool {
8775 v_1 := v.Args[1]
8776 v_0 := v.Args[0]
// match: (HMULQ x y)
// cond: !x.rematerializeable() && y.rematerializeable()
// result: (HMULQ y x)
8780 for {
8781 x := v_0
8782 y := v_1
8783 if !(!x.rematerializeable() && y.rematerializeable()) {
8784 break
8785 }
8786 v.reset(OpAMD64HMULQ)
8787 v.AddArg2(y, x)
8788 return true
8789 }
8790 return false
8791 }
8792 func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool {
8793 v_1 := v.Args[1]
8794 v_0 := v.Args[0]
// match: (HMULQU x y)
// cond: !x.rematerializeable() && y.rematerializeable()
// result: (HMULQU y x)
8798 for {
8799 x := v_0
8800 y := v_1
8801 if !(!x.rematerializeable() && y.rematerializeable()) {
8802 break
8803 }
8804 v.reset(OpAMD64HMULQU)
8805 v.AddArg2(y, x)
8806 return true
8807 }
8808 return false
8809 }
8810 func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool {
8811 v_0 := v.Args[0]
// match: (LEAL [c] {s} (ADDLconst [d] x))
// cond: is32Bit(int64(c)+int64(d))
// result: (LEAL [c+d] {s} x)
8815 for {
8816 c := auxIntToInt32(v.AuxInt)
8817 s := auxToSym(v.Aux)
8818 if v_0.Op != OpAMD64ADDLconst {
8819 break
8820 }
8821 d := auxIntToInt32(v_0.AuxInt)
8822 x := v_0.Args[0]
8823 if !(is32Bit(int64(c) + int64(d))) {
8824 break
8825 }
8826 v.reset(OpAMD64LEAL)
8827 v.AuxInt = int32ToAuxInt(c + d)
8828 v.Aux = symToAux(s)
8829 v.AddArg(x)
8830 return true
8831 }
// match: (LEAL [c] {s} (ADDL x y))
// cond: x.Op != OpSB && y.Op != OpSB
// result: (LEAL1 [c] {s} x y)
8835 for {
8836 c := auxIntToInt32(v.AuxInt)
8837 s := auxToSym(v.Aux)
8838 if v_0.Op != OpAMD64ADDL {
8839 break
8840 }
8841 _ = v_0.Args[1]
8842 v_0_0 := v_0.Args[0]
8843 v_0_1 := v_0.Args[1]
8844 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
8845 x := v_0_0
8846 y := v_0_1
8847 if !(x.Op != OpSB && y.Op != OpSB) {
8848 continue
8849 }
8850 v.reset(OpAMD64LEAL1)
8851 v.AuxInt = int32ToAuxInt(c)
8852 v.Aux = symToAux(s)
8853 v.AddArg2(x, y)
8854 return true
8855 }
8856 break
8857 }
8858 return false
8859 }
8860 func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool {
8861 v_1 := v.Args[1]
8862 v_0 := v.Args[0]
// match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
// result: (LEAL1 [c+d] {s} x y)
8866 for {
8867 c := auxIntToInt32(v.AuxInt)
8868 s := auxToSym(v.Aux)
8869 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8870 if v_0.Op != OpAMD64ADDLconst {
8871 continue
8872 }
8873 d := auxIntToInt32(v_0.AuxInt)
8874 x := v_0.Args[0]
8875 y := v_1
8876 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8877 continue
8878 }
8879 v.reset(OpAMD64LEAL1)
8880 v.AuxInt = int32ToAuxInt(c + d)
8881 v.Aux = symToAux(s)
8882 v.AddArg2(x, y)
8883 return true
8884 }
8885 break
8886 }
// match: (LEAL1 [c] {s} x (SHLLconst [1] y))
// result: (LEAL2 [c] {s} x y)
8889 for {
8890 c := auxIntToInt32(v.AuxInt)
8891 s := auxToSym(v.Aux)
8892 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8893 x := v_0
8894 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
8895 continue
8896 }
8897 y := v_1.Args[0]
8898 v.reset(OpAMD64LEAL2)
8899 v.AuxInt = int32ToAuxInt(c)
8900 v.Aux = symToAux(s)
8901 v.AddArg2(x, y)
8902 return true
8903 }
8904 break
8905 }
// match: (LEAL1 [c] {s} x (SHLLconst [2] y))
// result: (LEAL4 [c] {s} x y)
8908 for {
8909 c := auxIntToInt32(v.AuxInt)
8910 s := auxToSym(v.Aux)
8911 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8912 x := v_0
8913 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
8914 continue
8915 }
8916 y := v_1.Args[0]
8917 v.reset(OpAMD64LEAL4)
8918 v.AuxInt = int32ToAuxInt(c)
8919 v.Aux = symToAux(s)
8920 v.AddArg2(x, y)
8921 return true
8922 }
8923 break
8924 }
// match: (LEAL1 [c] {s} x (SHLLconst [3] y))
// result: (LEAL8 [c] {s} x y)
8927 for {
8928 c := auxIntToInt32(v.AuxInt)
8929 s := auxToSym(v.Aux)
8930 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8931 x := v_0
8932 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
8933 continue
8934 }
8935 y := v_1.Args[0]
8936 v.reset(OpAMD64LEAL8)
8937 v.AuxInt = int32ToAuxInt(c)
8938 v.Aux = symToAux(s)
8939 v.AddArg2(x, y)
8940 return true
8941 }
8942 break
8943 }
8944 return false
8945 }
8946 func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool {
8947 v_1 := v.Args[1]
8948 v_0 := v.Args[0]
// match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
// result: (LEAL2 [c+d] {s} x y)
8952 for {
8953 c := auxIntToInt32(v.AuxInt)
8954 s := auxToSym(v.Aux)
8955 if v_0.Op != OpAMD64ADDLconst {
8956 break
8957 }
8958 d := auxIntToInt32(v_0.AuxInt)
8959 x := v_0.Args[0]
8960 y := v_1
8961 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8962 break
8963 }
8964 v.reset(OpAMD64LEAL2)
8965 v.AuxInt = int32ToAuxInt(c + d)
8966 v.Aux = symToAux(s)
8967 v.AddArg2(x, y)
8968 return true
8969 }
// match: (LEAL2 [c] {s} x (ADDLconst [d] y))
// cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
// result: (LEAL2 [c+2*d] {s} x y)
8973 for {
8974 c := auxIntToInt32(v.AuxInt)
8975 s := auxToSym(v.Aux)
8976 x := v_0
8977 if v_1.Op != OpAMD64ADDLconst {
8978 break
8979 }
8980 d := auxIntToInt32(v_1.AuxInt)
8981 y := v_1.Args[0]
8982 if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
8983 break
8984 }
8985 v.reset(OpAMD64LEAL2)
8986 v.AuxInt = int32ToAuxInt(c + 2*d)
8987 v.Aux = symToAux(s)
8988 v.AddArg2(x, y)
8989 return true
8990 }
// match: (LEAL2 [c] {s} x (SHLLconst [1] y))
// result: (LEAL4 [c] {s} x y)
8993 for {
8994 c := auxIntToInt32(v.AuxInt)
8995 s := auxToSym(v.Aux)
8996 x := v_0
8997 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
8998 break
8999 }
9000 y := v_1.Args[0]
9001 v.reset(OpAMD64LEAL4)
9002 v.AuxInt = int32ToAuxInt(c)
9003 v.Aux = symToAux(s)
9004 v.AddArg2(x, y)
9005 return true
9006 }
// match: (LEAL2 [c] {s} x (SHLLconst [2] y))
// result: (LEAL8 [c] {s} x y)
9009 for {
9010 c := auxIntToInt32(v.AuxInt)
9011 s := auxToSym(v.Aux)
9012 x := v_0
9013 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
9014 break
9015 }
9016 y := v_1.Args[0]
9017 v.reset(OpAMD64LEAL8)
9018 v.AuxInt = int32ToAuxInt(c)
9019 v.Aux = symToAux(s)
9020 v.AddArg2(x, y)
9021 return true
9022 }
9023 return false
9024 }
9025 func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool {
9026 v_1 := v.Args[1]
9027 v_0 := v.Args[0]
// match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
// result: (LEAL4 [c+d] {s} x y)
9031 for {
9032 c := auxIntToInt32(v.AuxInt)
9033 s := auxToSym(v.Aux)
9034 if v_0.Op != OpAMD64ADDLconst {
9035 break
9036 }
9037 d := auxIntToInt32(v_0.AuxInt)
9038 x := v_0.Args[0]
9039 y := v_1
9040 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9041 break
9042 }
9043 v.reset(OpAMD64LEAL4)
9044 v.AuxInt = int32ToAuxInt(c + d)
9045 v.Aux = symToAux(s)
9046 v.AddArg2(x, y)
9047 return true
9048 }
// match: (LEAL4 [c] {s} x (ADDLconst [d] y))
// cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
// result: (LEAL4 [c+4*d] {s} x y)
9052 for {
9053 c := auxIntToInt32(v.AuxInt)
9054 s := auxToSym(v.Aux)
9055 x := v_0
9056 if v_1.Op != OpAMD64ADDLconst {
9057 break
9058 }
9059 d := auxIntToInt32(v_1.AuxInt)
9060 y := v_1.Args[0]
9061 if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
9062 break
9063 }
9064 v.reset(OpAMD64LEAL4)
9065 v.AuxInt = int32ToAuxInt(c + 4*d)
9066 v.Aux = symToAux(s)
9067 v.AddArg2(x, y)
9068 return true
9069 }
// match: (LEAL4 [c] {s} x (SHLLconst [1] y))
// result: (LEAL8 [c] {s} x y)
9072 for {
9073 c := auxIntToInt32(v.AuxInt)
9074 s := auxToSym(v.Aux)
9075 x := v_0
9076 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
9077 break
9078 }
9079 y := v_1.Args[0]
9080 v.reset(OpAMD64LEAL8)
9081 v.AuxInt = int32ToAuxInt(c)
9082 v.Aux = symToAux(s)
9083 v.AddArg2(x, y)
9084 return true
9085 }
9086 return false
9087 }
9088 func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool {
9089 v_1 := v.Args[1]
9090 v_0 := v.Args[0]
// match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
// result: (LEAL8 [c+d] {s} x y)
9094 for {
9095 c := auxIntToInt32(v.AuxInt)
9096 s := auxToSym(v.Aux)
9097 if v_0.Op != OpAMD64ADDLconst {
9098 break
9099 }
9100 d := auxIntToInt32(v_0.AuxInt)
9101 x := v_0.Args[0]
9102 y := v_1
9103 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9104 break
9105 }
9106 v.reset(OpAMD64LEAL8)
9107 v.AuxInt = int32ToAuxInt(c + d)
9108 v.Aux = symToAux(s)
9109 v.AddArg2(x, y)
9110 return true
9111 }
// match: (LEAL8 [c] {s} x (ADDLconst [d] y))
// cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
// result: (LEAL8 [c+8*d] {s} x y)
9115 for {
9116 c := auxIntToInt32(v.AuxInt)
9117 s := auxToSym(v.Aux)
9118 x := v_0
9119 if v_1.Op != OpAMD64ADDLconst {
9120 break
9121 }
9122 d := auxIntToInt32(v_1.AuxInt)
9123 y := v_1.Args[0]
9124 if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
9125 break
9126 }
9127 v.reset(OpAMD64LEAL8)
9128 v.AuxInt = int32ToAuxInt(c + 8*d)
9129 v.Aux = symToAux(s)
9130 v.AddArg2(x, y)
9131 return true
9132 }
9133 return false
9134 }
9135 func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
9136 v_0 := v.Args[0]
// match: (LEAQ [c] {s} (ADDQconst [d] x))
// cond: is32Bit(int64(c)+int64(d))
// result: (LEAQ [c+d] {s} x)
9140 for {
9141 c := auxIntToInt32(v.AuxInt)
9142 s := auxToSym(v.Aux)
9143 if v_0.Op != OpAMD64ADDQconst {
9144 break
9145 }
9146 d := auxIntToInt32(v_0.AuxInt)
9147 x := v_0.Args[0]
9148 if !(is32Bit(int64(c) + int64(d))) {
9149 break
9150 }
9151 v.reset(OpAMD64LEAQ)
9152 v.AuxInt = int32ToAuxInt(c + d)
9153 v.Aux = symToAux(s)
9154 v.AddArg(x)
9155 return true
9156 }
// match: (LEAQ [c] {s} (ADDQ x y))
// cond: x.Op != OpSB && y.Op != OpSB
// result: (LEAQ1 [c] {s} x y)
9160 for {
9161 c := auxIntToInt32(v.AuxInt)
9162 s := auxToSym(v.Aux)
9163 if v_0.Op != OpAMD64ADDQ {
9164 break
9165 }
9166 _ = v_0.Args[1]
9167 v_0_0 := v_0.Args[0]
9168 v_0_1 := v_0.Args[1]
9169 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
9170 x := v_0_0
9171 y := v_0_1
9172 if !(x.Op != OpSB && y.Op != OpSB) {
9173 continue
9174 }
9175 v.reset(OpAMD64LEAQ1)
9176 v.AuxInt = int32ToAuxInt(c)
9177 v.Aux = symToAux(s)
9178 v.AddArg2(x, y)
9179 return true
9180 }
9181 break
9182 }
// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
9186 for {
9187 off1 := auxIntToInt32(v.AuxInt)
9188 sym1 := auxToSym(v.Aux)
9189 if v_0.Op != OpAMD64LEAQ {
9190 break
9191 }
9192 off2 := auxIntToInt32(v_0.AuxInt)
9193 sym2 := auxToSym(v_0.Aux)
9194 x := v_0.Args[0]
9195 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9196 break
9197 }
9198 v.reset(OpAMD64LEAQ)
9199 v.AuxInt = int32ToAuxInt(off1 + off2)
9200 v.Aux = symToAux(mergeSym(sym1, sym2))
9201 v.AddArg(x)
9202 return true
9203 }
// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
9207 for {
9208 off1 := auxIntToInt32(v.AuxInt)
9209 sym1 := auxToSym(v.Aux)
9210 if v_0.Op != OpAMD64LEAQ1 {
9211 break
9212 }
9213 off2 := auxIntToInt32(v_0.AuxInt)
9214 sym2 := auxToSym(v_0.Aux)
9215 y := v_0.Args[1]
9216 x := v_0.Args[0]
9217 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9218 break
9219 }
9220 v.reset(OpAMD64LEAQ1)
9221 v.AuxInt = int32ToAuxInt(off1 + off2)
9222 v.Aux = symToAux(mergeSym(sym1, sym2))
9223 v.AddArg2(x, y)
9224 return true
9225 }
// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
9229 for {
9230 off1 := auxIntToInt32(v.AuxInt)
9231 sym1 := auxToSym(v.Aux)
9232 if v_0.Op != OpAMD64LEAQ2 {
9233 break
9234 }
9235 off2 := auxIntToInt32(v_0.AuxInt)
9236 sym2 := auxToSym(v_0.Aux)
9237 y := v_0.Args[1]
9238 x := v_0.Args[0]
9239 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9240 break
9241 }
9242 v.reset(OpAMD64LEAQ2)
9243 v.AuxInt = int32ToAuxInt(off1 + off2)
9244 v.Aux = symToAux(mergeSym(sym1, sym2))
9245 v.AddArg2(x, y)
9246 return true
9247 }
// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
9251 for {
9252 off1 := auxIntToInt32(v.AuxInt)
9253 sym1 := auxToSym(v.Aux)
9254 if v_0.Op != OpAMD64LEAQ4 {
9255 break
9256 }
9257 off2 := auxIntToInt32(v_0.AuxInt)
9258 sym2 := auxToSym(v_0.Aux)
9259 y := v_0.Args[1]
9260 x := v_0.Args[0]
9261 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9262 break
9263 }
9264 v.reset(OpAMD64LEAQ4)
9265 v.AuxInt = int32ToAuxInt(off1 + off2)
9266 v.Aux = symToAux(mergeSym(sym1, sym2))
9267 v.AddArg2(x, y)
9268 return true
9269 }
// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
9273 for {
9274 off1 := auxIntToInt32(v.AuxInt)
9275 sym1 := auxToSym(v.Aux)
9276 if v_0.Op != OpAMD64LEAQ8 {
9277 break
9278 }
9279 off2 := auxIntToInt32(v_0.AuxInt)
9280 sym2 := auxToSym(v_0.Aux)
9281 y := v_0.Args[1]
9282 x := v_0.Args[0]
9283 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9284 break
9285 }
9286 v.reset(OpAMD64LEAQ8)
9287 v.AuxInt = int32ToAuxInt(off1 + off2)
9288 v.Aux = symToAux(mergeSym(sym1, sym2))
9289 v.AddArg2(x, y)
9290 return true
9291 }
9292 return false
9293 }
9294 func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
9295 v_1 := v.Args[1]
9296 v_0 := v.Args[0]
// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
// result: (LEAQ1 [c+d] {s} x y)
9300 for {
9301 c := auxIntToInt32(v.AuxInt)
9302 s := auxToSym(v.Aux)
9303 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9304 if v_0.Op != OpAMD64ADDQconst {
9305 continue
9306 }
9307 d := auxIntToInt32(v_0.AuxInt)
9308 x := v_0.Args[0]
9309 y := v_1
9310 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9311 continue
9312 }
9313 v.reset(OpAMD64LEAQ1)
9314 v.AuxInt = int32ToAuxInt(c + d)
9315 v.Aux = symToAux(s)
9316 v.AddArg2(x, y)
9317 return true
9318 }
9319 break
9320 }
// match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
// result: (LEAQ2 [c] {s} x y)
9323 for {
9324 c := auxIntToInt32(v.AuxInt)
9325 s := auxToSym(v.Aux)
9326 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9327 x := v_0
9328 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
9329 continue
9330 }
9331 y := v_1.Args[0]
9332 v.reset(OpAMD64LEAQ2)
9333 v.AuxInt = int32ToAuxInt(c)
9334 v.Aux = symToAux(s)
9335 v.AddArg2(x, y)
9336 return true
9337 }
9338 break
9339 }
// match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
// result: (LEAQ4 [c] {s} x y)
9342 for {
9343 c := auxIntToInt32(v.AuxInt)
9344 s := auxToSym(v.Aux)
9345 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9346 x := v_0
9347 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
9348 continue
9349 }
9350 y := v_1.Args[0]
9351 v.reset(OpAMD64LEAQ4)
9352 v.AuxInt = int32ToAuxInt(c)
9353 v.Aux = symToAux(s)
9354 v.AddArg2(x, y)
9355 return true
9356 }
9357 break
9358 }
// match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
// result: (LEAQ8 [c] {s} x y)
9361 for {
9362 c := auxIntToInt32(v.AuxInt)
9363 s := auxToSym(v.Aux)
9364 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9365 x := v_0
9366 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
9367 continue
9368 }
9369 y := v_1.Args[0]
9370 v.reset(OpAMD64LEAQ8)
9371 v.AuxInt = int32ToAuxInt(c)
9372 v.Aux = symToAux(s)
9373 v.AddArg2(x, y)
9374 return true
9375 }
9376 break
9377 }
// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
9381 for {
9382 off1 := auxIntToInt32(v.AuxInt)
9383 sym1 := auxToSym(v.Aux)
9384 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9385 if v_0.Op != OpAMD64LEAQ {
9386 continue
9387 }
9388 off2 := auxIntToInt32(v_0.AuxInt)
9389 sym2 := auxToSym(v_0.Aux)
9390 x := v_0.Args[0]
9391 y := v_1
9392 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9393 continue
9394 }
9395 v.reset(OpAMD64LEAQ1)
9396 v.AuxInt = int32ToAuxInt(off1 + off2)
9397 v.Aux = symToAux(mergeSym(sym1, sym2))
9398 v.AddArg2(x, y)
9399 return true
9400 }
9401 break
9402 }
// match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
9406 for {
9407 off1 := auxIntToInt32(v.AuxInt)
9408 sym1 := auxToSym(v.Aux)
9409 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9410 x := v_0
9411 if v_1.Op != OpAMD64LEAQ1 {
9412 continue
9413 }
9414 off2 := auxIntToInt32(v_1.AuxInt)
9415 sym2 := auxToSym(v_1.Aux)
9416 y := v_1.Args[1]
9417 if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9418 continue
9419 }
9420 v.reset(OpAMD64LEAQ2)
9421 v.AuxInt = int32ToAuxInt(off1 + off2)
9422 v.Aux = symToAux(mergeSym(sym1, sym2))
9423 v.AddArg2(x, y)
9424 return true
9425 }
9426 break
9427 }
// match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y))
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} y x)
9431 for {
9432 off1 := auxIntToInt32(v.AuxInt)
9433 sym1 := auxToSym(v.Aux)
9434 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9435 x := v_0
9436 if v_1.Op != OpAMD64LEAQ1 {
9437 continue
9438 }
9439 off2 := auxIntToInt32(v_1.AuxInt)
9440 sym2 := auxToSym(v_1.Aux)
9441 _ = v_1.Args[1]
9442 v_1_0 := v_1.Args[0]
9443 v_1_1 := v_1.Args[1]
9444 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
9445 if x != v_1_0 {
9446 continue
9447 }
9448 y := v_1_1
9449 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9450 continue
9451 }
9452 v.reset(OpAMD64LEAQ2)
9453 v.AuxInt = int32ToAuxInt(off1 + off2)
9454 v.Aux = symToAux(mergeSym(sym1, sym2))
9455 v.AddArg2(y, x)
9456 return true
9457 }
9458 }
9459 break
9460 }
// match: (LEAQ1 [0] x y)
// cond: v.Aux == nil
// result: (ADDQ x y)
9464 for {
9465 if auxIntToInt32(v.AuxInt) != 0 {
9466 break
9467 }
9468 x := v_0
9469 y := v_1
9470 if !(v.Aux == nil) {
9471 break
9472 }
9473 v.reset(OpAMD64ADDQ)
9474 v.AddArg2(x, y)
9475 return true
9476 }
9477 return false
9478 }
9479 func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
9480 v_1 := v.Args[1]
9481 v_0 := v.Args[0]
// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
// result: (LEAQ2 [c+d] {s} x y)
9485 for {
9486 c := auxIntToInt32(v.AuxInt)
9487 s := auxToSym(v.Aux)
9488 if v_0.Op != OpAMD64ADDQconst {
9489 break
9490 }
9491 d := auxIntToInt32(v_0.AuxInt)
9492 x := v_0.Args[0]
9493 y := v_1
9494 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9495 break
9496 }
9497 v.reset(OpAMD64LEAQ2)
9498 v.AuxInt = int32ToAuxInt(c + d)
9499 v.Aux = symToAux(s)
9500 v.AddArg2(x, y)
9501 return true
9502 }
// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
// cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
// result: (LEAQ2 [c+2*d] {s} x y)
9506 for {
9507 c := auxIntToInt32(v.AuxInt)
9508 s := auxToSym(v.Aux)
9509 x := v_0
9510 if v_1.Op != OpAMD64ADDQconst {
9511 break
9512 }
9513 d := auxIntToInt32(v_1.AuxInt)
9514 y := v_1.Args[0]
9515 if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
9516 break
9517 }
9518 v.reset(OpAMD64LEAQ2)
9519 v.AuxInt = int32ToAuxInt(c + 2*d)
9520 v.Aux = symToAux(s)
9521 v.AddArg2(x, y)
9522 return true
9523 }
// match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
// result: (LEAQ4 [c] {s} x y)
9526 for {
9527 c := auxIntToInt32(v.AuxInt)
9528 s := auxToSym(v.Aux)
9529 x := v_0
9530 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
9531 break
9532 }
9533 y := v_1.Args[0]
9534 v.reset(OpAMD64LEAQ4)
9535 v.AuxInt = int32ToAuxInt(c)
9536 v.Aux = symToAux(s)
9537 v.AddArg2(x, y)
9538 return true
9539 }
// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
// result: (LEAQ8 [c] {s} x y)
9542 for {
9543 c := auxIntToInt32(v.AuxInt)
9544 s := auxToSym(v.Aux)
9545 x := v_0
9546 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
9547 break
9548 }
9549 y := v_1.Args[0]
9550 v.reset(OpAMD64LEAQ8)
9551 v.AuxInt = int32ToAuxInt(c)
9552 v.Aux = symToAux(s)
9553 v.AddArg2(x, y)
9554 return true
9555 }
// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
9559 for {
9560 off1 := auxIntToInt32(v.AuxInt)
9561 sym1 := auxToSym(v.Aux)
9562 if v_0.Op != OpAMD64LEAQ {
9563 break
9564 }
9565 off2 := auxIntToInt32(v_0.AuxInt)
9566 sym2 := auxToSym(v_0.Aux)
9567 x := v_0.Args[0]
9568 y := v_1
9569 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9570 break
9571 }
9572 v.reset(OpAMD64LEAQ2)
9573 v.AuxInt = int32ToAuxInt(off1 + off2)
9574 v.Aux = symToAux(mergeSym(sym1, sym2))
9575 v.AddArg2(x, y)
9576 return true
9577 }
// match: (LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
// cond: is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil
// result: (LEAQ4 [off1+2*off2] {sym1} x y)
9581 for {
9582 off1 := auxIntToInt32(v.AuxInt)
9583 sym1 := auxToSym(v.Aux)
9584 x := v_0
9585 if v_1.Op != OpAMD64LEAQ1 {
9586 break
9587 }
9588 off2 := auxIntToInt32(v_1.AuxInt)
9589 sym2 := auxToSym(v_1.Aux)
9590 y := v_1.Args[1]
9591 if y != v_1.Args[0] || !(is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil) {
9592 break
9593 }
9594 v.reset(OpAMD64LEAQ4)
9595 v.AuxInt = int32ToAuxInt(off1 + 2*off2)
9596 v.Aux = symToAux(sym1)
9597 v.AddArg2(x, y)
9598 return true
9599 }
// match: (LEAQ2 [off] {sym} x (MOVQconst [scale]))
// cond: is32Bit(int64(off)+int64(scale)*2)
// result: (LEAQ [off+int32(scale)*2] {sym} x)
9603 for {
9604 off := auxIntToInt32(v.AuxInt)
9605 sym := auxToSym(v.Aux)
9606 x := v_0
9607 if v_1.Op != OpAMD64MOVQconst {
9608 break
9609 }
9610 scale := auxIntToInt64(v_1.AuxInt)
9611 if !(is32Bit(int64(off) + int64(scale)*2)) {
9612 break
9613 }
9614 v.reset(OpAMD64LEAQ)
9615 v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
9616 v.Aux = symToAux(sym)
9617 v.AddArg(x)
9618 return true
9619 }
// match: (LEAQ2 [off] {sym} x (MOVLconst [scale]))
// cond: is32Bit(int64(off)+int64(scale)*2)
// result: (LEAQ [off+int32(scale)*2] {sym} x)
9623 for {
9624 off := auxIntToInt32(v.AuxInt)
9625 sym := auxToSym(v.Aux)
9626 x := v_0
9627 if v_1.Op != OpAMD64MOVLconst {
9628 break
9629 }
9630 scale := auxIntToInt32(v_1.AuxInt)
9631 if !(is32Bit(int64(off) + int64(scale)*2)) {
9632 break
9633 }
9634 v.reset(OpAMD64LEAQ)
9635 v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
9636 v.Aux = symToAux(sym)
9637 v.AddArg(x)
9638 return true
9639 }
9640 return false
9641 }
9642 func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
9643 v_1 := v.Args[1]
9644 v_0 := v.Args[0]
// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
// result: (LEAQ4 [c+d] {s} x y)
9648 for {
9649 c := auxIntToInt32(v.AuxInt)
9650 s := auxToSym(v.Aux)
9651 if v_0.Op != OpAMD64ADDQconst {
9652 break
9653 }
9654 d := auxIntToInt32(v_0.AuxInt)
9655 x := v_0.Args[0]
9656 y := v_1
9657 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9658 break
9659 }
9660 v.reset(OpAMD64LEAQ4)
9661 v.AuxInt = int32ToAuxInt(c + d)
9662 v.Aux = symToAux(s)
9663 v.AddArg2(x, y)
9664 return true
9665 }
// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
// cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
// result: (LEAQ4 [c+4*d] {s} x y)
9669 for {
9670 c := auxIntToInt32(v.AuxInt)
9671 s := auxToSym(v.Aux)
9672 x := v_0
9673 if v_1.Op != OpAMD64ADDQconst {
9674 break
9675 }
9676 d := auxIntToInt32(v_1.AuxInt)
9677 y := v_1.Args[0]
9678 if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
9679 break
9680 }
9681 v.reset(OpAMD64LEAQ4)
9682 v.AuxInt = int32ToAuxInt(c + 4*d)
9683 v.Aux = symToAux(s)
9684 v.AddArg2(x, y)
9685 return true
9686 }
// match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
// result: (LEAQ8 [c] {s} x y)
9689 for {
9690 c := auxIntToInt32(v.AuxInt)
9691 s := auxToSym(v.Aux)
9692 x := v_0
9693 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
9694 break
9695 }
9696 y := v_1.Args[0]
9697 v.reset(OpAMD64LEAQ8)
9698 v.AuxInt = int32ToAuxInt(c)
9699 v.Aux = symToAux(s)
9700 v.AddArg2(x, y)
9701 return true
9702 }
// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
9706 for {
9707 off1 := auxIntToInt32(v.AuxInt)
9708 sym1 := auxToSym(v.Aux)
9709 if v_0.Op != OpAMD64LEAQ {
9710 break
9711 }
9712 off2 := auxIntToInt32(v_0.AuxInt)
9713 sym2 := auxToSym(v_0.Aux)
9714 x := v_0.Args[0]
9715 y := v_1
9716 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9717 break
9718 }
9719 v.reset(OpAMD64LEAQ4)
9720 v.AuxInt = int32ToAuxInt(off1 + off2)
9721 v.Aux = symToAux(mergeSym(sym1, sym2))
9722 v.AddArg2(x, y)
9723 return true
9724 }
// match: (LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
// cond: is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil
// result: (LEAQ8 [off1+4*off2] {sym1} x y)
9728 for {
9729 off1 := auxIntToInt32(v.AuxInt)
9730 sym1 := auxToSym(v.Aux)
9731 x := v_0
9732 if v_1.Op != OpAMD64LEAQ1 {
9733 break
9734 }
9735 off2 := auxIntToInt32(v_1.AuxInt)
9736 sym2 := auxToSym(v_1.Aux)
9737 y := v_1.Args[1]
9738 if y != v_1.Args[0] || !(is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil) {
9739 break
9740 }
9741 v.reset(OpAMD64LEAQ8)
9742 v.AuxInt = int32ToAuxInt(off1 + 4*off2)
9743 v.Aux = symToAux(sym1)
9744 v.AddArg2(x, y)
9745 return true
9746 }
// match: (LEAQ4 [off] {sym} x (MOVQconst [scale]))
// cond: is32Bit(int64(off)+int64(scale)*4)
// result: (LEAQ [off+int32(scale)*4] {sym} x)
9750 for {
9751 off := auxIntToInt32(v.AuxInt)
9752 sym := auxToSym(v.Aux)
9753 x := v_0
9754 if v_1.Op != OpAMD64MOVQconst {
9755 break
9756 }
9757 scale := auxIntToInt64(v_1.AuxInt)
9758 if !(is32Bit(int64(off) + int64(scale)*4)) {
9759 break
9760 }
9761 v.reset(OpAMD64LEAQ)
9762 v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
9763 v.Aux = symToAux(sym)
9764 v.AddArg(x)
9765 return true
9766 }
// match: (LEAQ4 [off] {sym} x (MOVLconst [scale]))
// cond: is32Bit(int64(off)+int64(scale)*4)
// result: (LEAQ [off+int32(scale)*4] {sym} x)
9770 for {
9771 off := auxIntToInt32(v.AuxInt)
9772 sym := auxToSym(v.Aux)
9773 x := v_0
9774 if v_1.Op != OpAMD64MOVLconst {
9775 break
9776 }
9777 scale := auxIntToInt32(v_1.AuxInt)
9778 if !(is32Bit(int64(off) + int64(scale)*4)) {
9779 break
9780 }
9781 v.reset(OpAMD64LEAQ)
9782 v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
9783 v.Aux = symToAux(sym)
9784 v.AddArg(x)
9785 return true
9786 }
9787 return false
9788 }
9789 func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
9790 v_1 := v.Args[1]
9791 v_0 := v.Args[0]
// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
// result: (LEAQ8 [c+d] {s} x y)
9795 for {
9796 c := auxIntToInt32(v.AuxInt)
9797 s := auxToSym(v.Aux)
9798 if v_0.Op != OpAMD64ADDQconst {
9799 break
9800 }
9801 d := auxIntToInt32(v_0.AuxInt)
9802 x := v_0.Args[0]
9803 y := v_1
9804 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9805 break
9806 }
9807 v.reset(OpAMD64LEAQ8)
9808 v.AuxInt = int32ToAuxInt(c + d)
9809 v.Aux = symToAux(s)
9810 v.AddArg2(x, y)
9811 return true
9812 }
// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
// cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
// result: (LEAQ8 [c+8*d] {s} x y)
9816 for {
9817 c := auxIntToInt32(v.AuxInt)
9818 s := auxToSym(v.Aux)
9819 x := v_0
9820 if v_1.Op != OpAMD64ADDQconst {
9821 break
9822 }
9823 d := auxIntToInt32(v_1.AuxInt)
9824 y := v_1.Args[0]
9825 if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
9826 break
9827 }
9828 v.reset(OpAMD64LEAQ8)
9829 v.AuxInt = int32ToAuxInt(c + 8*d)
9830 v.Aux = symToAux(s)
9831 v.AddArg2(x, y)
9832 return true
9833 }
// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
9837 for {
9838 off1 := auxIntToInt32(v.AuxInt)
9839 sym1 := auxToSym(v.Aux)
9840 if v_0.Op != OpAMD64LEAQ {
9841 break
9842 }
9843 off2 := auxIntToInt32(v_0.AuxInt)
9844 sym2 := auxToSym(v_0.Aux)
9845 x := v_0.Args[0]
9846 y := v_1
9847 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9848 break
9849 }
9850 v.reset(OpAMD64LEAQ8)
9851 v.AuxInt = int32ToAuxInt(off1 + off2)
9852 v.Aux = symToAux(mergeSym(sym1, sym2))
9853 v.AddArg2(x, y)
9854 return true
9855 }
// match: (LEAQ8 [off] {sym} x (MOVQconst [scale]))
// cond: is32Bit(int64(off)+int64(scale)*8)
// result: (LEAQ [off+int32(scale)*8] {sym} x)
9859 for {
9860 off := auxIntToInt32(v.AuxInt)
9861 sym := auxToSym(v.Aux)
9862 x := v_0
9863 if v_1.Op != OpAMD64MOVQconst {
9864 break
9865 }
9866 scale := auxIntToInt64(v_1.AuxInt)
9867 if !(is32Bit(int64(off) + int64(scale)*8)) {
9868 break
9869 }
9870 v.reset(OpAMD64LEAQ)
9871 v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
9872 v.Aux = symToAux(sym)
9873 v.AddArg(x)
9874 return true
9875 }
// match: (LEAQ8 [off] {sym} x (MOVLconst [scale]))
// cond: is32Bit(int64(off)+int64(scale)*8)
// result: (LEAQ [off+int32(scale)*8] {sym} x)
9879 for {
9880 off := auxIntToInt32(v.AuxInt)
9881 sym := auxToSym(v.Aux)
9882 x := v_0
9883 if v_1.Op != OpAMD64MOVLconst {
9884 break
9885 }
9886 scale := auxIntToInt32(v_1.AuxInt)
9887 if !(is32Bit(int64(off) + int64(scale)*8)) {
9888 break
9889 }
9890 v.reset(OpAMD64LEAQ)
9891 v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
9892 v.Aux = symToAux(sym)
9893 v.AddArg(x)
9894 return true
9895 }
9896 return false
9897 }
9898 func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
9899 v_0 := v.Args[0]
9900 b := v.Block
// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
9904 for {
9905 x := v_0
9906 if x.Op != OpAMD64MOVBload {
9907 break
9908 }
9909 off := auxIntToInt32(x.AuxInt)
9910 sym := auxToSym(x.Aux)
9911 mem := x.Args[1]
9912 ptr := x.Args[0]
9913 if !(x.Uses == 1 && clobber(x)) {
9914 break
9915 }
9916 b = x.Block
9917 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9918 v.copyOf(v0)
9919 v0.AuxInt = int32ToAuxInt(off)
9920 v0.Aux = symToAux(sym)
9921 v0.AddArg2(ptr, mem)
9922 return true
9923 }
// match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
9927 for {
9928 x := v_0
9929 if x.Op != OpAMD64MOVWload {
9930 break
9931 }
9932 off := auxIntToInt32(x.AuxInt)
9933 sym := auxToSym(x.Aux)
9934 mem := x.Args[1]
9935 ptr := x.Args[0]
9936 if !(x.Uses == 1 && clobber(x)) {
9937 break
9938 }
9939 b = x.Block
9940 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9941 v.copyOf(v0)
9942 v0.AuxInt = int32ToAuxInt(off)
9943 v0.Aux = symToAux(sym)
9944 v0.AddArg2(ptr, mem)
9945 return true
9946 }
// match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
9950 for {
9951 x := v_0
9952 if x.Op != OpAMD64MOVLload {
9953 break
9954 }
9955 off := auxIntToInt32(x.AuxInt)
9956 sym := auxToSym(x.Aux)
9957 mem := x.Args[1]
9958 ptr := x.Args[0]
9959 if !(x.Uses == 1 && clobber(x)) {
9960 break
9961 }
9962 b = x.Block
9963 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9964 v.copyOf(v0)
9965 v0.AuxInt = int32ToAuxInt(off)
9966 v0.Aux = symToAux(sym)
9967 v0.AddArg2(ptr, mem)
9968 return true
9969 }
// match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
9973 for {
9974 x := v_0
9975 if x.Op != OpAMD64MOVQload {
9976 break
9977 }
9978 off := auxIntToInt32(x.AuxInt)
9979 sym := auxToSym(x.Aux)
9980 mem := x.Args[1]
9981 ptr := x.Args[0]
9982 if !(x.Uses == 1 && clobber(x)) {
9983 break
9984 }
9985 b = x.Block
9986 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9987 v.copyOf(v0)
9988 v0.AuxInt = int32ToAuxInt(off)
9989 v0.Aux = symToAux(sym)
9990 v0.AddArg2(ptr, mem)
9991 return true
9992 }
// match: (MOVBQSX (ANDLconst [c] x))
// cond: c & 0x80 == 0
// result: (ANDLconst [c & 0x7f] x)
9996 for {
9997 if v_0.Op != OpAMD64ANDLconst {
9998 break
9999 }
10000 c := auxIntToInt32(v_0.AuxInt)
10001 x := v_0.Args[0]
10002 if !(c&0x80 == 0) {
10003 break
10004 }
10005 v.reset(OpAMD64ANDLconst)
10006 v.AuxInt = int32ToAuxInt(c & 0x7f)
10007 v.AddArg(x)
10008 return true
10009 }
// match: (MOVBQSX (MOVBQSX x))
// result: (MOVBQSX x)
10012 for {
10013 if v_0.Op != OpAMD64MOVBQSX {
10014 break
10015 }
10016 x := v_0.Args[0]
10017 v.reset(OpAMD64MOVBQSX)
10018 v.AddArg(x)
10019 return true
10020 }
10021 return false
10022 }
10023 func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool {
10024 v_1 := v.Args[1]
10025 v_0 := v.Args[0]
// match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: (MOVBQSX x)
10029 for {
10030 off := auxIntToInt32(v.AuxInt)
10031 sym := auxToSym(v.Aux)
10032 ptr := v_0
10033 if v_1.Op != OpAMD64MOVBstore {
10034 break
10035 }
10036 off2 := auxIntToInt32(v_1.AuxInt)
10037 sym2 := auxToSym(v_1.Aux)
10038 x := v_1.Args[1]
10039 ptr2 := v_1.Args[0]
10040 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
10041 break
10042 }
10043 v.reset(OpAMD64MOVBQSX)
10044 v.AddArg(x)
10045 return true
10046 }
// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
10050 for {
10051 off1 := auxIntToInt32(v.AuxInt)
10052 sym1 := auxToSym(v.Aux)
10053 if v_0.Op != OpAMD64LEAQ {
10054 break
10055 }
10056 off2 := auxIntToInt32(v_0.AuxInt)
10057 sym2 := auxToSym(v_0.Aux)
10058 base := v_0.Args[0]
10059 mem := v_1
10060 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10061 break
10062 }
10063 v.reset(OpAMD64MOVBQSXload)
10064 v.AuxInt = int32ToAuxInt(off1 + off2)
10065 v.Aux = symToAux(mergeSym(sym1, sym2))
10066 v.AddArg2(base, mem)
10067 return true
10068 }
10069 return false
10070 }
10071 func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
10072 v_0 := v.Args[0]
10073 b := v.Block
// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
10077 for {
10078 x := v_0
10079 if x.Op != OpAMD64MOVBload {
10080 break
10081 }
10082 off := auxIntToInt32(x.AuxInt)
10083 sym := auxToSym(x.Aux)
10084 mem := x.Args[1]
10085 ptr := x.Args[0]
10086 if !(x.Uses == 1 && clobber(x)) {
10087 break
10088 }
10089 b = x.Block
10090 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
10091 v.copyOf(v0)
10092 v0.AuxInt = int32ToAuxInt(off)
10093 v0.Aux = symToAux(sym)
10094 v0.AddArg2(ptr, mem)
10095 return true
10096 }
// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
10100 for {
10101 x := v_0
10102 if x.Op != OpAMD64MOVWload {
10103 break
10104 }
10105 off := auxIntToInt32(x.AuxInt)
10106 sym := auxToSym(x.Aux)
10107 mem := x.Args[1]
10108 ptr := x.Args[0]
10109 if !(x.Uses == 1 && clobber(x)) {
10110 break
10111 }
10112 b = x.Block
10113 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
10114 v.copyOf(v0)
10115 v0.AuxInt = int32ToAuxInt(off)
10116 v0.Aux = symToAux(sym)
10117 v0.AddArg2(ptr, mem)
10118 return true
10119 }
// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
10123 for {
10124 x := v_0
10125 if x.Op != OpAMD64MOVLload {
10126 break
10127 }
10128 off := auxIntToInt32(x.AuxInt)
10129 sym := auxToSym(x.Aux)
10130 mem := x.Args[1]
10131 ptr := x.Args[0]
10132 if !(x.Uses == 1 && clobber(x)) {
10133 break
10134 }
10135 b = x.Block
10136 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
10137 v.copyOf(v0)
10138 v0.AuxInt = int32ToAuxInt(off)
10139 v0.Aux = symToAux(sym)
10140 v0.AddArg2(ptr, mem)
10141 return true
10142 }
// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
10146 for {
10147 x := v_0
10148 if x.Op != OpAMD64MOVQload {
10149 break
10150 }
10151 off := auxIntToInt32(x.AuxInt)
10152 sym := auxToSym(x.Aux)
10153 mem := x.Args[1]
10154 ptr := x.Args[0]
10155 if !(x.Uses == 1 && clobber(x)) {
10156 break
10157 }
10158 b = x.Block
10159 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
10160 v.copyOf(v0)
10161 v0.AuxInt = int32ToAuxInt(off)
10162 v0.Aux = symToAux(sym)
10163 v0.AddArg2(ptr, mem)
10164 return true
10165 }
// match: (MOVBQZX x)
// cond: zeroUpper56Bits(x,3)
// result: x
10169 for {
10170 x := v_0
10171 if !(zeroUpper56Bits(x, 3)) {
10172 break
10173 }
10174 v.copyOf(x)
10175 return true
10176 }
// match: (MOVBQZX (ANDLconst [c] x))
// result: (ANDLconst [c & 0xff] x)
10179 for {
10180 if v_0.Op != OpAMD64ANDLconst {
10181 break
10182 }
10183 c := auxIntToInt32(v_0.AuxInt)
10184 x := v_0.Args[0]
10185 v.reset(OpAMD64ANDLconst)
10186 v.AuxInt = int32ToAuxInt(c & 0xff)
10187 v.AddArg(x)
10188 return true
10189 }
// match: (MOVBQZX (MOVBQZX x))
// result: (MOVBQZX x)
10192 for {
10193 if v_0.Op != OpAMD64MOVBQZX {
10194 break
10195 }
10196 x := v_0.Args[0]
10197 v.reset(OpAMD64MOVBQZX)
10198 v.AddArg(x)
10199 return true
10200 }
10201 return false
10202 }
10203 func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool {
10204 v_1 := v.Args[1]
10205 v_0 := v.Args[0]
// match: (MOVBatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVBatomicload [off1+off2] {sym} ptr mem)
10209 for {
10210 off1 := auxIntToInt32(v.AuxInt)
10211 sym := auxToSym(v.Aux)
10212 if v_0.Op != OpAMD64ADDQconst {
10213 break
10214 }
10215 off2 := auxIntToInt32(v_0.AuxInt)
10216 ptr := v_0.Args[0]
10217 mem := v_1
10218 if !(is32Bit(int64(off1) + int64(off2))) {
10219 break
10220 }
10221 v.reset(OpAMD64MOVBatomicload)
10222 v.AuxInt = int32ToAuxInt(off1 + off2)
10223 v.Aux = symToAux(sym)
10224 v.AddArg2(ptr, mem)
10225 return true
10226 }
// match: (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVBatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
10230 for {
10231 off1 := auxIntToInt32(v.AuxInt)
10232 sym1 := auxToSym(v.Aux)
10233 if v_0.Op != OpAMD64LEAQ {
10234 break
10235 }
10236 off2 := auxIntToInt32(v_0.AuxInt)
10237 sym2 := auxToSym(v_0.Aux)
10238 ptr := v_0.Args[0]
10239 mem := v_1
10240 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10241 break
10242 }
10243 v.reset(OpAMD64MOVBatomicload)
10244 v.AuxInt = int32ToAuxInt(off1 + off2)
10245 v.Aux = symToAux(mergeSym(sym1, sym2))
10246 v.AddArg2(ptr, mem)
10247 return true
10248 }
10249 return false
10250 }
10251 func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
10252 v_1 := v.Args[1]
10253 v_0 := v.Args[0]
// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: (MOVBQZX x)
10257 for {
10258 off := auxIntToInt32(v.AuxInt)
10259 sym := auxToSym(v.Aux)
10260 ptr := v_0
10261 if v_1.Op != OpAMD64MOVBstore {
10262 break
10263 }
10264 off2 := auxIntToInt32(v_1.AuxInt)
10265 sym2 := auxToSym(v_1.Aux)
10266 x := v_1.Args[1]
10267 ptr2 := v_1.Args[0]
10268 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
10269 break
10270 }
10271 v.reset(OpAMD64MOVBQZX)
10272 v.AddArg(x)
10273 return true
10274 }
// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVBload [off1+off2] {sym} ptr mem)
10278 for {
10279 off1 := auxIntToInt32(v.AuxInt)
10280 sym := auxToSym(v.Aux)
10281 if v_0.Op != OpAMD64ADDQconst {
10282 break
10283 }
10284 off2 := auxIntToInt32(v_0.AuxInt)
10285 ptr := v_0.Args[0]
10286 mem := v_1
10287 if !(is32Bit(int64(off1) + int64(off2))) {
10288 break
10289 }
10290 v.reset(OpAMD64MOVBload)
10291 v.AuxInt = int32ToAuxInt(off1 + off2)
10292 v.Aux = symToAux(sym)
10293 v.AddArg2(ptr, mem)
10294 return true
10295 }
// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
10299 for {
10300 off1 := auxIntToInt32(v.AuxInt)
10301 sym1 := auxToSym(v.Aux)
10302 if v_0.Op != OpAMD64LEAQ {
10303 break
10304 }
10305 off2 := auxIntToInt32(v_0.AuxInt)
10306 sym2 := auxToSym(v_0.Aux)
10307 base := v_0.Args[0]
10308 mem := v_1
10309 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10310 break
10311 }
10312 v.reset(OpAMD64MOVBload)
10313 v.AuxInt = int32ToAuxInt(off1 + off2)
10314 v.Aux = symToAux(mergeSym(sym1, sym2))
10315 v.AddArg2(base, mem)
10316 return true
10317 }
// match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
// cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
10321 for {
10322 off1 := auxIntToInt32(v.AuxInt)
10323 sym1 := auxToSym(v.Aux)
10324 if v_0.Op != OpAMD64LEAL {
10325 break
10326 }
10327 off2 := auxIntToInt32(v_0.AuxInt)
10328 sym2 := auxToSym(v_0.Aux)
10329 base := v_0.Args[0]
10330 mem := v_1
10331 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
10332 break
10333 }
10334 v.reset(OpAMD64MOVBload)
10335 v.AuxInt = int32ToAuxInt(off1 + off2)
10336 v.Aux = symToAux(mergeSym(sym1, sym2))
10337 v.AddArg2(base, mem)
10338 return true
10339 }
// match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVBload [off1+off2] {sym} ptr mem)
10343 for {
10344 off1 := auxIntToInt32(v.AuxInt)
10345 sym := auxToSym(v.Aux)
10346 if v_0.Op != OpAMD64ADDLconst {
10347 break
10348 }
10349 off2 := auxIntToInt32(v_0.AuxInt)
10350 ptr := v_0.Args[0]
10351 mem := v_1
10352 if !(is32Bit(int64(off1) + int64(off2))) {
10353 break
10354 }
10355 v.reset(OpAMD64MOVBload)
10356 v.AuxInt = int32ToAuxInt(off1 + off2)
10357 v.Aux = symToAux(sym)
10358 v.AddArg2(ptr, mem)
10359 return true
10360 }
// match: (MOVBload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVLconst [int32(read8(sym, int64(off)))])
10364 for {
10365 off := auxIntToInt32(v.AuxInt)
10366 sym := auxToSym(v.Aux)
10367 if v_0.Op != OpSB || !(symIsRO(sym)) {
10368 break
10369 }
10370 v.reset(OpAMD64MOVLconst)
10371 v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
10372 return true
10373 }
10374 return false
10375 }
10376 func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
10377 v_2 := v.Args[2]
10378 v_1 := v.Args[1]
10379 v_0 := v.Args[0]
10380 b := v.Block
10381 typ := &b.Func.Config.Types
// match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
// cond: y.Uses == 1
// result: (SETLstore [off] {sym} ptr x mem)
10385 for {
10386 off := auxIntToInt32(v.AuxInt)
10387 sym := auxToSym(v.Aux)
10388 ptr := v_0
10389 y := v_1
10390 if y.Op != OpAMD64SETL {
10391 break
10392 }
10393 x := y.Args[0]
10394 mem := v_2
10395 if !(y.Uses == 1) {
10396 break
10397 }
10398 v.reset(OpAMD64SETLstore)
10399 v.AuxInt = int32ToAuxInt(off)
10400 v.Aux = symToAux(sym)
10401 v.AddArg3(ptr, x, mem)
10402 return true
10403 }
// match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
// cond: y.Uses == 1
// result: (SETLEstore [off] {sym} ptr x mem)
10407 for {
10408 off := auxIntToInt32(v.AuxInt)
10409 sym := auxToSym(v.Aux)
10410 ptr := v_0
10411 y := v_1
10412 if y.Op != OpAMD64SETLE {
10413 break
10414 }
10415 x := y.Args[0]
10416 mem := v_2
10417 if !(y.Uses == 1) {
10418 break
10419 }
10420 v.reset(OpAMD64SETLEstore)
10421 v.AuxInt = int32ToAuxInt(off)
10422 v.Aux = symToAux(sym)
10423 v.AddArg3(ptr, x, mem)
10424 return true
10425 }
// match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
// cond: y.Uses == 1
// result: (SETGstore [off] {sym} ptr x mem)
10429 for {
10430 off := auxIntToInt32(v.AuxInt)
10431 sym := auxToSym(v.Aux)
10432 ptr := v_0
10433 y := v_1
10434 if y.Op != OpAMD64SETG {
10435 break
10436 }
10437 x := y.Args[0]
10438 mem := v_2
10439 if !(y.Uses == 1) {
10440 break
10441 }
10442 v.reset(OpAMD64SETGstore)
10443 v.AuxInt = int32ToAuxInt(off)
10444 v.Aux = symToAux(sym)
10445 v.AddArg3(ptr, x, mem)
10446 return true
10447 }
// match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
// cond: y.Uses == 1
// result: (SETGEstore [off] {sym} ptr x mem)
10451 for {
10452 off := auxIntToInt32(v.AuxInt)
10453 sym := auxToSym(v.Aux)
10454 ptr := v_0
10455 y := v_1
10456 if y.Op != OpAMD64SETGE {
10457 break
10458 }
10459 x := y.Args[0]
10460 mem := v_2
10461 if !(y.Uses == 1) {
10462 break
10463 }
10464 v.reset(OpAMD64SETGEstore)
10465 v.AuxInt = int32ToAuxInt(off)
10466 v.Aux = symToAux(sym)
10467 v.AddArg3(ptr, x, mem)
10468 return true
10469 }
// match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
// cond: y.Uses == 1
// result: (SETEQstore [off] {sym} ptr x mem)
10473 for {
10474 off := auxIntToInt32(v.AuxInt)
10475 sym := auxToSym(v.Aux)
10476 ptr := v_0
10477 y := v_1
10478 if y.Op != OpAMD64SETEQ {
10479 break
10480 }
10481 x := y.Args[0]
10482 mem := v_2
10483 if !(y.Uses == 1) {
10484 break
10485 }
10486 v.reset(OpAMD64SETEQstore)
10487 v.AuxInt = int32ToAuxInt(off)
10488 v.Aux = symToAux(sym)
10489 v.AddArg3(ptr, x, mem)
10490 return true
10491 }
// match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
// cond: y.Uses == 1
// result: (SETNEstore [off] {sym} ptr x mem)
10495 for {
10496 off := auxIntToInt32(v.AuxInt)
10497 sym := auxToSym(v.Aux)
10498 ptr := v_0
10499 y := v_1
10500 if y.Op != OpAMD64SETNE {
10501 break
10502 }
10503 x := y.Args[0]
10504 mem := v_2
10505 if !(y.Uses == 1) {
10506 break
10507 }
10508 v.reset(OpAMD64SETNEstore)
10509 v.AuxInt = int32ToAuxInt(off)
10510 v.Aux = symToAux(sym)
10511 v.AddArg3(ptr, x, mem)
10512 return true
10513 }
// match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
// cond: y.Uses == 1
// result: (SETBstore [off] {sym} ptr x mem)
10517 for {
10518 off := auxIntToInt32(v.AuxInt)
10519 sym := auxToSym(v.Aux)
10520 ptr := v_0
10521 y := v_1
10522 if y.Op != OpAMD64SETB {
10523 break
10524 }
10525 x := y.Args[0]
10526 mem := v_2
10527 if !(y.Uses == 1) {
10528 break
10529 }
10530 v.reset(OpAMD64SETBstore)
10531 v.AuxInt = int32ToAuxInt(off)
10532 v.Aux = symToAux(sym)
10533 v.AddArg3(ptr, x, mem)
10534 return true
10535 }
// match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
// cond: y.Uses == 1
// result: (SETBEstore [off] {sym} ptr x mem)
10539 for {
10540 off := auxIntToInt32(v.AuxInt)
10541 sym := auxToSym(v.Aux)
10542 ptr := v_0
10543 y := v_1
10544 if y.Op != OpAMD64SETBE {
10545 break
10546 }
10547 x := y.Args[0]
10548 mem := v_2
10549 if !(y.Uses == 1) {
10550 break
10551 }
10552 v.reset(OpAMD64SETBEstore)
10553 v.AuxInt = int32ToAuxInt(off)
10554 v.Aux = symToAux(sym)
10555 v.AddArg3(ptr, x, mem)
10556 return true
10557 }
// match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
// cond: y.Uses == 1
// result: (SETAstore [off] {sym} ptr x mem)
10561 for {
10562 off := auxIntToInt32(v.AuxInt)
10563 sym := auxToSym(v.Aux)
10564 ptr := v_0
10565 y := v_1
10566 if y.Op != OpAMD64SETA {
10567 break
10568 }
10569 x := y.Args[0]
10570 mem := v_2
10571 if !(y.Uses == 1) {
10572 break
10573 }
10574 v.reset(OpAMD64SETAstore)
10575 v.AuxInt = int32ToAuxInt(off)
10576 v.Aux = symToAux(sym)
10577 v.AddArg3(ptr, x, mem)
10578 return true
10579 }
// match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
// cond: y.Uses == 1
// result: (SETAEstore [off] {sym} ptr x mem)
10583 for {
10584 off := auxIntToInt32(v.AuxInt)
10585 sym := auxToSym(v.Aux)
10586 ptr := v_0
10587 y := v_1
10588 if y.Op != OpAMD64SETAE {
10589 break
10590 }
10591 x := y.Args[0]
10592 mem := v_2
10593 if !(y.Uses == 1) {
10594 break
10595 }
10596 v.reset(OpAMD64SETAEstore)
10597 v.AuxInt = int32ToAuxInt(off)
10598 v.Aux = symToAux(sym)
10599 v.AddArg3(ptr, x, mem)
10600 return true
10601 }
// match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)
10604 for {
10605 off := auxIntToInt32(v.AuxInt)
10606 sym := auxToSym(v.Aux)
10607 ptr := v_0
10608 if v_1.Op != OpAMD64MOVBQSX {
10609 break
10610 }
10611 x := v_1.Args[0]
10612 mem := v_2
10613 v.reset(OpAMD64MOVBstore)
10614 v.AuxInt = int32ToAuxInt(off)
10615 v.Aux = symToAux(sym)
10616 v.AddArg3(ptr, x, mem)
10617 return true
10618 }
// match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)
10621 for {
10622 off := auxIntToInt32(v.AuxInt)
10623 sym := auxToSym(v.Aux)
10624 ptr := v_0
10625 if v_1.Op != OpAMD64MOVBQZX {
10626 break
10627 }
10628 x := v_1.Args[0]
10629 mem := v_2
10630 v.reset(OpAMD64MOVBstore)
10631 v.AuxInt = int32ToAuxInt(off)
10632 v.Aux = symToAux(sym)
10633 v.AddArg3(ptr, x, mem)
10634 return true
10635 }
// match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVBstore [off1+off2] {sym} ptr val mem)
10639 for {
10640 off1 := auxIntToInt32(v.AuxInt)
10641 sym := auxToSym(v.Aux)
10642 if v_0.Op != OpAMD64ADDQconst {
10643 break
10644 }
10645 off2 := auxIntToInt32(v_0.AuxInt)
10646 ptr := v_0.Args[0]
10647 val := v_1
10648 mem := v_2
10649 if !(is32Bit(int64(off1) + int64(off2))) {
10650 break
10651 }
10652 v.reset(OpAMD64MOVBstore)
10653 v.AuxInt = int32ToAuxInt(off1 + off2)
10654 v.Aux = symToAux(sym)
10655 v.AddArg3(ptr, val, mem)
10656 return true
10657 }
// match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
// result: (MOVBstoreconst [makeValAndOff32(int32(int8(c)),off)] {sym} ptr mem)
10660 for {
10661 off := auxIntToInt32(v.AuxInt)
10662 sym := auxToSym(v.Aux)
10663 ptr := v_0
10664 if v_1.Op != OpAMD64MOVLconst {
10665 break
10666 }
10667 c := auxIntToInt32(v_1.AuxInt)
10668 mem := v_2
10669 v.reset(OpAMD64MOVBstoreconst)
10670 v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int8(c)), off))
10671 v.Aux = symToAux(sym)
10672 v.AddArg2(ptr, mem)
10673 return true
10674 }
// match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem)
// result: (MOVBstoreconst [makeValAndOff32(int32(int8(c)),off)] {sym} ptr mem)
10677 for {
10678 off := auxIntToInt32(v.AuxInt)
10679 sym := auxToSym(v.Aux)
10680 ptr := v_0
10681 if v_1.Op != OpAMD64MOVQconst {
10682 break
10683 }
10684 c := auxIntToInt64(v_1.AuxInt)
10685 mem := v_2
10686 v.reset(OpAMD64MOVBstoreconst)
10687 v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int8(c)), off))
10688 v.Aux = symToAux(sym)
10689 v.AddArg2(ptr, mem)
10690 return true
10691 }
// match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
10695 for {
10696 off1 := auxIntToInt32(v.AuxInt)
10697 sym1 := auxToSym(v.Aux)
10698 if v_0.Op != OpAMD64LEAQ {
10699 break
10700 }
10701 off2 := auxIntToInt32(v_0.AuxInt)
10702 sym2 := auxToSym(v_0.Aux)
10703 base := v_0.Args[0]
10704 val := v_1
10705 mem := v_2
10706 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10707 break
10708 }
10709 v.reset(OpAMD64MOVBstore)
10710 v.AuxInt = int32ToAuxInt(off1 + off2)
10711 v.Aux = symToAux(mergeSym(sym1, sym2))
10712 v.AddArg3(base, val, mem)
10713 return true
10714 }
// match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
// cond: x0.Uses == 1 && clobber(x0)
// result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
10718 for {
10719 i := auxIntToInt32(v.AuxInt)
10720 s := auxToSym(v.Aux)
10721 p := v_0
10722 w := v_1
10723 x0 := v_2
10724 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
10725 break
10726 }
10727 mem := x0.Args[2]
10728 if p != x0.Args[0] {
10729 break
10730 }
10731 x0_1 := x0.Args[1]
10732 if x0_1.Op != OpAMD64SHRWconst || auxIntToInt8(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && clobber(x0)) {
10733 break
10734 }
10735 v.reset(OpAMD64MOVWstore)
10736 v.AuxInt = int32ToAuxInt(i - 1)
10737 v.Aux = symToAux(s)
10738 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
10739 v0.AuxInt = int8ToAuxInt(8)
10740 v0.AddArg(w)
10741 v.AddArg3(p, v0, mem)
10742 return true
10743 }
// match: (MOVBstore [i] {s} p1 w x0:(MOVBstore [i] {s} p0 (SHRWconst [8] w) mem))
// cond: x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)
// result: (MOVWstore [i] {s} p0 (ROLWconst <w.Type> [8] w) mem)
10747 for {
10748 i := auxIntToInt32(v.AuxInt)
10749 s := auxToSym(v.Aux)
10750 p1 := v_0
10751 w := v_1
10752 x0 := v_2
10753 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
10754 break
10755 }
10756 mem := x0.Args[2]
10757 p0 := x0.Args[0]
10758 x0_1 := x0.Args[1]
10759 if x0_1.Op != OpAMD64SHRWconst || auxIntToInt8(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)) {
10760 break
10761 }
10762 v.reset(OpAMD64MOVWstore)
10763 v.AuxInt = int32ToAuxInt(i)
10764 v.Aux = symToAux(s)
10765 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
10766 v0.AuxInt = int8ToAuxInt(8)
10767 v0.AddArg(w)
10768 v.AddArg3(p0, v0, mem)
10769 return true
10770 }
// match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
// result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
10774 for {
10775 i := auxIntToInt32(v.AuxInt)
10776 s := auxToSym(v.Aux)
10777 p := v_0
10778 w := v_1
10779 x2 := v_2
10780 if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i-1 || auxToSym(x2.Aux) != s {
10781 break
10782 }
10783 _ = x2.Args[2]
10784 if p != x2.Args[0] {
10785 break
10786 }
10787 x2_1 := x2.Args[1]
10788 if x2_1.Op != OpAMD64SHRLconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
10789 break
10790 }
10791 x1 := x2.Args[2]
10792 if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
10793 break
10794 }
10795 _ = x1.Args[2]
10796 if p != x1.Args[0] {
10797 break
10798 }
10799 x1_1 := x1.Args[1]
10800 if x1_1.Op != OpAMD64SHRLconst || auxIntToInt8(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
10801 break
10802 }
10803 x0 := x1.Args[2]
10804 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-3 || auxToSym(x0.Aux) != s {
10805 break
10806 }
10807 mem := x0.Args[2]
10808 if p != x0.Args[0] {
10809 break
10810 }
10811 x0_1 := x0.Args[1]
10812 if x0_1.Op != OpAMD64SHRLconst || auxIntToInt8(x0_1.AuxInt) != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
10813 break
10814 }
10815 v.reset(OpAMD64MOVLstore)
10816 v.AuxInt = int32ToAuxInt(i - 3)
10817 v.Aux = symToAux(s)
10818 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
10819 v0.AddArg(w)
10820 v.AddArg3(p, v0, mem)
10821 return true
10822 }
// match: (MOVBstore [i] {s} p3 w x2:(MOVBstore [i] {s} p2 (SHRLconst [8] w) x1:(MOVBstore [i] {s} p1 (SHRLconst [16] w) x0:(MOVBstore [i] {s} p0 (SHRLconst [24] w) mem))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)
// result: (MOVLstore [i] {s} p0 (BSWAPL <w.Type> w) mem)
10826 for {
10827 i := auxIntToInt32(v.AuxInt)
10828 s := auxToSym(v.Aux)
10829 p3 := v_0
10830 w := v_1
10831 x2 := v_2
10832 if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s {
10833 break
10834 }
10835 _ = x2.Args[2]
10836 p2 := x2.Args[0]
10837 x2_1 := x2.Args[1]
10838 if x2_1.Op != OpAMD64SHRLconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
10839 break
10840 }
10841 x1 := x2.Args[2]
10842 if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
10843 break
10844 }
10845 _ = x1.Args[2]
10846 p1 := x1.Args[0]
10847 x1_1 := x1.Args[1]
10848 if x1_1.Op != OpAMD64SHRLconst || auxIntToInt8(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
10849 break
10850 }
10851 x0 := x1.Args[2]
10852 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
10853 break
10854 }
10855 mem := x0.Args[2]
10856 p0 := x0.Args[0]
10857 x0_1 := x0.Args[1]
10858 if x0_1.Op != OpAMD64SHRLconst || auxIntToInt8(x0_1.AuxInt) != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)) {
10859 break
10860 }
10861 v.reset(OpAMD64MOVLstore)
10862 v.AuxInt = int32ToAuxInt(i)
10863 v.Aux = symToAux(s)
10864 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
10865 v0.AddArg(w)
10866 v.AddArg3(p0, v0, mem)
10867 return true
10868 }
// match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)
// result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
10872 for {
10873 i := auxIntToInt32(v.AuxInt)
10874 s := auxToSym(v.Aux)
10875 p := v_0
10876 w := v_1
10877 x6 := v_2
10878 if x6.Op != OpAMD64MOVBstore || auxIntToInt32(x6.AuxInt) != i-1 || auxToSym(x6.Aux) != s {
10879 break
10880 }
10881 _ = x6.Args[2]
10882 if p != x6.Args[0] {
10883 break
10884 }
10885 x6_1 := x6.Args[1]
10886 if x6_1.Op != OpAMD64SHRQconst || auxIntToInt8(x6_1.AuxInt) != 8 || w != x6_1.Args[0] {
10887 break
10888 }
10889 x5 := x6.Args[2]
10890 if x5.Op != OpAMD64MOVBstore || auxIntToInt32(x5.AuxInt) != i-2 || auxToSym(x5.Aux) != s {
10891 break
10892 }
10893 _ = x5.Args[2]
10894 if p != x5.Args[0] {
10895 break
10896 }
10897 x5_1 := x5.Args[1]
10898 if x5_1.Op != OpAMD64SHRQconst || auxIntToInt8(x5_1.AuxInt) != 16 || w != x5_1.Args[0] {
10899 break
10900 }
10901 x4 := x5.Args[2]
10902 if x4.Op != OpAMD64MOVBstore || auxIntToInt32(x4.AuxInt) != i-3 || auxToSym(x4.Aux) != s {
10903 break
10904 }
10905 _ = x4.Args[2]
10906 if p != x4.Args[0] {
10907 break
10908 }
10909 x4_1 := x4.Args[1]
10910 if x4_1.Op != OpAMD64SHRQconst || auxIntToInt8(x4_1.AuxInt) != 24 || w != x4_1.Args[0] {
10911 break
10912 }
10913 x3 := x4.Args[2]
10914 if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != i-4 || auxToSym(x3.Aux) != s {
10915 break
10916 }
10917 _ = x3.Args[2]
10918 if p != x3.Args[0] {
10919 break
10920 }
10921 x3_1 := x3.Args[1]
10922 if x3_1.Op != OpAMD64SHRQconst || auxIntToInt8(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
10923 break
10924 }
10925 x2 := x3.Args[2]
10926 if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i-5 || auxToSym(x2.Aux) != s {
10927 break
10928 }
10929 _ = x2.Args[2]
10930 if p != x2.Args[0] {
10931 break
10932 }
10933 x2_1 := x2.Args[1]
10934 if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 40 || w != x2_1.Args[0] {
10935 break
10936 }
10937 x1 := x2.Args[2]
10938 if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i-6 || auxToSym(x1.Aux) != s {
10939 break
10940 }
10941 _ = x1.Args[2]
10942 if p != x1.Args[0] {
10943 break
10944 }
10945 x1_1 := x1.Args[1]
10946 if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 48 || w != x1_1.Args[0] {
10947 break
10948 }
10949 x0 := x1.Args[2]
10950 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-7 || auxToSym(x0.Aux) != s {
10951 break
10952 }
10953 mem := x0.Args[2]
10954 if p != x0.Args[0] {
10955 break
10956 }
10957 x0_1 := x0.Args[1]
10958 if x0_1.Op != OpAMD64SHRQconst || auxIntToInt8(x0_1.AuxInt) != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
10959 break
10960 }
10961 v.reset(OpAMD64MOVQstore)
10962 v.AuxInt = int32ToAuxInt(i - 7)
10963 v.Aux = symToAux(s)
10964 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
10965 v0.AddArg(w)
10966 v.AddArg3(p, v0, mem)
10967 return true
10968 }
10969
10970
10971
10972 for {
10973 i := auxIntToInt32(v.AuxInt)
10974 s := auxToSym(v.Aux)
10975 p7 := v_0
10976 w := v_1
10977 x6 := v_2
10978 if x6.Op != OpAMD64MOVBstore || auxIntToInt32(x6.AuxInt) != i || auxToSym(x6.Aux) != s {
10979 break
10980 }
10981 _ = x6.Args[2]
10982 p6 := x6.Args[0]
10983 x6_1 := x6.Args[1]
10984 if x6_1.Op != OpAMD64SHRQconst || auxIntToInt8(x6_1.AuxInt) != 8 || w != x6_1.Args[0] {
10985 break
10986 }
10987 x5 := x6.Args[2]
10988 if x5.Op != OpAMD64MOVBstore || auxIntToInt32(x5.AuxInt) != i || auxToSym(x5.Aux) != s {
10989 break
10990 }
10991 _ = x5.Args[2]
10992 p5 := x5.Args[0]
10993 x5_1 := x5.Args[1]
10994 if x5_1.Op != OpAMD64SHRQconst || auxIntToInt8(x5_1.AuxInt) != 16 || w != x5_1.Args[0] {
10995 break
10996 }
10997 x4 := x5.Args[2]
10998 if x4.Op != OpAMD64MOVBstore || auxIntToInt32(x4.AuxInt) != i || auxToSym(x4.Aux) != s {
10999 break
11000 }
11001 _ = x4.Args[2]
11002 p4 := x4.Args[0]
11003 x4_1 := x4.Args[1]
11004 if x4_1.Op != OpAMD64SHRQconst || auxIntToInt8(x4_1.AuxInt) != 24 || w != x4_1.Args[0] {
11005 break
11006 }
11007 x3 := x4.Args[2]
11008 if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != i || auxToSym(x3.Aux) != s {
11009 break
11010 }
11011 _ = x3.Args[2]
11012 p3 := x3.Args[0]
11013 x3_1 := x3.Args[1]
11014 if x3_1.Op != OpAMD64SHRQconst || auxIntToInt8(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
11015 break
11016 }
11017 x2 := x3.Args[2]
11018 if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s {
11019 break
11020 }
11021 _ = x2.Args[2]
11022 p2 := x2.Args[0]
11023 x2_1 := x2.Args[1]
11024 if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 40 || w != x2_1.Args[0] {
11025 break
11026 }
11027 x1 := x2.Args[2]
11028 if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
11029 break
11030 }
11031 _ = x1.Args[2]
11032 p1 := x1.Args[0]
11033 x1_1 := x1.Args[1]
11034 if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 48 || w != x1_1.Args[0] {
11035 break
11036 }
11037 x0 := x1.Args[2]
11038 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
11039 break
11040 }
11041 mem := x0.Args[2]
11042 p0 := x0.Args[0]
11043 x0_1 := x0.Args[1]
11044 if x0_1.Op != OpAMD64SHRQconst || auxIntToInt8(x0_1.AuxInt) != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)) {
11045 break
11046 }
11047 v.reset(OpAMD64MOVQstore)
11048 v.AuxInt = int32ToAuxInt(i)
11049 v.Aux = symToAux(s)
11050 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
11051 v0.AddArg(w)
11052 v.AddArg3(p0, v0, mem)
11053 return true
11054 }
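	// The following rules handle the little-endian analogue: a store of w's
	// low byte adjacent to a store of w>>8 merges into a single MOVWstore.
	// Sketch (hypothetical source): p[i-1] = byte(w); p[i] = byte(w >> 8)
	// collapses to one 16-bit store of w at offset i-1.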
	// match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRWconst || auxIntToInt8(v_1.AuxInt) != 8 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i - 1)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 8 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i - 1)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 8 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i - 1)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRWconst [8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		w := v_1
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] {
			break
		}
		x_1 := x.Args[1]
		if x_1.Op != OpAMD64SHRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRLconst [8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		w := v_1
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] {
			break
		}
		x_1 := x.Args[1]
		if x_1.Op != OpAMD64SHRLconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRQconst [8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		w := v_1
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] {
			break
		}
		x_1 := x.Args[1]
		if x_1.Op != OpAMD64SHRQconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i - 1)
		v.Aux = symToAux(s)
		v.AddArg3(p, w0, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i - 1)
		v.Aux = symToAux(s)
		v.AddArg3(p, w0, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p1 (SHRWconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRWconst || auxIntToInt8(v_1.AuxInt) != 8 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p1 (SHRLconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 8 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p1 (SHRQconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 8 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRWconst [8] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p0 := v_0
		w := v_1
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p1 := x.Args[0]
		x_1 := x.Args[1]
		if x_1.Op != OpAMD64SHRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRLconst [8] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p0 := v_0
		w := v_1
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p1 := x.Args[0]
		x_1 := x.Args[1]
		if x_1.Op != OpAMD64SHRLconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRQconst [8] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p0 := v_0
		w := v_1
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p1 := x.Args[0]
		x_1 := x.Args[1]
		if x_1.Op != OpAMD64SHRQconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p1 (SHRLconst [j] w) x:(MOVBstore [i] {s} p0 w0:(SHRLconst [j-8] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w0, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p1 (SHRQconst [j] w) x:(MOVBstore [i] {s} p0 w0:(SHRQconst [j-8] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w0, mem)
		return true
	}
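	// The rule below merges a byte-by-byte memory copy into a word copy: two
	// adjacent MOVBload/MOVBstore pairs over the same base pointers become one
	// MOVWload/MOVWstore pair. Sketch (hypothetical source):
	//	dst[i-1], dst[i] = src[j-1], src[j]
	// turns into a single 16-bit load and store.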
	// match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem))
	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
	// result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x1 := v_1
		if x1.Op != OpAMD64MOVBload {
			break
		}
		j := auxIntToInt32(x1.AuxInt)
		s2 := auxToSym(x1.Aux)
		mem := x1.Args[1]
		p2 := x1.Args[0]
		mem2 := v_2
		if mem2.Op != OpAMD64MOVBstore || auxIntToInt32(mem2.AuxInt) != i-1 || auxToSym(mem2.Aux) != s {
			break
		}
		_ = mem2.Args[2]
		if p != mem2.Args[0] {
			break
		}
		x2 := mem2.Args[1]
		if x2.Op != OpAMD64MOVBload || auxIntToInt32(x2.AuxInt) != j-1 || auxToSym(x2.Aux) != s2 {
			break
		}
		_ = x2.Args[1]
		if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i - 1)
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x2.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = int32ToAuxInt(j - 1)
		v0.Aux = symToAux(s2)
		v0.AddArg2(p2, mem)
		v.AddArg3(p, v0, mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	return false
}
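// rewriteValueAMD64_OpAMD64MOVBstoreconst folds address arithmetic
// (ADDQconst/ADDLconst offsets, LEAQ/LEAL symbols) into the aux fields of a
// constant byte store, and combines two adjacent MOVBstoreconst values into
// one MOVWstoreconst.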
func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)
	// result: (MOVWstoreconst [makeValAndOff64(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
	for {
		c := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64MOVBstoreconst {
			break
		}
		a := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		if p != x.Args[0] || !(x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff64(a.Val()&0xff|c.Val()<<8, a.Off()))
		v.Aux = symToAux(s)
		v.AddArg2(p, mem)
		return true
	}
	// match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
	// cond: x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)
	// result: (MOVWstoreconst [makeValAndOff64(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
	for {
		a := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64MOVBstoreconst {
			break
		}
		c := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		if p != x.Args[0] || !(x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff64(a.Val()&0xff|c.Val()<<8, a.Off()))
		v.Aux = symToAux(s)
		v.AddArg2(p, mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && sc.canAdd32(off)
	// result: (MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: sc.canAdd32(off)
	// result: (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(sc.canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
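// rewriteValueAMD64_OpAMD64MOVLQSX simplifies 32-to-64-bit sign extension:
// it folds the extension into the load that feeds it, and drops redundant
// extensions of values that are already suitably sign-extended or masked.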
func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQSX (ANDLconst [c] x))
	// cond: uint32(c) & 0x80000000 == 0
	// result: (ANDLconst [c & 0x7fffffff] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(uint32(c)&0x80000000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0x7fffffff)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVLQSX x))
	// result: (MOVLQSX x)
	for {
		if v_0.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVWQSX x))
	// result: (MOVWQSX x)
	for {
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
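// rewriteValueAMD64_OpAMD64MOVLQSXload forwards a store to a following
// sign-extending load of the same address, and folds LEAQ offsets and
// symbols into the load's aux fields.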
func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQSX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLQSXload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
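// rewriteValueAMD64_OpAMD64MOVLQZX simplifies 32-to-64-bit zero extension:
// it folds the extension into a feeding load, drops it when the operand is
// known to have zero upper bits, and collapses redundant extensions.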
func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQZX x)
	// cond: zeroUpper32Bits(x,3)
	// result: x
	for {
		x := v_0
		if !(zeroUpper32Bits(x, 3)) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (MOVLQZX (ANDLconst [c] x))
	// result: (ANDLconst [c] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVLQZX x))
	// result: (MOVLQZX x)
	for {
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVWQZX x))
	// result: (MOVWQZX x)
	for {
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
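// rewriteValueAMD64_OpAMD64MOVLatomicload folds constant offsets and symbol
// references (ADDQconst, LEAQ) into the atomic load's AuxInt/Aux fields.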
func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVLatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
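// rewriteValueAMD64_OpAMD64MOVLf2i reinterprets a 32-bit float argument as an
// integer by rematerializing the Arg in the entry block with the integer
// type, provided both types have the same size.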
func rewriteValueAMD64_OpAMD64MOVLf2i(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLf2i <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
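// rewriteValueAMD64_OpAMD64MOVLi2f is the mirror of MOVLf2i: it reinterprets
// a 32-bit integer argument as a float by rematerializing the Arg in the
// entry block with the float type of the same size.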
func rewriteValueAMD64_OpAMD64MOVLi2f(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLi2f <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
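// rewriteValueAMD64_OpAMD64MOVLload forwards stores to loads of the same
// address, folds address arithmetic into the load, reinterprets a float
// store as an integer value, and constant-folds loads from read-only data.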
func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQZX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
	// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _))
	// result: (MOVLf2i val)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVSSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64MOVLf2i)
		v.AddArg(val)
		return true
	}
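	// The final MOVLload rule constant-folds a load from read-only data: a
	// 32-bit load from a read-only global (e.g., the bytes of a string
	// constant in the data segment) becomes a MOVQconst with the bytes
	// decoded in the target byte order.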
	// match: (MOVLload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
		return true
	}
	return false
}
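// rewriteValueAMD64_OpAMD64MOVLstore drops redundant extensions of the stored
// value, folds address arithmetic and constants into the store, merges
// adjacent 32-bit stores into 64-bit ones, and fuses load/op/store sequences
// into read-modify-write instructions.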
func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
	// result: (MOVLstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
	// result: (MOVLstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (MOVLstoreconst [makeValAndOff32(int32(c), off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (MOVLstoreconst [makeValAndOff32(int32(c), off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
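	// The next rules merge a store of w's high half adjacent to a store of
	// its low half into one MOVQstore: storing uint32(w) at offset i-4 and
	// uint32(w >> 32) at offset i becomes a single 64-bit store of w at i-4
	// (or at i for the sequential-address variants).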
	// match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 32 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i - 4)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i - 4)
		v.Aux = symToAux(s)
		v.AddArg3(p, w0, mem)
		return true
	}
	// match: (MOVLstore [i] {s} p1 (SHRQconst [32] w) x:(MOVLstore [i] {s} p0 w mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)
	// result: (MOVQstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 32 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVLstore [i] {s} p1 (SHRQconst [j] w) x:(MOVLstore [i] {s} p0 w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)
	// result: (MOVQstore [i] {s} p0 w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w0, mem)
		return true
	}
	// match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem))
	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
	// result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x1 := v_1
		if x1.Op != OpAMD64MOVLload {
			break
		}
		j := auxIntToInt32(x1.AuxInt)
		s2 := auxToSym(x1.Aux)
		mem := x1.Args[1]
		p2 := x1.Args[0]
		mem2 := v_2
		if mem2.Op != OpAMD64MOVLstore || auxIntToInt32(mem2.AuxInt) != i-4 || auxToSym(mem2.Aux) != s {
			break
		}
		_ = mem2.Args[2]
		if p != mem2.Args[0] {
			break
		}
		x2 := mem2.Args[1]
		if x2.Op != OpAMD64MOVLload || auxIntToInt32(x2.AuxInt) != j-4 || auxToSym(x2.Aux) != s2 {
			break
		}
		_ = x2.Args[1]
		if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i - 4)
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x2.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(j - 4)
		v0.Aux = symToAux(s2)
		v0.AddArg2(p2, mem)
		v.AddArg3(p, v0, mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
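	// The remaining rules fuse a load, an ALU operation, and a store to the
	// same address into a single read-modify-write instruction. Sketch
	// (hypothetical source): *p = *p + x compiles to one ADDLmodify
	// (an ADDL into the memory operand) instead of separate load, add, and
	// store instructions.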
	// match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ADDLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ANDLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ORLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (XORLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (ADDLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ADDL {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64ADDLmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (SUBLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SUBL {
			break
		}
		x := y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (ANDLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ANDL {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64ANDLmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (ORLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ORL {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64ORLmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (XORLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64XORL {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64XORLmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVLstore {sym} [off] ptr y:(BTCL <t> l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (BTCLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64BTCL {
			break
		}
		t := y.Type
		x := y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
			break
		}
		v.reset(OpAMD64BTCLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(l.Pos, OpAMD64ANDLconst, t)
		v0.AuxInt = int32ToAuxInt(31)
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(BTRL <t> l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (BTRLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64BTRL {
			break
		}
		t := y.Type
		x := y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
			break
		}
		v.reset(OpAMD64BTRLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(l.Pos, OpAMD64ANDLconst, t)
		v0.AuxInt = int32ToAuxInt(31)
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(BTSL <t> l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (BTSLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64BTSL {
			break
		}
		t := y.Type
		x := y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
			break
		}
		v.reset(OpAMD64BTSLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(l.Pos, OpAMD64ANDLconst, t)
		v0.AuxInt = int32ToAuxInt(31)
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)
	// result: (ADDLconstmodify [makeValAndOff32(int32(c), off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)
	// result: (ANDLconstmodify [makeValAndOff32(int32(c), off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)
	// result: (ORLconstmodify [makeValAndOff32(int32(c), off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64ORLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]